Example #1
        /// <exception cref="System.Exception"/>
        private void TestMinimumAllocation(YarnConfiguration conf, int testAlloc)
        {
            MockRM rm = new MockRM(conf);

            rm.Start();
            // Register node1
            MockNM nm1 = rm.RegisterNode("127.0.0.1:1234", 6 * Gb);
            // Submit an application
            RMApp app1 = rm.SubmitApp(testAlloc);

            // kick the scheduling
            nm1.NodeHeartbeat(true);
            RMAppAttempt attempt1 = app1.GetCurrentAppAttempt();
            MockAM       am1      = rm.SendAMLaunched(attempt1.GetAppAttemptId());

            am1.RegisterAppAttempt();
            SchedulerNodeReport report_nm1 = rm.GetResourceScheduler().GetNodeReport(nm1.GetNodeId
                                                                                         ());
            int checkAlloc = conf.GetInt(YarnConfiguration.RmSchedulerMinimumAllocationMb, YarnConfiguration
                                         .DefaultRmSchedulerMinimumAllocationMb);

            NUnit.Framework.Assert.AreEqual(checkAlloc, report_nm1.GetUsedResource().GetMemory
                                                ());
            rm.Stop();
        }
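A minimal sketch of how this private helper might be driven from test methods, assuming NUnit [Test] wrappers and request sizes below the configured scheduler minimum (the wrapper names and the 512 MB / 2048 MB values are illustrative, not taken from the original suite):

        // Hypothetical callers of the helper above; requests stay below the minimum so the
        // scheduler rounds the AM container up and the helper's assertion holds.
        [NUnit.Framework.Test]
        public virtual void TestDefaultMinimumAllocation()
        {
            // Assumes the default minimum allocation is larger than the 512 MB requested here.
            TestMinimumAllocation(new YarnConfiguration(), 512);
        }

        [NUnit.Framework.Test]
        public virtual void TestNonDefaultMinimumAllocation()
        {
            // Assumed override: raise the minimum, then request less than it.
            YarnConfiguration conf = new YarnConfiguration();
            conf.SetInt(YarnConfiguration.RmSchedulerMinimumAllocationMb, 2048);
            TestMinimumAllocation(conf, 1024);
        }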
Example #2
        /// <exception cref="System.Exception"/>
        public virtual void TestAppCleanupWhenNMReconnects()
        {
            conf.SetInt(YarnConfiguration.RmAmMaxAttempts, 1);
            MemoryRMStateStore memStore = new MemoryRMStateStore();

            memStore.Init(conf);
            // start RM
            MockRM rm1 = new MockRM(conf, memStore);

            rm1.Start();
            MockNM nm1 = new MockNM("127.0.0.1:1234", 15120, rm1.GetResourceTrackerService());

            nm1.RegisterNode();
            // create app and launch the AM
            RMApp  app0 = rm1.SubmitApp(200);
            MockAM am0  = LaunchAM(app0, rm1, nm1);

            nm1.NodeHeartbeat(am0.GetApplicationAttemptId(), 1, ContainerState.Complete);
            rm1.WaitForState(app0.GetApplicationId(), RMAppState.Failed);
            // wait for application cleanup message received
            WaitForAppCleanupMessageRecved(nm1, app0.GetApplicationId());
            // reconnect NM with application still active
            nm1.RegisterNode(Arrays.AsList(app0.GetApplicationId()));
            WaitForAppCleanupMessageRecved(nm1, app0.GetApplicationId());
            rm1.Stop();
        }
Example #3
 public virtual void TearDown()
 {
     if (rm != null)
     {
         rm.Stop();
     }
 }
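This teardown null-checks rm before stopping it, which implies a setup that creates and starts the MockRM it tears down. A minimal sketch of that counterpart, assuming an rm field and an NUnit [SetUp] method (both are assumptions inferred from the teardown, not code from the original test):

 // Assumed counterpart to the TearDown above: create and start the RM under test.
 private MockRM rm;

 [NUnit.Framework.SetUp]
 public virtual void SetUp()
 {
     YarnConfiguration conf = new YarnConfiguration();
     rm = new MockRM(conf);
     rm.Start();
 }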
Example #4
        public virtual void TestNodeUpdateBeforeAppAttemptInit()
        {
            FifoScheduler scheduler = new FifoScheduler();
            MockRM        rm        = new MockRM(conf);

            scheduler.SetRMContext(rm.GetRMContext());
            scheduler.Init(conf);
            scheduler.Start();
            scheduler.Reinitialize(conf, rm.GetRMContext());
            RMNode node = MockNodes.NewNodeInfo(1, Resources.CreateResource(1024, 4), 1, "127.0.0.1"
                                                );

            scheduler.Handle(new NodeAddedSchedulerEvent(node));
            ApplicationId appId = ApplicationId.NewInstance(0, 1);

            scheduler.AddApplication(appId, "queue1", "user1", false);
            NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node);

            try
            {
                scheduler.Handle(updateEvent);
            }
            catch (ArgumentNullException)
            {
                NUnit.Framework.Assert.Fail();
            }
            ApplicationAttemptId attId = ApplicationAttemptId.NewInstance(appId, 1);

            scheduler.AddApplicationAttempt(attId, false, false);
            rm.Stop();
        }
Example #5
 public static void TearDown()
 {
     if (resourceManager != null)
     {
         resourceManager.Stop();
     }
 }
Example #6
 public virtual void TearDown()
 {
     if (resourceManager != null)
     {
         resourceManager.Stop();
     }
 }
Example #7
        /// <exception cref="System.Exception"/>
        public virtual void TestTransitionedToStandbyShouldNotHang()
        {
            configuration.SetBoolean(YarnConfiguration.AutoFailoverEnabled, false);
            Configuration      conf     = new YarnConfiguration(configuration);
            MemoryRMStateStore memStore = new _MemoryRMStateStore_464();

            memStore.Init(conf);
            rm = new _MockRM_472(conf, memStore);
            rm.Init(conf);
            HAServiceProtocol.StateChangeRequestInfo requestInfo = new HAServiceProtocol.StateChangeRequestInfo
                                                                       (HAServiceProtocol.RequestSource.RequestByUser);
            NUnit.Framework.Assert.AreEqual(StateErr, HAServiceProtocol.HAServiceState.Initializing
                                            , rm.adminService.GetServiceStatus().GetState());
            NUnit.Framework.Assert.IsFalse("RM is ready to become active before being started"
                                           , rm.adminService.GetServiceStatus().IsReadyToBecomeActive());
            CheckMonitorHealth();
            rm.Start();
            CheckMonitorHealth();
            CheckStandbyRMFunctionality();
            // 2. Transition to Active.
            rm.adminService.TransitionToActive(requestInfo);
            // 3. Try Transition to standby
            Sharpen.Thread t = new Sharpen.Thread(new _Runnable_498(this));
            // TODO Auto-generated catch block
            t.Start();
            rm.GetRMContext().GetStateStore().UpdateApplicationState(null);
            t.Join();
            // wait for thread to finish
            rm.adminService.TransitionToStandby(requestInfo);
            CheckStandbyRMFunctionality();
            rm.Stop();
        }
Example #8
        public virtual void TestProxyUserConfiguration()
        {
            MockRM rm = null;

            try
            {
                rm = new MockRM(conf);
                rm.Start();
                // wait for web server starting
                Sharpen.Thread.Sleep(10000);
                UserGroupInformation proxyUser = UserGroupInformation.CreateProxyUser(BarUser.GetShortUserName
                                                                                          (), FooUser);
                try
                {
                    ProxyUsers.GetDefaultImpersonationProvider().Authorize(proxyUser, ipAddress);
                }
                catch (AuthorizationException)
                {
                    // Exception is not expected
                    NUnit.Framework.Assert.Fail();
                }
            }
            finally
            {
                if (rm != null)
                {
                    rm.Stop();
                    rm.Close();
                }
            }
        }
Example #9
        /// <exception cref="System.Exception"/>
        public virtual void TestInvalidContainerReleaseRequest()
        {
            MockRM rm = new MockRM(conf);

            try
            {
                rm.Start();
                // Register node1
                MockNM nm1 = rm.RegisterNode("127.0.0.1:1234", 6 * Gb);
                // Submit an application
                RMApp app1 = rm.SubmitApp(1024);
                // kick the scheduling
                nm1.NodeHeartbeat(true);
                RMAppAttempt attempt1 = app1.GetCurrentAppAttempt();
                MockAM       am1      = rm.SendAMLaunched(attempt1.GetAppAttemptId());
                am1.RegisterAppAttempt();
                am1.AddRequests(new string[] { "127.0.0.1" }, Gb, 1, 1);
                AllocateResponse alloc1Response = am1.Schedule();
                // send the request
                // kick the scheduler
                nm1.NodeHeartbeat(true);
                while (alloc1Response.GetAllocatedContainers().Count < 1)
                {
                    Log.Info("Waiting for containers to be created for app 1...");
                    Sharpen.Thread.Sleep(1000);
                    alloc1Response = am1.Schedule();
                }
                NUnit.Framework.Assert.IsTrue(alloc1Response.GetAllocatedContainers().Count > 0);
                RMApp app2 = rm.SubmitApp(1024);
                nm1.NodeHeartbeat(true);
                RMAppAttempt attempt2 = app2.GetCurrentAppAttempt();
                MockAM       am2      = rm.SendAMLaunched(attempt2.GetAppAttemptId());
                am2.RegisterAppAttempt();
                // Now trying to release container allocated for app1 -> appAttempt1.
                ContainerId cId = alloc1Response.GetAllocatedContainers()[0].GetId();
                am2.AddContainerToBeReleased(cId);
                try
                {
                    am2.Schedule();
                    NUnit.Framework.Assert.Fail("Exception was expected!!");
                }
                catch (InvalidContainerReleaseException e)
                {
                    StringBuilder sb = new StringBuilder("Cannot release container : ");
                    sb.Append(cId.ToString());
                    sb.Append(" not belonging to this application attempt : ");
                    sb.Append(attempt2.GetAppAttemptId().ToString());
                    NUnit.Framework.Assert.IsTrue(e.Message.Contains(sb.ToString()));
                }
            }
            finally
            {
                if (rm != null)
                {
                    rm.Stop();
                }
            }
        }
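Several of these examples poll am.Schedule() in a while/Sleep loop until at least one container is allocated. A hedged sketch of a bounded helper for that pattern is below; the helper name and retry cap are illustrative assumptions, not part of the MockRM/MockAM API:

        // Sketch: wait (with a retry cap) until the AM sees at least one allocated container.
        private static AllocateResponse WaitForAllocation(MockAM am, MockNM nm, int maxRetries)
        {
            AllocateResponse response = am.Schedule();
            int retries = 0;
            while (response.GetAllocatedContainers().Count < 1 && retries++ < maxRetries)
            {
                // Keep the scheduler moving, then re-poll the AM's view of the allocation.
                nm.NodeHeartbeat(true);
                Sharpen.Thread.Sleep(1000);
                response = am.Schedule();
            }
            return response;
        }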
Example #10
        /// <exception cref="System.Exception"/>
        public virtual void TestAppOnMultiNode()
        {
            Logger rootLogger = LogManager.GetRootLogger();

            rootLogger.SetLevel(Level.Debug);
            conf.Set("yarn.scheduler.capacity.node-locality-delay", "-1");
            MockRM rm = new MockRM(conf);

            rm.Start();
            MockNM nm1 = rm.RegisterNode("h1:1234", 5120);
            MockNM nm2 = rm.RegisterNode("h2:5678", 10240);
            RMApp  app = rm.SubmitApp(2000);

            //kick the scheduling
            nm1.NodeHeartbeat(true);
            RMAppAttempt attempt = app.GetCurrentAppAttempt();
            MockAM       am      = rm.SendAMLaunched(attempt.GetAppAttemptId());

            am.RegisterAppAttempt();
            //request for containers
            int request = 13;

            am.Allocate("h1", 1000, request, new AList <ContainerId>());
            //kick the scheduler
            IList <Container> conts = am.Allocate(new AList <ResourceRequest>(), new AList <ContainerId
                                                                                            >()).GetAllocatedContainers();
            int contReceived = conts.Count;

            while (contReceived < 3)
            {
                //only 3 containers are available on node1
                nm1.NodeHeartbeat(true);
                Sharpen.Collections.AddAll(conts, am.Allocate(new AList <ResourceRequest>(), new AList
                                                              <ContainerId>()).GetAllocatedContainers());
                contReceived = conts.Count;
                Log.Info("Got " + contReceived + " containers. Waiting to get " + 3);
                Sharpen.Thread.Sleep(WaitSleepMs);
            }
            NUnit.Framework.Assert.AreEqual(3, conts.Count);
            //send node2 heartbeat
            conts = am.Allocate(new AList <ResourceRequest>(), new AList <ContainerId>()).GetAllocatedContainers
                        ();
            contReceived = conts.Count;
            while (contReceived < 10)
            {
                nm2.NodeHeartbeat(true);
                Sharpen.Collections.AddAll(conts, am.Allocate(new AList <ResourceRequest>(), new AList
                                                              <ContainerId>()).GetAllocatedContainers());
                contReceived = conts.Count;
                Log.Info("Got " + contReceived + " containers. Waiting to get " + 10);
                Sharpen.Thread.Sleep(WaitSleepMs);
            }
            NUnit.Framework.Assert.AreEqual(10, conts.Count);
            am.UnregisterAppAttempt();
            nm1.NodeHeartbeat(attempt.GetAppAttemptId(), 1, ContainerState.Complete);
            am.WaitForState(RMAppAttemptState.Finished);
            rm.Stop();
        }
Example #11
        // Other stuff is verified in the regular web-services related tests
        /// <summary>Test to verify the following RM HA transitions to the following states.</summary>
        /// <remarks>
        /// Test to verify the following RM HA transitions to the following states.
        /// 1. Standby: Should be a no-op
        /// 2. Active: Active services should start
        /// 3. Active: Should be a no-op.
        /// While active, submit a couple of jobs
        /// 4. Standby: Active services should stop
        /// 5. Active: Active services should start
        /// 6. Stop the RM: All services should stop and RM should not be ready to
        /// become Active
        /// </remarks>
        /// <exception cref="System.Exception"/>
        public virtual void TestFailoverAndTransitions()
        {
            configuration.SetBoolean(YarnConfiguration.AutoFailoverEnabled, false);
            Configuration conf = new YarnConfiguration(configuration);

            rm = new MockRM(conf);
            rm.Init(conf);
            HAServiceProtocol.StateChangeRequestInfo requestInfo = new HAServiceProtocol.StateChangeRequestInfo
                                                                       (HAServiceProtocol.RequestSource.RequestByUser);
            NUnit.Framework.Assert.AreEqual(StateErr, HAServiceProtocol.HAServiceState.Initializing
                                            , rm.adminService.GetServiceStatus().GetState());
            NUnit.Framework.Assert.IsFalse("RM is ready to become active before being started"
                                           , rm.adminService.GetServiceStatus().IsReadyToBecomeActive());
            CheckMonitorHealth();
            rm.Start();
            CheckMonitorHealth();
            CheckStandbyRMFunctionality();
            VerifyClusterMetrics(0, 0, 0, 0, 0, 0);
            // 1. Transition to Standby - must be a no-op
            rm.adminService.TransitionToStandby(requestInfo);
            CheckMonitorHealth();
            CheckStandbyRMFunctionality();
            VerifyClusterMetrics(0, 0, 0, 0, 0, 0);
            // 2. Transition to active
            rm.adminService.TransitionToActive(requestInfo);
            CheckMonitorHealth();
            CheckActiveRMFunctionality();
            VerifyClusterMetrics(1, 1, 1, 1, 2048, 1);
            // 3. Transition to active - no-op
            rm.adminService.TransitionToActive(requestInfo);
            CheckMonitorHealth();
            CheckActiveRMFunctionality();
            VerifyClusterMetrics(1, 2, 2, 2, 2048, 2);
            // 4. Transition to standby
            rm.adminService.TransitionToStandby(requestInfo);
            CheckMonitorHealth();
            CheckStandbyRMFunctionality();
            VerifyClusterMetrics(0, 0, 0, 0, 0, 0);
            // 5. Transition to active to check Active->Standby->Active works
            rm.adminService.TransitionToActive(requestInfo);
            CheckMonitorHealth();
            CheckActiveRMFunctionality();
            VerifyClusterMetrics(1, 1, 1, 1, 2048, 1);
            // 6. Stop the RM. All services should stop and RM should not be ready to
            // become active
            rm.Stop();
            NUnit.Framework.Assert.AreEqual(StateErr, HAServiceProtocol.HAServiceState.Stopping
                                            , rm.adminService.GetServiceStatus().GetState());
            NUnit.Framework.Assert.IsFalse("RM is ready to become active even after it is stopped"
                                           , rm.adminService.GetServiceStatus().IsReadyToBecomeActive());
            NUnit.Framework.Assert.IsFalse("Active RM services are started", rm.AreActiveServicesRunning
                                               ());
            CheckMonitorHealth();
        }
Example #12
 public virtual void Teardown()
 {
     if (rm1 != null)
     {
         rm1.Stop();
     }
     if (rm2 != null)
     {
         rm2.Stop();
     }
 }
Example #13
        /// <exception cref="System.Exception"/>
        public virtual void TestAppCleanupWhenRMRestartedBeforeAppFinished()
        {
            conf.SetInt(YarnConfiguration.RmAmMaxAttempts, 1);
            MemoryRMStateStore memStore = new MemoryRMStateStore();

            memStore.Init(conf);
            // start RM
            MockRM rm1 = new MockRM(conf, memStore);

            rm1.Start();
            MockNM nm1 = new MockNM("127.0.0.1:1234", 1024, rm1.GetResourceTrackerService());

            nm1.RegisterNode();
            MockNM nm2 = new MockNM("127.0.0.1:5678", 1024, rm1.GetResourceTrackerService());

            nm2.RegisterNode();
            // create app and launch the AM
            RMApp  app0 = rm1.SubmitApp(200);
            MockAM am0  = LaunchAM(app0, rm1, nm1);
            // alloc another container on nm2
            AllocateResponse allocResponse = am0.Allocate(Arrays.AsList(ResourceRequest.NewInstance
                                                                            (Priority.NewInstance(1), "*", Resource.NewInstance(1024, 0), 1)), null);

            while (null == allocResponse.GetAllocatedContainers() || allocResponse.GetAllocatedContainers
                       ().IsEmpty())
            {
                nm2.NodeHeartbeat(true);
                allocResponse = am0.Allocate(null, null);
                Sharpen.Thread.Sleep(1000);
            }
            // start new RM
            MockRM rm2 = new MockRM(conf, memStore);

            rm2.Start();
            // nm1/nm2 register to rm2, and do a heartbeat
            nm1.SetResourceTrackerService(rm2.GetResourceTrackerService());
            nm1.RegisterNode(Arrays.AsList(NMContainerStatus.NewInstance(ContainerId.NewContainerId
                                                                             (am0.GetApplicationAttemptId(), 1), ContainerState.Complete, Resource.NewInstance
                                                                             (1024, 1), string.Empty, 0, Priority.NewInstance(0), 1234)), Arrays.AsList(app0.
                                                                                                                                                        GetApplicationId()));
            nm2.SetResourceTrackerService(rm2.GetResourceTrackerService());
            nm2.RegisterNode(Arrays.AsList(app0.GetApplicationId()));
            // assert app state has been saved.
            rm2.WaitForState(app0.GetApplicationId(), RMAppState.Failed);
            // wait for application cleanup message received on NM1
            WaitForAppCleanupMessageRecved(nm1, app0.GetApplicationId());
            // wait for application cleanup message received on NM2
            WaitForAppCleanupMessageRecved(nm2, app0.GetApplicationId());
            rm1.Stop();
            rm2.Stop();
        }
Example #14
        public virtual void TestGetNewAppId()
        {
            Logger rootLogger = LogManager.GetRootLogger();

            rootLogger.SetLevel(Level.Debug);
            MockRM rm = new MockRM(conf);

            rm.Start();
            GetNewApplicationResponse resp = rm.GetNewAppId();

            System.Diagnostics.Debug.Assert((resp.GetApplicationId().GetId() != 0));
            System.Diagnostics.Debug.Assert((resp.GetMaximumResourceCapability().GetMemory()
                                             > 0));
            rm.Stop();
        }
Example #15
        public virtual void TestRMDispatcherForHA()
        {
            string errorMessageForEventHandler = "Expect to get the same number of handlers";
            string errorMessageForService      = "Expect to get the same number of services";

            configuration.SetBoolean(YarnConfiguration.AutoFailoverEnabled, false);
            Configuration conf = new YarnConfiguration(configuration);

            rm = new _MockRM_313(conf);
            rm.Init(conf);
            int expectedEventHandlerCount = ((TestRMHA.MyCountingDispatcher)rm.GetRMContext()
                                             .GetDispatcher()).GetEventHandlerCount();
            int expectedServiceCount = rm.GetServices().Count;

            NUnit.Framework.Assert.IsTrue(expectedEventHandlerCount != 0);
            HAServiceProtocol.StateChangeRequestInfo requestInfo = new HAServiceProtocol.StateChangeRequestInfo
                                                                       (HAServiceProtocol.RequestSource.RequestByUser);
            NUnit.Framework.Assert.AreEqual(StateErr, HAServiceProtocol.HAServiceState.Initializing
                                            , rm.adminService.GetServiceStatus().GetState());
            NUnit.Framework.Assert.IsFalse("RM is ready to become active before being started"
                                           , rm.adminService.GetServiceStatus().IsReadyToBecomeActive());
            rm.Start();
            //call transitions to standby and active a couple of times
            rm.adminService.TransitionToStandby(requestInfo);
            rm.adminService.TransitionToActive(requestInfo);
            rm.adminService.TransitionToStandby(requestInfo);
            rm.adminService.TransitionToActive(requestInfo);
            rm.adminService.TransitionToStandby(requestInfo);
            TestRMHA.MyCountingDispatcher dispatcher = (TestRMHA.MyCountingDispatcher)rm.GetRMContext
                                                           ().GetDispatcher();
            NUnit.Framework.Assert.IsTrue(!dispatcher.IsStopped());
            rm.adminService.TransitionToActive(requestInfo);
            NUnit.Framework.Assert.AreEqual(errorMessageForEventHandler, expectedEventHandlerCount
                                            , ((TestRMHA.MyCountingDispatcher)rm.GetRMContext().GetDispatcher()).GetEventHandlerCount
                                                ());
            NUnit.Framework.Assert.AreEqual(errorMessageForService, expectedServiceCount, rm.
                                            GetServices().Count);
            // Keep the dispatcher reference before transitioning to standby
            dispatcher = (TestRMHA.MyCountingDispatcher)rm.GetRMContext().GetDispatcher();
            rm.adminService.TransitionToStandby(requestInfo);
            NUnit.Framework.Assert.AreEqual(errorMessageForEventHandler, expectedEventHandlerCount
                                            , ((TestRMHA.MyCountingDispatcher)rm.GetRMContext().GetDispatcher()).GetEventHandlerCount
                                                ());
            NUnit.Framework.Assert.AreEqual(errorMessageForService, expectedServiceCount, rm.
                                            GetServices().Count);
            NUnit.Framework.Assert.IsTrue(dispatcher.IsStopped());
            rm.Stop();
        }
Example #16
        /// <exception cref="System.Exception"/>
        public virtual void TestMoveSuccessful()
        {
            MockRM rm1 = new MockRM(conf);

            rm1.Start();
            RMApp           app             = rm1.SubmitApp(1024);
            ClientRMService clientRMService = rm1.GetClientRMService();

            // FIFO scheduler does not support moves
            clientRMService.MoveApplicationAcrossQueues(MoveApplicationAcrossQueuesRequest.NewInstance
                                                            (app.GetApplicationId(), "newqueue"));
            RMApp rmApp = rm1.GetRMContext().GetRMApps()[app.GetApplicationId()];

            NUnit.Framework.Assert.AreEqual("newqueue", rmApp.GetQueue());
            rm1.Stop();
        }
Example #17
        /// <exception cref="System.Exception"/>
        public virtual void TestUsageWithOneAttemptAndOneContainer()
        {
            MockRM rm = new MockRM(conf);

            rm.Start();
            MockNM nm = new MockNM("127.0.0.1:1234", 15120, rm.GetResourceTrackerService());

            nm.RegisterNode();
            RMApp        app0         = rm.SubmitApp(200);
            RMAppMetrics rmAppMetrics = app0.GetRMAppMetrics();

            NUnit.Framework.Assert.IsTrue("Before app submittion, memory seconds should have been 0 but was "
                                          + rmAppMetrics.GetMemorySeconds(), rmAppMetrics.GetMemorySeconds() == 0);
            NUnit.Framework.Assert.IsTrue("Before app submission, vcore seconds should have been 0 but was "
                                          + rmAppMetrics.GetVcoreSeconds(), rmAppMetrics.GetVcoreSeconds() == 0);
            RMAppAttempt attempt0 = app0.GetCurrentAppAttempt();

            nm.NodeHeartbeat(true);
            MockAM am0 = rm.SendAMLaunched(attempt0.GetAppAttemptId());

            am0.RegisterAppAttempt();
            RMContainer rmContainer = rm.GetResourceScheduler().GetRMContainer(attempt0.GetMasterContainer
                                                                                   ().GetId());
            // Allow metrics to accumulate.
            int sleepInterval       = 1000;
            int cumulativeSleepTime = 0;

            while (rmAppMetrics.GetMemorySeconds() <= 0 && cumulativeSleepTime < 5000)
            {
                Sharpen.Thread.Sleep(sleepInterval);
                cumulativeSleepTime += sleepInterval;
            }
            rmAppMetrics = app0.GetRMAppMetrics();
            NUnit.Framework.Assert.IsTrue("While app is running, memory seconds should be >0 but is "
                                          + rmAppMetrics.GetMemorySeconds(), rmAppMetrics.GetMemorySeconds() > 0);
            NUnit.Framework.Assert.IsTrue("While app is running, vcore seconds should be >0 but is "
                                          + rmAppMetrics.GetVcoreSeconds(), rmAppMetrics.GetVcoreSeconds() > 0);
            MockRM.FinishAMAndVerifyAppState(app0, rm, nm, am0);
            AggregateAppResourceUsage ru = CalculateContainerResourceMetrics(rmContainer);

            rmAppMetrics = app0.GetRMAppMetrics();
            NUnit.Framework.Assert.AreEqual("Unexcpected MemorySeconds value", ru.GetMemorySeconds
                                                (), rmAppMetrics.GetMemorySeconds());
            NUnit.Framework.Assert.AreEqual("Unexpected VcoreSeconds value", ru.GetVcoreSeconds
                                                (), rmAppMetrics.GetVcoreSeconds());
            rm.Stop();
        }
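The metrics-accumulation loop above (and the similar loops in the RM-restart example later) polls until memory seconds become positive or a time cap is hit. A hedged helper along these lines could factor that out; the method name and its parameters are assumptions for illustration only:

        // Sketch: poll app metrics until memory seconds accumulate or the cap is reached (assumed helper).
        private static void WaitForMemorySeconds(RMApp app, int capMillis)
        {
            int sleepInterval = 1000;
            int cumulativeSleepTime = 0;
            while (app.GetRMAppMetrics().GetMemorySeconds() <= 0 && cumulativeSleepTime < capMillis)
            {
                Sharpen.Thread.Sleep(sleepInterval);
                cumulativeSleepTime += sleepInterval;
            }
        }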
Example #18
        /// <exception cref="System.Exception"/>
        public virtual void TestResourceTypes()
        {
            Dictionary <YarnConfiguration, EnumSet <YarnServiceProtos.SchedulerResourceTypes> >
            driver = new Dictionary <YarnConfiguration, EnumSet <YarnServiceProtos.SchedulerResourceTypes
                                                                 > >();
            CapacitySchedulerConfiguration csconf = new CapacitySchedulerConfiguration();

            csconf.SetResourceComparator(typeof(DominantResourceCalculator));
            YarnConfiguration testCapacityDRConf = new YarnConfiguration(csconf);

            testCapacityDRConf.SetClass(YarnConfiguration.RmScheduler, typeof(CapacityScheduler
                                                                              ), typeof(ResourceScheduler));
            YarnConfiguration testCapacityDefConf = new YarnConfiguration();

            testCapacityDefConf.SetClass(YarnConfiguration.RmScheduler, typeof(CapacityScheduler
                                                                               ), typeof(ResourceScheduler));
            YarnConfiguration testFairDefConf = new YarnConfiguration();

            testFairDefConf.SetClass(YarnConfiguration.RmScheduler, typeof(FairScheduler), typeof(
                                         ResourceScheduler));
            driver[conf] = EnumSet.Of(YarnServiceProtos.SchedulerResourceTypes.Memory);
            driver[testCapacityDRConf] = EnumSet.Of(YarnServiceProtos.SchedulerResourceTypes.
                                                    Cpu, YarnServiceProtos.SchedulerResourceTypes.Memory);
            driver[testCapacityDefConf] = EnumSet.Of(YarnServiceProtos.SchedulerResourceTypes
                                                     .Memory);
            driver[testFairDefConf] = EnumSet.Of(YarnServiceProtos.SchedulerResourceTypes.Memory
                                                 , YarnServiceProtos.SchedulerResourceTypes.Cpu);
            foreach (KeyValuePair <YarnConfiguration, EnumSet <YarnServiceProtos.SchedulerResourceTypes
                                                               > > entry in driver)
            {
                EnumSet <YarnServiceProtos.SchedulerResourceTypes> expectedValue = entry.Value;
                MockRM rm = new MockRM(entry.Key);
                rm.Start();
                MockNM nm1  = rm.RegisterNode("127.0.0.1:1234", 6 * Gb);
                RMApp  app1 = rm.SubmitApp(2048);
                nm1.NodeHeartbeat(true);
                RMAppAttempt attempt1 = app1.GetCurrentAppAttempt();
                MockAM       am1      = rm.SendAMLaunched(attempt1.GetAppAttemptId());
                RegisterApplicationMasterResponse resp = am1.RegisterAppAttempt();
                EnumSet <YarnServiceProtos.SchedulerResourceTypes> types = resp.GetSchedulerResourceTypes
                                                                               ();
                Log.Info("types = " + types.ToString());
                NUnit.Framework.Assert.AreEqual(expectedValue, types);
                rm.Stop();
            }
        }
Example #19
        public virtual void TearDown()
        {
            if (hostFile != null && hostFile.Exists())
            {
                hostFile.Delete();
            }
            ClusterMetrics.Destroy();
            if (rm != null)
            {
                rm.Stop();
            }
            MetricsSystem ms = DefaultMetricsSystem.Instance();

            if (ms.GetSource("ClusterMetrics") != null)
            {
                DefaultMetricsSystem.Shutdown();
            }
        }
Example #20
        /// <exception cref="System.Exception"/>
        public virtual void TestFinishApplicationMasterBeforeRegistering()
        {
            MockRM rm = new MockRM(conf);

            try
            {
                rm.Start();
                // Register node1
                MockNM nm1 = rm.RegisterNode("127.0.0.1:1234", 6 * Gb);
                // Submit an application
                RMApp  app1 = rm.SubmitApp(2048);
                MockAM am1  = MockRM.LaunchAM(app1, rm, nm1);
                FinishApplicationMasterRequest req = FinishApplicationMasterRequest.NewInstance(FinalApplicationStatus
                                                                                                .Failed, string.Empty, string.Empty);
                try
                {
                    am1.UnregisterAppAttempt(req, false);
                    NUnit.Framework.Assert.Fail("ApplicationMasterNotRegisteredException should be thrown"
                                                );
                }
                catch (ApplicationMasterNotRegisteredException e)
                {
                    NUnit.Framework.Assert.IsNotNull(e);
                    NUnit.Framework.Assert.IsNotNull(e.Message);
                    NUnit.Framework.Assert.IsTrue(e.Message.Contains("Application Master is trying to unregister before registering for:"
                                                                     ));
                }
                catch (Exception)
                {
                    NUnit.Framework.Assert.Fail("ApplicationMasterNotRegisteredException should be thrown"
                                                );
                }
                am1.RegisterAppAttempt();
                am1.UnregisterAppAttempt(req, false);
                am1.WaitForState(RMAppAttemptState.Finishing);
            }
            finally
            {
                if (rm != null)
                {
                    rm.Stop();
                }
            }
        }
Example #21
        /// <exception cref="System.Exception"/>
        public virtual void TestAppWithNoContainers()
        {
            Logger rootLogger = LogManager.GetRootLogger();

            rootLogger.SetLevel(Level.Debug);
            MockRM rm = new MockRM(conf);

            rm.Start();
            MockNM nm1 = rm.RegisterNode("h1:1234", 5120);
            RMApp  app = rm.SubmitApp(2000);

            //kick the scheduling
            nm1.NodeHeartbeat(true);
            RMAppAttempt attempt = app.GetCurrentAppAttempt();
            MockAM       am      = rm.SendAMLaunched(attempt.GetAppAttemptId());

            am.RegisterAppAttempt();
            am.UnregisterAppAttempt();
            nm1.NodeHeartbeat(attempt.GetAppAttemptId(), 1, ContainerState.Complete);
            am.WaitForState(RMAppAttemptState.Finished);
            rm.Stop();
        }
Example #22
        /// <exception cref="System.Exception"/>
        public virtual void TestRMIdentifierOnContainerAllocation()
        {
            MockRM rm = new MockRM(conf);

            rm.Start();
            // Register node1
            MockNM nm1 = rm.RegisterNode("127.0.0.1:1234", 6 * Gb);
            // Submit an application
            RMApp app1 = rm.SubmitApp(2048);

            // kick the scheduling
            nm1.NodeHeartbeat(true);
            RMAppAttempt attempt1 = app1.GetCurrentAppAttempt();
            MockAM       am1      = rm.SendAMLaunched(attempt1.GetAppAttemptId());

            am1.RegisterAppAttempt();
            am1.AddRequests(new string[] { "127.0.0.1" }, Gb, 1, 1);
            AllocateResponse alloc1Response = am1.Schedule();

            // send the request
            // kick the scheduler
            nm1.NodeHeartbeat(true);
            while (alloc1Response.GetAllocatedContainers().Count < 1)
            {
                Log.Info("Waiting for containers to be created for app 1...");
                Sharpen.Thread.Sleep(1000);
                alloc1Response = am1.Schedule();
            }
            // assert RMIdentifer is set properly in allocated containers
            Container allocatedContainer     = alloc1Response.GetAllocatedContainers()[0];
            ContainerTokenIdentifier tokenId = BuilderUtils.NewContainerTokenIdentifier(allocatedContainer
                                                                                        .GetContainerToken());

            NUnit.Framework.Assert.AreEqual(MockRM.GetClusterTimeStamp(), tokenId.GetRMIdentifier
                                                ());
            rm.Stop();
        }
Example #23
        public virtual void TestAllocateContainerOnNodeWithoutOffSwitchSpecified()
        {
            Logger rootLogger = LogManager.GetRootLogger();

            rootLogger.SetLevel(Level.Debug);
            MockRM rm = new MockRM(conf);

            rm.Start();
            MockNM nm1  = rm.RegisterNode("127.0.0.1:1234", 6 * Gb);
            RMApp  app1 = rm.SubmitApp(2048);

            // kick the scheduling, 2 GB given to AM1, remaining 4GB on nm1
            nm1.NodeHeartbeat(true);
            RMAppAttempt attempt1 = app1.GetCurrentAppAttempt();
            MockAM       am1      = rm.SendAMLaunched(attempt1.GetAppAttemptId());

            am1.RegisterAppAttempt();
            // add request for containers
            IList <ResourceRequest> requests = new AList <ResourceRequest>();

            requests.AddItem(am1.CreateResourceReq("127.0.0.1", 1 * Gb, 1, 1));
            requests.AddItem(am1.CreateResourceReq("/default-rack", 1 * Gb, 1, 1));
            am1.Allocate(requests, null);
            // send the request
            try
            {
                // kick the schedule
                nm1.NodeHeartbeat(true);
            }
            catch (ArgumentNullException)
            {
                NUnit.Framework.Assert.Fail("NPE when allocating container on node but " + "forget to set off-switch request should be handled"
                                            );
            }
            rm.Stop();
        }
Example #24
        /// <exception cref="System.Exception"/>
        public virtual void TestActivatingApplicationAfterAddingNM()
        {
            MockRM rm1 = new MockRM(conf);

            // start like normal because state is empty
            rm1.Start();
            // app that gets launched
            RMApp app1 = rm1.SubmitApp(200);
            // app that does not get launched
            RMApp app2 = rm1.SubmitApp(200);
            // app1 and app2 should be scheduled, but because no resource is available,
            // they are not activated.
            RMAppAttempt         attempt1   = app1.GetCurrentAppAttempt();
            ApplicationAttemptId attemptId1 = attempt1.GetAppAttemptId();

            rm1.WaitForState(attemptId1, RMAppAttemptState.Scheduled);
            RMAppAttempt         attempt2   = app2.GetCurrentAppAttempt();
            ApplicationAttemptId attemptId2 = attempt2.GetAppAttemptId();

            rm1.WaitForState(attemptId2, RMAppAttemptState.Scheduled);
            MockNM nm1 = new MockNM("h1:1234", 15120, rm1.GetResourceTrackerService());
            MockNM nm2 = new MockNM("h2:5678", 15120, rm1.GetResourceTrackerService());

            nm1.RegisterNode();
            nm2.RegisterNode();
            //kick the scheduling
            nm1.NodeHeartbeat(true);
            // app1 should be allocated now
            rm1.WaitForState(attemptId1, RMAppAttemptState.Allocated);
            rm1.WaitForState(attemptId2, RMAppAttemptState.Scheduled);
            nm2.NodeHeartbeat(true);
            // app2 should be allocated now
            rm1.WaitForState(attemptId1, RMAppAttemptState.Allocated);
            rm1.WaitForState(attemptId2, RMAppAttemptState.Allocated);
            rm1.Stop();
        }
Example #25
        public virtual void TestResourceOverCommit()
        {
            MockRM rm = new MockRM(conf);

            rm.Start();
            MockNM nm1  = rm.RegisterNode("127.0.0.1:1234", 4 * Gb);
            RMApp  app1 = rm.SubmitApp(2048);

            // kick the scheduling, 2 GB given to AM1, remaining 2GB on nm1
            nm1.NodeHeartbeat(true);
            RMAppAttempt attempt1 = app1.GetCurrentAppAttempt();
            MockAM       am1      = rm.SendAMLaunched(attempt1.GetAppAttemptId());

            am1.RegisterAppAttempt();
            SchedulerNodeReport report_nm1 = rm.GetResourceScheduler().GetNodeReport(nm1.GetNodeId
                                                                                         ());

            // check node report, 2 GB used and 2 GB available
            NUnit.Framework.Assert.AreEqual(2 * Gb, report_nm1.GetUsedResource().GetMemory());
            NUnit.Framework.Assert.AreEqual(2 * Gb, report_nm1.GetAvailableResource().GetMemory
                                                ());
            // add request for containers
            am1.AddRequests(new string[] { "127.0.0.1", "127.0.0.2" }, 2 * Gb, 1, 1);
            AllocateResponse alloc1Response = am1.Schedule();

            // send the request
            // kick the scheduler, 2 GB given to AM1, resource remaining 0
            nm1.NodeHeartbeat(true);
            while (alloc1Response.GetAllocatedContainers().Count < 1)
            {
                Log.Info("Waiting for containers to be created for app 1...");
                Sharpen.Thread.Sleep(1000);
                alloc1Response = am1.Schedule();
            }
            IList <Container> allocated1 = alloc1Response.GetAllocatedContainers();

            NUnit.Framework.Assert.AreEqual(1, allocated1.Count);
            NUnit.Framework.Assert.AreEqual(2 * Gb, allocated1[0].GetResource().GetMemory());
            NUnit.Framework.Assert.AreEqual(nm1.GetNodeId(), allocated1[0].GetNodeId());
            report_nm1 = rm.GetResourceScheduler().GetNodeReport(nm1.GetNodeId());
            // check node report, 4 GB used and 0 GB available
            NUnit.Framework.Assert.AreEqual(0, report_nm1.GetAvailableResource().GetMemory());
            NUnit.Framework.Assert.AreEqual(4 * Gb, report_nm1.GetUsedResource().GetMemory());
            // check container is assigned with 2 GB.
            Container c1 = allocated1[0];

            NUnit.Framework.Assert.AreEqual(2 * Gb, c1.GetResource().GetMemory());
            // update node resource to 2 GB, so resource is over-consumed.
            IDictionary <NodeId, ResourceOption> nodeResourceMap = new Dictionary <NodeId, ResourceOption
                                                                                   >();

            nodeResourceMap[nm1.GetNodeId()] = ResourceOption.NewInstance(Org.Apache.Hadoop.Yarn.Api.Records.Resource
                                                                          .NewInstance(2 * Gb, 1), -1);
            UpdateNodeResourceRequest request = UpdateNodeResourceRequest.NewInstance(nodeResourceMap
                                                                                      );
            AdminService @as = rm.adminService;

            @as.UpdateNodeResource(request);
            // Now, the used resource is still 4 GB, and the available resource is a negative value.
            report_nm1 = rm.GetResourceScheduler().GetNodeReport(nm1.GetNodeId());
            NUnit.Framework.Assert.AreEqual(4 * Gb, report_nm1.GetUsedResource().GetMemory());
            NUnit.Framework.Assert.AreEqual(-2 * Gb, report_nm1.GetAvailableResource().GetMemory
                                                ());
            // Check container can complete successfully in case of resource over-commitment.
            ContainerStatus containerStatus = BuilderUtils.NewContainerStatus(c1.GetId(), ContainerState
                                                                              .Complete, string.Empty, 0);

            nm1.ContainerStatus(containerStatus);
            int waitCount = 0;

            while (attempt1.GetJustFinishedContainers().Count < 1 && waitCount++ != 20)
            {
                Log.Info("Waiting for containers to be finished for app 1... Tried " + waitCount
                         + " times already..");
                Sharpen.Thread.Sleep(100);
            }
            NUnit.Framework.Assert.AreEqual(1, attempt1.GetJustFinishedContainers().Count);
            NUnit.Framework.Assert.AreEqual(1, am1.Schedule().GetCompletedContainersStatuses(
                                                ).Count);
            report_nm1 = rm.GetResourceScheduler().GetNodeReport(nm1.GetNodeId());
            NUnit.Framework.Assert.AreEqual(2 * Gb, report_nm1.GetUsedResource().GetMemory());
            // As the container returns 2 GB back, the available resource becomes 0 again.
            NUnit.Framework.Assert.AreEqual(0 * Gb, report_nm1.GetAvailableResource().GetMemory
                                                ());
            rm.Stop();
        }
Example #26
        /// <exception cref="System.Exception"/>
        private void AmRestartTests(bool keepRunningContainers)
        {
            MockRM rm = new MockRM(conf);

            rm.Start();
            RMApp app = rm.SubmitApp(200, "name", "user", new Dictionary <ApplicationAccessType
                                                                          , string>(), false, "default", -1, null, "MAPREDUCE", false, keepRunningContainers
                                     );
            MockNM nm = new MockNM("127.0.0.1:1234", 10240, rm.GetResourceTrackerService());

            nm.RegisterNode();
            MockAM am0           = MockRM.LaunchAndRegisterAM(app, rm, nm);
            int    NumContainers = 1;

            // allocate NUM_CONTAINERS containers
            am0.Allocate("127.0.0.1", 1024, NumContainers, new AList <ContainerId>());
            nm.NodeHeartbeat(true);
            // wait for containers to be allocated.
            IList <Container> containers = am0.Allocate(new AList <ResourceRequest>(), new AList
                                                        <ContainerId>()).GetAllocatedContainers();

            while (containers.Count != NumContainers)
            {
                nm.NodeHeartbeat(true);
                Sharpen.Collections.AddAll(containers, am0.Allocate(new AList <ResourceRequest>(),
                                                                    new AList <ContainerId>()).GetAllocatedContainers());
                Sharpen.Thread.Sleep(200);
            }
            // launch the 2nd container.
            ContainerId containerId2 = ContainerId.NewContainerId(am0.GetApplicationAttemptId
                                                                      (), 2);

            nm.NodeHeartbeat(am0.GetApplicationAttemptId(), containerId2.GetContainerId(), ContainerState
                             .Running);
            rm.WaitForState(nm, containerId2, RMContainerState.Running);
            // Capture the containers here so the metrics can be calculated after the
            // app has completed.
            ICollection <RMContainer> rmContainers = rm.scheduler.GetSchedulerAppInfo(am0.GetApplicationAttemptId
                                                                                          ()).GetLiveContainers();
            // fail the first app attempt by sending CONTAINER_FINISHED event without
            // registering.
            ContainerId amContainerId = app.GetCurrentAppAttempt().GetMasterContainer().GetId
                                            ();

            nm.NodeHeartbeat(am0.GetApplicationAttemptId(), amContainerId.GetContainerId(), ContainerState
                             .Complete);
            am0.WaitForState(RMAppAttemptState.Failed);
            long memorySeconds = 0;
            long vcoreSeconds  = 0;

            // Calculate container usage metrics for first attempt.
            if (keepRunningContainers)
            {
                // Only calculate the usage for the one container that has completed.
                foreach (RMContainer c in rmContainers)
                {
                    if (c.GetContainerId().Equals(amContainerId))
                    {
                        AggregateAppResourceUsage ru = CalculateContainerResourceMetrics(c);
                        memorySeconds += ru.GetMemorySeconds();
                        vcoreSeconds  += ru.GetVcoreSeconds();
                    }
                    else
                    {
                        // The remaining container should be RUNNING.
                        NUnit.Framework.Assert.IsTrue("After first attempt failed, remaining container "
                                                      + "should still be running. ", c.GetContainerState().Equals(ContainerState.Running
                                                                                                                  ));
                    }
                }
            }
            else
            {
                // If keepRunningContainers is false, all live containers should now
                // be completed. Calculate the resource usage metrics for all of them.
                foreach (RMContainer c in rmContainers)
                {
                    AggregateAppResourceUsage ru = CalculateContainerResourceMetrics(c);
                    memorySeconds += ru.GetMemorySeconds();
                    vcoreSeconds  += ru.GetVcoreSeconds();
                }
            }
            // wait for app to start a new attempt.
            rm.WaitForState(app.GetApplicationId(), RMAppState.Accepted);
            // assert this is a new AM.
            RMAppAttempt attempt2 = app.GetCurrentAppAttempt();

            NUnit.Framework.Assert.IsFalse(attempt2.GetAppAttemptId().Equals(am0.GetApplicationAttemptId
                                                                                 ()));
            // launch the new AM
            nm.NodeHeartbeat(true);
            MockAM am1 = rm.SendAMLaunched(attempt2.GetAppAttemptId());

            am1.RegisterAppAttempt();
            // allocate NUM_CONTAINERS containers
            am1.Allocate("127.0.0.1", 1024, NumContainers, new AList <ContainerId>());
            nm.NodeHeartbeat(true);
            // wait for containers to be allocated.
            containers = am1.Allocate(new AList <ResourceRequest>(), new AList <ContainerId>())
                         .GetAllocatedContainers();
            while (containers.Count != NumContainers)
            {
                nm.NodeHeartbeat(true);
                Sharpen.Collections.AddAll(containers, am1.Allocate(new AList <ResourceRequest>(),
                                                                    new AList <ContainerId>()).GetAllocatedContainers());
                Sharpen.Thread.Sleep(200);
            }
            rm.WaitForState(app.GetApplicationId(), RMAppState.Running);
            // Capture running containers for later use by metrics calculations.
            rmContainers = rm.scheduler.GetSchedulerAppInfo(attempt2.GetAppAttemptId()).GetLiveContainers
                               ();
            // complete container by sending the container complete event which has
            // earlier attempt's attemptId
            amContainerId = app.GetCurrentAppAttempt().GetMasterContainer().GetId();
            nm.NodeHeartbeat(am0.GetApplicationAttemptId(), amContainerId.GetContainerId(), ContainerState
                             .Complete);
            MockRM.FinishAMAndVerifyAppState(app, rm, nm, am1);
            // Calculate container usage metrics for second attempt.
            foreach (RMContainer c_1 in rmContainers)
            {
                AggregateAppResourceUsage ru = CalculateContainerResourceMetrics(c_1);
                memorySeconds += ru.GetMemorySeconds();
                vcoreSeconds  += ru.GetVcoreSeconds();
            }
            RMAppMetrics rmAppMetrics = app.GetRMAppMetrics();

            NUnit.Framework.Assert.AreEqual("Unexcpected MemorySeconds value", memorySeconds,
                                            rmAppMetrics.GetMemorySeconds());
            NUnit.Framework.Assert.AreEqual("Unexpected VcoreSeconds value", vcoreSeconds, rmAppMetrics
                                            .GetVcoreSeconds());
            rm.Stop();
            return;
        }
Example #27
        /// <exception cref="System.Exception"/>
        public virtual void TestUsageWithMultipleContainersAndRMRestart()
        {
            // Set max attempts to 1 so that when the first attempt fails, the app
            // won't try to start a new one.
            conf.SetInt(YarnConfiguration.RmAmMaxAttempts, 1);
            conf.SetBoolean(YarnConfiguration.RecoveryEnabled, true);
            conf.SetBoolean(YarnConfiguration.RmWorkPreservingRecoveryEnabled, false);
            MemoryRMStateStore memStore = new MemoryRMStateStore();

            memStore.Init(conf);
            MockRM rm0 = new MockRM(conf, memStore);

            rm0.Start();
            MockNM nm = new MockNM("127.0.0.1:1234", 65536, rm0.GetResourceTrackerService());

            nm.RegisterNode();
            RMApp app0 = rm0.SubmitApp(200);

            rm0.WaitForState(app0.GetApplicationId(), RMAppState.Accepted);
            RMAppAttempt         attempt0   = app0.GetCurrentAppAttempt();
            ApplicationAttemptId attemptId0 = attempt0.GetAppAttemptId();

            rm0.WaitForState(attemptId0, RMAppAttemptState.Scheduled);
            nm.NodeHeartbeat(true);
            rm0.WaitForState(attemptId0, RMAppAttemptState.Allocated);
            MockAM am0 = rm0.SendAMLaunched(attempt0.GetAppAttemptId());

            am0.RegisterAppAttempt();
            int NumContainers = 2;

            am0.Allocate("127.0.0.1", 1000, NumContainers, new AList <ContainerId>());
            nm.NodeHeartbeat(true);
            IList <Container> conts = am0.Allocate(new AList <ResourceRequest>(), new AList <ContainerId
                                                                                             >()).GetAllocatedContainers();

            while (conts.Count != NumContainers)
            {
                nm.NodeHeartbeat(true);
                Sharpen.Collections.AddAll(conts, am0.Allocate(new AList <ResourceRequest>(), new
                                                               AList <ContainerId>()).GetAllocatedContainers());
                Sharpen.Thread.Sleep(500);
            }
            // launch the 2nd and 3rd containers.
            foreach (Container c in conts)
            {
                nm.NodeHeartbeat(attempt0.GetAppAttemptId(), c.GetId().GetContainerId(), ContainerState
                                 .Running);
                rm0.WaitForState(nm, c.GetId(), RMContainerState.Running);
            }
            // Get the RMContainers for all of the live containers, to be used later
            // for metrics calculations and comparisons.
            ICollection <RMContainer> rmContainers = rm0.scheduler.GetSchedulerAppInfo(attempt0
                                                                                       .GetAppAttemptId()).GetLiveContainers();
            // Allow metrics to accumulate.
            int sleepInterval       = 1000;
            int cumulativeSleepTime = 0;

            while (app0.GetRMAppMetrics().GetMemorySeconds() <= 0 && cumulativeSleepTime < 5000
                   )
            {
                Sharpen.Thread.Sleep(sleepInterval);
                cumulativeSleepTime += sleepInterval;
            }
            // Stop all non-AM containers
            foreach (Container c_1 in conts)
            {
                if (c_1.GetId().GetContainerId() == 1)
                {
                    continue;
                }
                nm.NodeHeartbeat(attempt0.GetAppAttemptId(), c_1.GetId().GetContainerId(), ContainerState
                                 .Complete);
                rm0.WaitForState(nm, c_1.GetId(), RMContainerState.Completed);
            }
            // After all other containers have completed, manually complete the master
            // container in order to trigger a save to the state store of the resource
            // usage metrics. This will cause the attempt to fail, and, since the max
            // attempt retries is 1, the app will also fail. This is intentional so
            // that all containers will complete prior to saving.
            ContainerId cId = ContainerId.NewContainerId(attempt0.GetAppAttemptId(), 1);

            nm.NodeHeartbeat(attempt0.GetAppAttemptId(), cId.GetContainerId(), ContainerState
                             .Complete);
            rm0.WaitForState(nm, cId, RMContainerState.Completed);
            // Check that the container metrics match those from the app usage report.
            long memorySeconds = 0;
            long vcoreSeconds  = 0;

            foreach (RMContainer c_2 in rmContainers)
            {
                AggregateAppResourceUsage ru = CalculateContainerResourceMetrics(c_2);
                memorySeconds += ru.GetMemorySeconds();
                vcoreSeconds  += ru.GetVcoreSeconds();
            }
            RMAppMetrics metricsBefore = app0.GetRMAppMetrics();

            NUnit.Framework.Assert.AreEqual("Unexcpected MemorySeconds value", memorySeconds,
                                            metricsBefore.GetMemorySeconds());
            NUnit.Framework.Assert.AreEqual("Unexpected VcoreSeconds value", vcoreSeconds, metricsBefore
                                            .GetVcoreSeconds());
            // create new RM to represent RM restart. Load up the state store.
            MockRM rm1 = new MockRM(conf, memStore);

            rm1.Start();
            RMApp app0After = rm1.GetRMContext().GetRMApps()[app0.GetApplicationId()];
            // Compare container resource usage metrics from before and after restart.
            RMAppMetrics metricsAfter = app0After.GetRMAppMetrics();

            NUnit.Framework.Assert.AreEqual("Vcore seconds were not the same after RM Restart"
                                            , metricsBefore.GetVcoreSeconds(), metricsAfter.GetVcoreSeconds());
            NUnit.Framework.Assert.AreEqual("Memory seconds were not the same after RM Restart"
                                            , metricsBefore.GetMemorySeconds(), metricsAfter.GetMemorySeconds());
            rm0.Stop();
            rm0.Close();
            rm1.Stop();
            rm1.Close();
        }
Example #28
        // The test verifies processing of NMContainerStatuses which are sent during
        // NM registration.
        // 1. Start the cluster (RM, NM), submit an app with 1024MB, launch & register the AM
        // 2. AM sends a ResourceRequest for 1 container with 2048MB of memory.
        // 3. Verify the number of containers allocated by the RM
        // 4. Verify the memory used by the cluster; it should be 3072MB (AM memory +
        // requested memory: 1024 + 2048 = 3072)
        // 5. Re-register the NM by sending completed container statuses
        // 6. Verify the memory used; it should be 1024MB
        // 7. Send an AM heartbeat to the RM. The allocate response should contain the
        // completed container.
        /// <exception cref="System.Exception"/>
        public virtual void TestProcessingNMContainerStatusesOnNMRestart()
        {
            conf.SetInt(YarnConfiguration.RmAmMaxAttempts, 1);
            MemoryRMStateStore memStore = new MemoryRMStateStore();

            memStore.Init(conf);
            // 1. Start the cluster (RM, NM), submit an app with 1024MB, launch & register the AM
            MockRM rm1 = new MockRM(conf, memStore);

            rm1.Start();
            int    nmMemory        = 8192;
            int    amMemory        = 1024;
            int    containerMemory = 2048;
            MockNM nm1             = new MockNM("127.0.0.1:1234", nmMemory, rm1.GetResourceTrackerService
                                                    ());

            nm1.RegisterNode();
            RMApp  app0 = rm1.SubmitApp(amMemory);
            MockAM am0  = MockRM.LaunchAndRegisterAM(app0, rm1, nm1);
            // 2. AM sends ResourceRequest for 1 container with memory 2048MB.
            int noOfContainers = 1;
            IList <Container> allocateContainers = am0.AllocateAndWaitForContainers(noOfContainers
                                                                                    , containerMemory, nm1);

            // 3. Verify the number of containers allocated by the RM
            NUnit.Framework.Assert.AreEqual(noOfContainers, allocateContainers.Count);
            Container container = allocateContainers[0];

            nm1.NodeHeartbeat(am0.GetApplicationAttemptId(), 1, ContainerState.Running);
            nm1.NodeHeartbeat(am0.GetApplicationAttemptId(), container.GetId().GetContainerId
                                  (), ContainerState.Running);
            rm1.WaitForState(app0.GetApplicationId(), RMAppState.Running);
            // 4. Verify the memory used by the cluster; it should be 3072MB (AM memory +
            // requested memory: 1024 + 2048 = 3072)
            ResourceScheduler rs = rm1.GetRMContext().GetScheduler();
            int allocatedMB      = rs.GetRootQueueMetrics().GetAllocatedMB();

            NUnit.Framework.Assert.AreEqual(amMemory + containerMemory, allocatedMB);
            // 5. Re-register NM by sending completed container status
            IList <NMContainerStatus> nMContainerStatusForApp = CreateNMContainerStatusForApp(
                am0);

            nm1.RegisterNode(nMContainerStatusForApp, Arrays.AsList(app0.GetApplicationId()));
            WaitForClusterMemory(nm1, rs, amMemory);
            // 6. Verify the memory used; it should be 1024MB
            NUnit.Framework.Assert.AreEqual(amMemory, rs.GetRootQueueMetrics().GetAllocatedMB
                                                ());
            // 7. Send an AM heartbeat to the RM. The allocate response should contain the
            // completed container.
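            // AllocateRequest.NewInstance arguments, in order: response id, AM progress,
            // ask (resource requests), containers to release, and the blacklist request
            // (null here, since no blacklist is needed).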
            AllocateRequest req = AllocateRequest.NewInstance(0, 0F, new AList <ResourceRequest
                                                                                >(), new AList <ContainerId>(), null);
            AllocateResponse        allocate = am0.Allocate(req);
            IList <ContainerStatus> completedContainersStatuses = allocate.GetCompletedContainersStatuses
                                                                      ();

            NUnit.Framework.Assert.AreEqual(noOfContainers, completedContainersStatuses.Count
                                            );
            // Application cleanup should happen; cluster memory used should drop to 0
            nm1.NodeHeartbeat(am0.GetApplicationAttemptId(), 1, ContainerState.Complete);
            WaitForClusterMemory(nm1, rs, 0);
            rm1.Stop();
        }
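        // Hedged sketch (an assumption, not part of this listing): WaitForClusterMemory,
        // as used above, is presumed to heartbeat the NM and poll the root queue metrics
        // until the allocated MB reaches the expected value or a timeout is hit.
        private void WaitForClusterMemory(MockNM nm, ResourceScheduler rs, int expectedMB)
        {
            int counter = 0;

            while (rs.GetRootQueueMetrics().GetAllocatedMB() != expectedMB)
            {
                nm.NodeHeartbeat(true);
                Sharpen.Thread.Sleep(100);
                if (counter++ == 50)
                {
                    NUnit.Framework.Assert.Fail("Timed out waiting for cluster memory. Expected="
                                                + expectedMB + " Actual=" + rs.GetRootQueueMetrics().GetAllocatedMB());
                }
            }
        }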
Example #29
        /// <exception cref="System.Exception"/>
        public virtual void TestHeadroom()
        {
            Configuration conf = new Configuration();

            conf.SetClass(YarnConfiguration.RmScheduler, typeof(FifoScheduler), typeof(ResourceScheduler
                                                                                       ));
            MockRM rm = new MockRM(conf);

            rm.Start();
            FifoScheduler fs = (FifoScheduler)rm.GetResourceScheduler();
            // Add a node
            RMNode n1 = MockNodes.NewNodeInfo(0, MockNodes.NewResource(4 * Gb), 1, "127.0.0.2"
                                              );

            fs.Handle(new NodeAddedSchedulerEvent(n1));
            // Add two applications
            ApplicationId        appId1        = BuilderUtils.NewApplicationId(100, 1);
            ApplicationAttemptId appAttemptId1 = BuilderUtils.NewApplicationAttemptId(appId1,
                                                                                      1);

            CreateMockRMApp(appAttemptId1, rm.GetRMContext());
            SchedulerEvent appEvent = new AppAddedSchedulerEvent(appId1, "queue", "user");

            fs.Handle(appEvent);
            SchedulerEvent attemptEvent = new AppAttemptAddedSchedulerEvent(appAttemptId1, false
                                                                            );

            fs.Handle(attemptEvent);
            ApplicationId        appId2        = BuilderUtils.NewApplicationId(200, 2);
            ApplicationAttemptId appAttemptId2 = BuilderUtils.NewApplicationAttemptId(appId2,
                                                                                      1);

            CreateMockRMApp(appAttemptId2, rm.GetRMContext());
            SchedulerEvent appEvent2 = new AppAddedSchedulerEvent(appId2, "queue", "user");

            fs.Handle(appEvent2);
            SchedulerEvent attemptEvent2 = new AppAttemptAddedSchedulerEvent(appAttemptId2, false
                                                                             );

            fs.Handle(attemptEvent2);
            IList <ContainerId>     emptyId  = new AList <ContainerId>();
            IList <ResourceRequest> emptyAsk = new AList <ResourceRequest>();
            // Set up resource requests
            // Ask for a 1 GB container for app 1
            IList <ResourceRequest> ask1 = new AList <ResourceRequest>();

            ask1.AddItem(BuilderUtils.NewResourceRequest(BuilderUtils.NewPriority(0), ResourceRequest
                                                         .Any, BuilderUtils.NewResource(Gb, 1), 1));
            fs.Allocate(appAttemptId1, ask1, emptyId, null, null);
            // Ask for a 2 GB container for app 2
            IList <ResourceRequest> ask2 = new AList <ResourceRequest>();

            ask2.AddItem(BuilderUtils.NewResourceRequest(BuilderUtils.NewPriority(0), ResourceRequest
                                                         .Any, BuilderUtils.NewResource(2 * Gb, 1), 1));
            fs.Allocate(appAttemptId2, ask2, emptyId, null, null);
            // Trigger container assignment
            fs.Handle(new NodeUpdateSchedulerEvent(n1));
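            // With a single 4GB node, FIFO assigns 1GB to app 1 and 2GB to app 2, so the
            // headroom reported to each application is 4 - (1 + 2) = 1GB.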
            // Get the allocation for the applications and verify headroom
            Allocation allocation1 = fs.Allocate(appAttemptId1, emptyAsk, emptyId, null, null
                                                 );

            NUnit.Framework.Assert.AreEqual("Allocation headroom", 1 * Gb, allocation1.GetResourceLimit
                                                ().GetMemory());
            Allocation allocation2 = fs.Allocate(appAttemptId2, emptyAsk, emptyId, null, null
                                                 );

            NUnit.Framework.Assert.AreEqual("Allocation headroom", 1 * Gb, allocation2.GetResourceLimit
                                                ().GetMemory());
            rm.Stop();
        }
Example #30
        public virtual void TestAppCleanup()
        {
            Logger rootLogger = LogManager.GetRootLogger();

            rootLogger.SetLevel(Level.Debug);
            MockRM rm = new MockRM();

            rm.Start();
            MockNM nm1 = rm.RegisterNode("127.0.0.1:1234", 5000);
            RMApp  app = rm.SubmitApp(2000);

            // kick the scheduling
            nm1.NodeHeartbeat(true);
            RMAppAttempt attempt = app.GetCurrentAppAttempt();
            MockAM       am      = rm.SendAMLaunched(attempt.GetAppAttemptId());

            am.RegisterAppAttempt();
            // request for containers
            int request = 2;

            am.Allocate("127.0.0.1", 1000, request, new AList <ContainerId>());
            // kick the scheduler
            nm1.NodeHeartbeat(true);
            IList <Container> conts = am.Allocate(new AList <ResourceRequest>(), new AList <ContainerId
                                                                                            >()).GetAllocatedContainers();
            int contReceived = conts.Count;
            int waitCount    = 0;

            while (contReceived < request && waitCount++ < 200)
            {
                Log.Info("Got " + contReceived + " containers. Waiting to get " + request);
                Sharpen.Thread.Sleep(100);
                conts = am.Allocate(new AList <ResourceRequest>(), new AList <ContainerId>()).GetAllocatedContainers
                            ();
                contReceived += conts.Count;
                nm1.NodeHeartbeat(true);
            }
            NUnit.Framework.Assert.AreEqual(request, contReceived);
            am.UnregisterAppAttempt();
            NodeHeartbeatResponse resp = nm1.NodeHeartbeat(attempt.GetAppAttemptId(), 1, ContainerState
                                                           .Complete);

            am.WaitForState(RMAppAttemptState.Finished);
            // currently only containers are cleaned via this
            // AM container is cleaned via container launcher
            resp = nm1.NodeHeartbeat(true);
            IList <ContainerId>   containersToCleanup = resp.GetContainersToCleanup();
            IList <ApplicationId> appsToCleanup       = resp.GetApplicationsToCleanup();
            int numCleanedContainers = containersToCleanup.Count;
            int numCleanedApps       = appsToCleanup.Count;

            waitCount = 0;
            while ((numCleanedContainers < 2 || numCleanedApps < 1) && waitCount++ < 200)
            {
                Log.Info("Waiting to get cleanup events.. cleanedConts: " + numCleanedContainers
                         + " cleanedApps: " + numCleanedApps);
                Sharpen.Thread.Sleep(100);
                resp = nm1.NodeHeartbeat(true);
                IList <ContainerId>   deltaContainersToCleanup = resp.GetContainersToCleanup();
                IList <ApplicationId> deltaAppsToCleanup       = resp.GetApplicationsToCleanup();
                // Add the deltas to the global list
                Sharpen.Collections.AddAll(containersToCleanup, deltaContainersToCleanup);
                Sharpen.Collections.AddAll(appsToCleanup, deltaAppsToCleanup);
                // Update counts now
                numCleanedContainers = containersToCleanup.Count;
                numCleanedApps       = appsToCleanup.Count;
            }
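            // Expect exactly one application cleanup event and two container cleanup
            // events: the two allocated task containers (the AM container is cleaned
            // up separately by the container launcher, as noted above).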
            NUnit.Framework.Assert.AreEqual(1, appsToCleanup.Count);
            NUnit.Framework.Assert.AreEqual(app.GetApplicationId(), appsToCleanup[0]);
            NUnit.Framework.Assert.AreEqual(1, numCleanedApps);
            NUnit.Framework.Assert.AreEqual(2, numCleanedContainers);
            rm.Stop();
        }