public virtual NodeId GetNodeIdToUnreserve(Priority priority,
    Org.Apache.Hadoop.Yarn.Api.Records.Resource resourceNeedUnreserve, ResourceCalculator rc,
    Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterResource) {
  lock (this) {
    // first go around make this algorithm simple and just grab first
    // reservation that has enough resources
    IDictionary<NodeId, RMContainer> reservedContainers = this.reservedContainers[priority];
    if ((reservedContainers != null) && (!reservedContainers.IsEmpty())) {
      foreach (KeyValuePair<NodeId, RMContainer> entry in reservedContainers) {
        NodeId nodeId = entry.Key;
        Org.Apache.Hadoop.Yarn.Api.Records.Resource containerResource =
            entry.Value.GetContainer().GetResource();
        // make sure we unreserve one with at least the same amount of
        // resources, otherwise could affect capacity limits
        if (Resources.LessThanOrEqual(rc, clusterResource, resourceNeedUnreserve, containerResource)) {
          if (Log.IsDebugEnabled()) {
            Log.Debug("unreserving node with reservation size: " + containerResource
              + " in order to allocate container with size: " + resourceNeedUnreserve);
          }
          return nodeId;
        }
      }
    }
    return null;
  }
}
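// Illustrative only: a minimal standalone sketch (plain MB values, hypothetical names)
// of the first-fit selection above — return the first node whose reserved container is
// at least as large as the amount that needs to be unreserved.
public static class UnreserveSelectionSketch {
  public static string FirstNodeWithEnoughReservation(
      System.Collections.Generic.IDictionary<string, int> reservedMbByNode, int neededMb) {
    foreach (System.Collections.Generic.KeyValuePair<string, int> entry in reservedMbByNode) {
      if (neededMb <= entry.Value) {
        return entry.Key;  // first reservation big enough wins
      }
    }
    return null;  // nothing large enough to unreserve
  }
}
// Example: { "host_0": 1024, "host_1": 4096 } with neededMb = 2048 returns "host_1".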
public virtual bool Unreserve(FiCaSchedulerNode node, Priority priority) {
  lock (this) {
    IDictionary<NodeId, RMContainer> reservedContainers = this.reservedContainers[priority];
    if (reservedContainers != null) {
      RMContainer reservedContainer = Sharpen.Collections.Remove(reservedContainers, node.GetNodeID());
      // unreserve is now triggered in new scenarios (preemption)
      // as a consequence reservedContainer might be null, adding NP-checks
      if (reservedContainer != null && reservedContainer.GetContainer() != null
          && reservedContainer.GetContainer().GetResource() != null) {
        if (reservedContainers.IsEmpty()) {
          Sharpen.Collections.Remove(this.reservedContainers, priority);
        }
        // Reset the re-reservation count
        ResetReReservations(priority);
        Org.Apache.Hadoop.Yarn.Api.Records.Resource resource =
            reservedContainer.GetContainer().GetResource();
        Resources.SubtractFrom(currentReservation, resource);
        Log.Info("Application " + GetApplicationId() + " unreserved " + " on node " + node
          + ", currently has " + reservedContainers.Count + " at priority " + priority
          + "; currentReservation " + currentReservation);
        return true;
      }
    }
    return false;
  }
}
/// <summary>
/// This method produces an Allocation that includes the current view
/// of the resources that will be allocated to and preempted from this
/// application.
/// </summary>
/// <param name="rc"/>
/// <param name="clusterResource"/>
/// <param name="minimumAllocation"/>
/// <returns>an allocation</returns>
public virtual Allocation GetAllocation(ResourceCalculator rc,
    Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterResource,
    Org.Apache.Hadoop.Yarn.Api.Records.Resource minimumAllocation) {
  lock (this) {
    ICollection<ContainerId> currentContPreemption =
        Sharpen.Collections.UnmodifiableSet(new HashSet<ContainerId>(containersToPreempt));
    containersToPreempt.Clear();
    Org.Apache.Hadoop.Yarn.Api.Records.Resource tot =
        Org.Apache.Hadoop.Yarn.Api.Records.Resource.NewInstance(0, 0);
    foreach (ContainerId c in currentContPreemption) {
      Resources.AddTo(tot, liveContainers[c].GetContainer().GetResource());
    }
    int numCont = (int)Math.Ceil(Resources.Divide(rc, clusterResource, tot, minimumAllocation));
    ResourceRequest rr = ResourceRequest.NewInstance(Priority.Undefined, ResourceRequest.Any,
        minimumAllocation, numCont);
    SchedulerApplicationAttempt.ContainersAndNMTokensAllocation allocation =
        PullNewlyAllocatedContainersAndNMTokens();
    Org.Apache.Hadoop.Yarn.Api.Records.Resource headroom = GetHeadroom();
    SetApplicationHeadroomForMetrics(headroom);
    return new Allocation(allocation.GetContainerList(), headroom, null, currentContPreemption,
        Sharpen.Collections.SingletonList(rr), allocation.GetNMTokenList());
  }
}
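// Illustrative only: a standalone sketch (plain MB values, hypothetical names) of how the
// preempted total is turned into a container count above — divide by the minimum allocation
// and round up, shown here with a memory-only view of the division.
public static class PreemptionContainerCountSketch {
  public static int ContainersFor(int totalPreemptedMb, int minimumAllocationMb) {
    return (int)System.Math.Ceiling((double)totalPreemptedMb / minimumAllocationMb);
  }
}
// Example: 3 preempted containers of 1536 MB each (4608 MB total) against a 1024 MB minimum
// allocation yield ceil(4.5) = 5 as the ResourceRequest's container count.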
public virtual void TestUpdateMaxAllocationUsesTotal() {
  int configuredMaxVCores = 20;
  int configuredMaxMemory = 10 * 1024;
  Org.Apache.Hadoop.Yarn.Api.Records.Resource configuredMaximumResource =
      Org.Apache.Hadoop.Yarn.Api.Records.Resource.NewInstance(configuredMaxMemory, configuredMaxVCores);
  ConfigureScheduler();
  YarnConfiguration conf = GetConf();
  conf.SetInt(YarnConfiguration.RmSchedulerMaximumAllocationVcores, configuredMaxVCores);
  conf.SetInt(YarnConfiguration.RmSchedulerMaximumAllocationMb, configuredMaxMemory);
  conf.SetLong(YarnConfiguration.RmWorkPreservingRecoverySchedulingWaitMs, 0);
  MockRM rm = new MockRM(conf);
  try {
    rm.Start();
    AbstractYarnScheduler scheduler = (AbstractYarnScheduler)rm.GetResourceScheduler();
    Org.Apache.Hadoop.Yarn.Api.Records.Resource emptyResource =
        Org.Apache.Hadoop.Yarn.Api.Records.Resource.NewInstance(0, 0);
    Org.Apache.Hadoop.Yarn.Api.Records.Resource fullResource1 =
        Org.Apache.Hadoop.Yarn.Api.Records.Resource.NewInstance(1024, 5);
    Org.Apache.Hadoop.Yarn.Api.Records.Resource fullResource2 =
        Org.Apache.Hadoop.Yarn.Api.Records.Resource.NewInstance(2048, 10);
    SchedulerNode mockNode1 = Org.Mockito.Mockito.Mock<SchedulerNode>();
    Org.Mockito.Mockito.When(mockNode1.GetNodeID()).ThenReturn(NodeId.NewInstance("foo", 8080));
    Org.Mockito.Mockito.When(mockNode1.GetAvailableResource()).ThenReturn(emptyResource);
    Org.Mockito.Mockito.When(mockNode1.GetTotalResource()).ThenReturn(fullResource1);
    SchedulerNode mockNode2 = Org.Mockito.Mockito.Mock<SchedulerNode>();
    Org.Mockito.Mockito.When(mockNode2.GetNodeID()).ThenReturn(NodeId.NewInstance("bar", 8081));
    Org.Mockito.Mockito.When(mockNode2.GetAvailableResource()).ThenReturn(emptyResource);
    Org.Mockito.Mockito.When(mockNode2.GetTotalResource()).ThenReturn(fullResource2);
    VerifyMaximumResourceCapability(configuredMaximumResource, scheduler);
    scheduler.nodes = new Dictionary<NodeId, SchedulerNode>();
    scheduler.nodes[mockNode1.GetNodeID()] = mockNode1;
    scheduler.UpdateMaximumAllocation(mockNode1, true);
    VerifyMaximumResourceCapability(fullResource1, scheduler);
    scheduler.nodes[mockNode2.GetNodeID()] = mockNode2;
    scheduler.UpdateMaximumAllocation(mockNode2, true);
    VerifyMaximumResourceCapability(fullResource2, scheduler);
    Sharpen.Collections.Remove(scheduler.nodes, mockNode2.GetNodeID());
    scheduler.UpdateMaximumAllocation(mockNode2, false);
    VerifyMaximumResourceCapability(fullResource1, scheduler);
    Sharpen.Collections.Remove(scheduler.nodes, mockNode1.GetNodeID());
    scheduler.UpdateMaximumAllocation(mockNode1, false);
    VerifyMaximumResourceCapability(configuredMaximumResource, scheduler);
  } finally {
    rm.Stop();
  }
}
public virtual void TestUpdateReservationExceedsGangSize() {
  ReservationUpdateRequest request = CreateSimpleReservationUpdateRequest(1, 1, 1, 5, 4);
  Org.Apache.Hadoop.Yarn.Api.Records.Resource resource =
      Org.Apache.Hadoop.Yarn.Api.Records.Resource.NewInstance(512, 1);
  // Stub the mocked plan field; the local 'plan' below only holds the validator's result.
  Org.Mockito.Mockito.When(this.plan.GetTotalCapacity()).ThenReturn(resource);
  Plan plan = null;
  try {
    plan = rrValidator.ValidateReservationUpdateRequest(rSystem, request);
    NUnit.Framework.Assert.Fail();
  } catch (YarnException e) {
    NUnit.Framework.Assert.IsNull(plan);
    string message = e.Message;
    NUnit.Framework.Assert.IsTrue(message.StartsWith(
        "The size of the largest gang in the reservation refinition"));
    NUnit.Framework.Assert.IsTrue(message.Contains("exceed the capacity available "));
    Log.Info(message);
  }
}
public virtual void TestNormalizeRequestWithDominantResourceCalculator() {
  ResourceCalculator resourceCalculator = new DominantResourceCalculator();
  Org.Apache.Hadoop.Yarn.Api.Records.Resource minResource = Resources.CreateResource(1024, 1);
  Org.Apache.Hadoop.Yarn.Api.Records.Resource maxResource = Resources.CreateResource(10240, 10);
  Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterResource = Resources.CreateResource(10 * 1024, 10);
  ResourceRequest ask = new ResourceRequestPBImpl();
  // case negative memory/vcores
  ask.SetCapability(Resources.CreateResource(-1024, -1));
  SchedulerUtils.NormalizeRequest(ask, resourceCalculator, clusterResource, minResource, maxResource);
  NUnit.Framework.Assert.AreEqual(minResource, ask.GetCapability());
  // case zero memory/vcores
  ask.SetCapability(Resources.CreateResource(0, 0));
  SchedulerUtils.NormalizeRequest(ask, resourceCalculator, clusterResource, minResource, maxResource);
  NUnit.Framework.Assert.AreEqual(minResource, ask.GetCapability());
  NUnit.Framework.Assert.AreEqual(1, ask.GetCapability().GetVirtualCores());
  NUnit.Framework.Assert.AreEqual(1024, ask.GetCapability().GetMemory());
  // case non-zero memory & zero cores
  ask.SetCapability(Resources.CreateResource(1536, 0));
  SchedulerUtils.NormalizeRequest(ask, resourceCalculator, clusterResource, minResource, maxResource);
  NUnit.Framework.Assert.AreEqual(Resources.CreateResource(2048, 1), ask.GetCapability());
  NUnit.Framework.Assert.AreEqual(1, ask.GetCapability().GetVirtualCores());
  NUnit.Framework.Assert.AreEqual(2048, ask.GetCapability().GetMemory());
}
/// <exception cref="Org.Apache.Hadoop.Yarn.Exceptions.InvalidResourceRequestException /// "/> public static void NormalizeAndvalidateRequest(ResourceRequest resReq, Org.Apache.Hadoop.Yarn.Api.Records.Resource maximumResource, string queueName, YarnScheduler scheduler, RMContext rmContext ) { NormalizeAndValidateRequest(resReq, maximumResource, queueName, scheduler, false, rmContext, null); }
/// <summary>Get the maximum resource allocation for the given queue.</summary>
/// <returns>the cap set on this queue, or Resources.Unbounded() if not set.</returns>
public virtual Org.Apache.Hadoop.Yarn.Api.Records.Resource GetMaxResources(string queueName) {
  Org.Apache.Hadoop.Yarn.Api.Records.Resource maxQueueResource = maxQueueResources[queueName];
  return (maxQueueResource == null) ? Resources.Unbounded() : maxQueueResource;
}
/// <summary>Get the minimum resource allocation for the given queue.</summary>
/// <returns>the cap set on this queue, or 0 if not set.</returns>
public virtual Org.Apache.Hadoop.Yarn.Api.Records.Resource GetMinResources(string queue) {
  Org.Apache.Hadoop.Yarn.Api.Records.Resource minQueueResource = minQueueResources[queue];
  return (minQueueResource == null) ? Resources.None() : minQueueResource;
}
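// Illustrative only: a standalone sketch (plain MB values, hypothetical names) of the
// lookup-with-default pattern used by GetMaxResources/GetMinResources above — a missing
// max entry falls back to an unbounded value, a missing min entry falls back to zero.
public static class QueueLimitsSketch {
  private const int UnboundedMb = int.MaxValue;
  public static int MaxMbFor(System.Collections.Generic.IDictionary<string, int> maxMbByQueue, string queue) {
    int configured;
    return maxMbByQueue.TryGetValue(queue, out configured) ? configured : UnboundedMb;
  }
  public static int MinMbFor(System.Collections.Generic.IDictionary<string, int> minMbByQueue, string queue) {
    int configured;
    return minMbByQueue.TryGetValue(queue, out configured) ? configured : 0;
  }
}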
public virtual void Setup() {
  long seed = rand.NextLong();
  rand.SetSeed(seed);
  Org.Mortbay.Log.Log.Info("Running with seed: " + seed);
  // setting completely loose quotas
  long timeWindow = 1000000L;
  Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterCapacity =
      Org.Apache.Hadoop.Yarn.Api.Records.Resource.NewInstance(100 * 1024, 100);
  step = 1000L;
  ReservationSystemTestUtil testUtil = new ReservationSystemTestUtil();
  string reservationQ = testUtil.GetFullReservationQueueName();
  float instConstraint = 100;
  float avgConstraint = 100;
  ReservationSchedulerConfiguration conf =
      ReservationSystemTestUtil.CreateConf(reservationQ, timeWindow, instConstraint, avgConstraint);
  CapacityOverTimePolicy policy = new CapacityOverTimePolicy();
  policy.Init(reservationQ, conf);
  agent = new GreedyReservationAgent();
  QueueMetrics queueMetrics = Org.Mockito.Mockito.Mock<QueueMetrics>();
  plan = new InMemoryPlan(queueMetrics, policy, agent, clusterCapacity, step, res, minAlloc,
      maxAlloc, "dedicated", null, true);
}
public virtual void TestOffSwitchSchedulingMultiLevelQueues() {
  // Setup queue configs
  SetupMultiLevelQueues(csConf);
  //B3
  IDictionary<string, CSQueue> queues = new Dictionary<string, CSQueue>();
  CSQueue root = CapacityScheduler.ParseQueue(csContext, csConf, null,
      CapacitySchedulerConfiguration.Root, queues, queues, TestUtils.spyHook);
  // Setup some nodes
  int memoryPerNode = 10;
  int coresPerNode = 10;
  int numNodes = 2;
  FiCaSchedulerNode node_0 = TestUtils.GetMockNode("host_0", DefaultRack, 0, memoryPerNode * Gb);
  FiCaSchedulerNode node_1 = TestUtils.GetMockNode("host_1", DefaultRack, 0, memoryPerNode * Gb);
  Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterResource =
      Resources.CreateResource(numNodes * (memoryPerNode * Gb), numNodes * coresPerNode);
  Org.Mockito.Mockito.When(csContext.GetNumClusterNodes()).ThenReturn(numNodes);
  // Start testing
  LeafQueue b3 = (LeafQueue)queues[B3];
  LeafQueue b2 = (LeafQueue)queues[B2];
  // Simulate B3 returning a container on node_0
  StubQueueAllocation(b2, clusterResource, node_0, 0 * Gb, NodeType.OffSwitch);
  StubQueueAllocation(b3, clusterResource, node_0, 1 * Gb, NodeType.OffSwitch);
  root.AssignContainers(clusterResource, node_0, new ResourceLimits(clusterResource));
  VerifyQueueMetrics(b2, 0 * Gb, clusterResource);
  VerifyQueueMetrics(b3, 1 * Gb, clusterResource);
  // Now, B2 should get the scheduling opportunity since B2=0G/2G, B3=1G/7G
  // also, B3 gets a scheduling opportunity since B2 allocates RACK_LOCAL
  StubQueueAllocation(b2, clusterResource, node_1, 1 * Gb, NodeType.RackLocal);
  StubQueueAllocation(b3, clusterResource, node_1, 1 * Gb, NodeType.OffSwitch);
  root.AssignContainers(clusterResource, node_1, new ResourceLimits(clusterResource));
  InOrder allocationOrder = Org.Mockito.Mockito.InOrder(b2, b3);
  allocationOrder.Verify(b2).AssignContainers(Matchers.Eq(clusterResource),
      Matchers.Any<FiCaSchedulerNode>(), AnyResourceLimits());
  allocationOrder.Verify(b3).AssignContainers(Matchers.Eq(clusterResource),
      Matchers.Any<FiCaSchedulerNode>(), AnyResourceLimits());
  VerifyQueueMetrics(b2, 1 * Gb, clusterResource);
  VerifyQueueMetrics(b3, 2 * Gb, clusterResource);
  // Now, B3 should get the scheduling opportunity
  // since B2 has 1/2G while B3 has 2/7G,
  // However, since B3 returns off-switch, B2 won't get an opportunity
  StubQueueAllocation(b2, clusterResource, node_0, 1 * Gb, NodeType.NodeLocal);
  StubQueueAllocation(b3, clusterResource, node_0, 1 * Gb, NodeType.OffSwitch);
  root.AssignContainers(clusterResource, node_0, new ResourceLimits(clusterResource));
  allocationOrder = Org.Mockito.Mockito.InOrder(b3, b2);
  allocationOrder.Verify(b3).AssignContainers(Matchers.Eq(clusterResource),
      Matchers.Any<FiCaSchedulerNode>(), AnyResourceLimits());
  allocationOrder.Verify(b2).AssignContainers(Matchers.Eq(clusterResource),
      Matchers.Any<FiCaSchedulerNode>(), AnyResourceLimits());
  VerifyQueueMetrics(b2, 1 * Gb, clusterResource);
  VerifyQueueMetrics(b3, 3 * Gb, clusterResource);
}
public override void UpdateDemand() {
  // Compute demand by iterating through apps in the queue
  // Limit demand to maxResources
  Org.Apache.Hadoop.Yarn.Api.Records.Resource maxRes =
      scheduler.GetAllocationConfiguration().GetMaxResources(GetName());
  demand = Resources.CreateResource(0);
  foreach (FSQueue childQueue in childQueues) {
    childQueue.UpdateDemand();
    Org.Apache.Hadoop.Yarn.Api.Records.Resource toAdd = childQueue.GetDemand();
    if (Log.IsDebugEnabled()) {
      Log.Debug("Counting resource from " + childQueue.GetName() + " " + toAdd
        + "; Total resource consumption for " + GetName() + " now " + demand);
    }
    demand = Resources.Add(demand, toAdd);
    demand = Resources.ComponentwiseMin(demand, maxRes);
    if (Resources.Equals(demand, maxRes)) {
      break;
    }
  }
  if (Log.IsDebugEnabled()) {
    Log.Debug("The updated demand for " + GetName() + " is " + demand + "; the max is " + maxRes);
  }
}
/// <exception cref="System.Exception"/> public CSAssignment Answer(InvocationOnMock invocation) { try { throw new Exception(); } catch (Exception) { TestChildQueueOrder.Log.Info("FOOBAR q.assignContainers q=" + queue.GetQueueName( ) + " alloc=" + allocation + " node=" + node.GetNodeName()); } Org.Apache.Hadoop.Yarn.Api.Records.Resource allocatedResource = Resources.CreateResource (allocation); if (queue is ParentQueue) { ((ParentQueue)queue).AllocateResource(clusterResource, allocatedResource, null); } else { FiCaSchedulerApp app1 = this._enclosing.GetMockApplication(0, string.Empty); ((LeafQueue)queue).AllocateResource(clusterResource, app1, allocatedResource, null ); } if (allocation > 0) { Org.Mockito.Mockito.DoReturn(new CSAssignment(Resources.None(), type)).When(queue ).AssignContainers(Matchers.Eq(clusterResource), Matchers.Eq(node), Matchers.Any <ResourceLimits>()); Org.Apache.Hadoop.Yarn.Api.Records.Resource available = node.GetAvailableResource (); Org.Mockito.Mockito.DoReturn(Resources.SubtractFrom(available, allocatedResource) ).When(node).GetAvailableResource(); } return(new CSAssignment(allocatedResource, type)); }
/// <summary>
/// Headroom depends on resources in the cluster, current usage of the
/// queue, queue's fair-share and queue's max-resources.
/// </summary>
public override Org.Apache.Hadoop.Yarn.Api.Records.Resource GetHeadroom() {
  FSQueue queue = (FSQueue)this.queue;
  SchedulingPolicy policy = queue.GetPolicy();
  Org.Apache.Hadoop.Yarn.Api.Records.Resource queueFairShare = queue.GetFairShare();
  Org.Apache.Hadoop.Yarn.Api.Records.Resource queueUsage = queue.GetResourceUsage();
  Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterResource = this.scheduler.GetClusterResource();
  Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterUsage =
      this.scheduler.GetRootQueueMetrics().GetAllocatedResources();
  Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterAvailableResources =
      Resources.Subtract(clusterResource, clusterUsage);
  Org.Apache.Hadoop.Yarn.Api.Records.Resource queueMaxAvailableResources =
      Resources.Subtract(queue.GetMaxShare(), queueUsage);
  Org.Apache.Hadoop.Yarn.Api.Records.Resource maxAvailableResource =
      Resources.ComponentwiseMin(clusterAvailableResources, queueMaxAvailableResources);
  Org.Apache.Hadoop.Yarn.Api.Records.Resource headroom =
      policy.GetHeadroom(queueFairShare, queueUsage, maxAvailableResource);
  if (Log.IsDebugEnabled()) {
    Log.Debug("Headroom calculation for " + this.GetName() + ":" + "Min(" + "(queueFairShare="
      + queueFairShare + " - queueUsage=" + queueUsage + ")," + " maxAvailableResource="
      + maxAvailableResource + "Headroom=" + headroom);
  }
  return headroom;
}
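// Illustrative only: a standalone sketch (plain MB values, hypothetical names) of the
// bound computed above — the policy is handed the component-wise minimum of the cluster's
// free space and the queue's remaining max-share room, and (as the
// DominantResourceFairnessPolicy case in TestHeadroom below shows) the resulting headroom
// also cannot exceed the fair-share gap.
public static class HeadroomSketch {
  public static int HeadroomMb(int fairShareGapMb, int clusterFreeMb, int queueMaxShareRoomMb) {
    int maxAvailableMb = System.Math.Min(clusterFreeMb, queueMaxShareRoomMb);
    return System.Math.Min(fairShareGapMb, maxAvailableMb);
  }
}
// Example (numbers from TestHeadroom): fair-share gap 2048 MB, cluster free 6144 MB,
// queue max-share room 3072 MB -> headroom 2048 MB.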
public virtual void ContainerCompleted(RMContainer rmContainer, ContainerStatus containerStatus,
    RMContainerEventType @event) {
  lock (this) {
    Container container = rmContainer.GetContainer();
    ContainerId containerId = container.GetId();
    // Remove from the list of newly allocated containers if found
    newlyAllocatedContainers.Remove(rmContainer);
    // Inform the container
    rmContainer.Handle(new RMContainerFinishedEvent(containerId, containerStatus, @event));
    Log.Info("Completed container: " + rmContainer.GetContainerId() + " in state: "
      + rmContainer.GetState() + " event:" + @event);
    // Remove from the list of containers
    Sharpen.Collections.Remove(liveContainers, rmContainer.GetContainerId());
    RMAuditLogger.LogSuccess(GetUser(), RMAuditLogger.AuditConstants.ReleaseContainer,
        "SchedulerApp", GetApplicationId(), containerId);
    // Update usage metrics
    Org.Apache.Hadoop.Yarn.Api.Records.Resource containerResource =
        rmContainer.GetContainer().GetResource();
    queue.GetMetrics().ReleaseResources(GetUser(), 1, containerResource);
    Resources.SubtractFrom(currentConsumption, containerResource);
    // remove from preemption map if it is completed
    Sharpen.Collections.Remove(preemptionMap, rmContainer);
    // Clear resource utilization metrics cache.
    lastMemoryAggregateAllocationUpdateTime = -1;
  }
}
protected internal virtual void VerifyHeadroom(FSAppAttempt schedulerApp, int expectedMemory,
    int expectedCPU) {
  Org.Apache.Hadoop.Yarn.Api.Records.Resource headroom = schedulerApp.GetHeadroom();
  NUnit.Framework.Assert.AreEqual(expectedMemory, headroom.GetMemory());
  NUnit.Framework.Assert.AreEqual(expectedCPU, headroom.GetVirtualCores());
}
private bool IsStarved(Org.Apache.Hadoop.Yarn.Api.Records.Resource share) {
  Org.Apache.Hadoop.Yarn.Api.Records.Resource desiredShare = Resources.Min(
      scheduler.GetResourceCalculator(), scheduler.GetClusterResource(), share, GetDemand());
  return Resources.LessThan(scheduler.GetResourceCalculator(), scheduler.GetClusterResource(),
      GetResourceUsage(), desiredShare);
}
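// Illustrative only: a standalone restatement (plain MB values, hypothetical names) of the
// starvation test above — an entity is starved when its usage is below the smaller of the
// share it is owed and what it actually demands.
public static class StarvationSketch {
  public static bool IsStarved(int usageMb, int shareMb, int demandMb) {
    int desiredShareMb = System.Math.Min(shareMb, demandMb);
    return usageMb < desiredShareMb;
  }
}
// Example: usage 1024 MB, share 4096 MB, demand 2048 MB -> desired share 2048 MB -> starved.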
public virtual void Setup() {
  // 24h window
  timeWindow = 86400000L;
  // 1 sec step
  step = 1000L;
  // 25% avg cap on capacity
  avgConstraint = 25;
  // 70% instantaneous cap on capacity
  instConstraint = 70;
  initTime = Runtime.CurrentTimeMillis();
  minAlloc = Org.Apache.Hadoop.Yarn.Api.Records.Resource.NewInstance(1024, 1);
  res = new DefaultResourceCalculator();
  maxAlloc = Org.Apache.Hadoop.Yarn.Api.Records.Resource.NewInstance(1024 * 8, 8);
  mAgent = Org.Mockito.Mockito.Mock<ReservationAgent>();
  ReservationSystemTestUtil testUtil = new ReservationSystemTestUtil();
  QueueMetrics rootQueueMetrics = Org.Mockito.Mockito.Mock<QueueMetrics>();
  string reservationQ = testUtil.GetFullReservationQueueName();
  Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterResource =
      ReservationSystemTestUtil.CalculateClusterResource(totCont);
  ReservationSchedulerConfiguration conf =
      ReservationSystemTestUtil.CreateConf(reservationQ, timeWindow, instConstraint, avgConstraint);
  CapacityOverTimePolicy policy = new CapacityOverTimePolicy();
  policy.Init(reservationQ, conf);
  plan = new InMemoryPlan(rootQueueMetrics, policy, mAgent, clusterResource, step, res, minAlloc,
      maxAlloc, "dedicated", null, true);
}
public virtual void Move(Queue newQueue) {
  lock (this) {
    QueueMetrics oldMetrics = queue.GetMetrics();
    QueueMetrics newMetrics = newQueue.GetMetrics();
    string user = GetUser();
    foreach (RMContainer liveContainer in liveContainers.Values) {
      Org.Apache.Hadoop.Yarn.Api.Records.Resource resource = liveContainer.GetContainer().GetResource();
      oldMetrics.ReleaseResources(user, 1, resource);
      newMetrics.AllocateResources(user, 1, resource, false);
    }
    foreach (IDictionary<NodeId, RMContainer> map in reservedContainers.Values) {
      foreach (RMContainer reservedContainer in map.Values) {
        Org.Apache.Hadoop.Yarn.Api.Records.Resource resource = reservedContainer.GetReservedResource();
        oldMetrics.UnreserveResource(user, resource);
        newMetrics.ReserveResource(user, resource);
      }
    }
    appSchedulingInfo.Move(newQueue);
    this.queue = newQueue;
  }
}
internal virtual AggregateAppResourceUsage GetRunningAggregateAppResourceUsage() {
  lock (this) {
    long currentTimeMillis = Runtime.CurrentTimeMillis();
    // Don't walk the whole container list if the resources were computed
    // recently.
    if ((currentTimeMillis - lastMemoryAggregateAllocationUpdateTime) > MemAggregateAllocationCacheMsecs) {
      long memorySeconds = 0;
      long vcoreSeconds = 0;
      foreach (RMContainer rmContainer in this.liveContainers.Values) {
        long usedMillis = currentTimeMillis - rmContainer.GetCreationTime();
        Org.Apache.Hadoop.Yarn.Api.Records.Resource resource = rmContainer.GetContainer().GetResource();
        memorySeconds += resource.GetMemory() * usedMillis / DateUtils.MillisPerSecond;
        vcoreSeconds += resource.GetVirtualCores() * usedMillis / DateUtils.MillisPerSecond;
      }
      lastMemoryAggregateAllocationUpdateTime = currentTimeMillis;
      lastMemorySeconds = memorySeconds;
      lastVcoreSeconds = vcoreSeconds;
    }
    return new AggregateAppResourceUsage(lastMemorySeconds, lastVcoreSeconds);
  }
}
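// Illustrative only: a standalone sketch (plain values, hypothetical names) of the
// per-container accounting above — each live container contributes
// resource * elapsed-milliseconds / 1000 to the running memory/vcore-second totals.
public static class ResourceSecondsSketch {
  public static long MemorySeconds(int memoryMb, long usedMillis) {
    return (long)memoryMb * usedMillis / 1000L;
  }
}
// Example: a 2048 MB container alive for 90000 ms adds 2048 * 90000 / 1000 = 184320 MB-seconds.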
public virtual void TearDown() {
  user = null;
  planName = null;
  resCalc = null;
  minAlloc = null;
}
protected internal override bool ArePlanResourcesLessThanReservations(
    Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterResources,
    Org.Apache.Hadoop.Yarn.Api.Records.Resource planResources,
    Org.Apache.Hadoop.Yarn.Api.Records.Resource reservedResources) {
  return Resources.GreaterThan(fs.GetResourceCalculator(), clusterResources, reservedResources,
      planResources);
}
/// <summary>Process resource update on a node.</summary>
public virtual void UpdateNodeResource(RMNode nm, ResourceOption resourceOption) {
  lock (this) {
    SchedulerNode node = GetSchedulerNode(nm.GetNodeID());
    Org.Apache.Hadoop.Yarn.Api.Records.Resource newResource = resourceOption.GetResource();
    Org.Apache.Hadoop.Yarn.Api.Records.Resource oldResource = node.GetTotalResource();
    if (!oldResource.Equals(newResource)) {
      // Log resource change
      Log.Info("Update resource on node: " + node.GetNodeName() + " from: " + oldResource
        + ", to: " + newResource);
      Sharpen.Collections.Remove(nodes, nm.GetNodeID());
      UpdateMaximumAllocation(node, false);
      // update resource to node
      node.SetTotalResource(newResource);
      nodes[nm.GetNodeID()] = (N)node;
      UpdateMaximumAllocation(node, true);
      // update resource to clusterResource
      Resources.SubtractFrom(clusterResource, oldResource);
      Resources.AddTo(clusterResource, newResource);
    } else {
      // Log resource change
      Log.Warn("Update resource on node: " + node.GetNodeName() + " with the same resource: "
        + newResource);
    }
  }
}
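// Illustrative only: a standalone sketch (plain MB values, hypothetical names) of the
// cluster-total adjustment above — the node's old size is subtracted and its new size added,
// so the cluster total tracks the resize without being rebuilt from scratch.
public static class ClusterResizeSketch {
  public static int AdjustClusterMb(int clusterMb, int oldNodeMb, int newNodeMb) {
    return clusterMb - oldNodeMb + newNodeMb;
  }
}
// Example: a 40960 MB cluster whose node shrinks from 8192 MB to 4096 MB becomes 36864 MB.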
/// <summary>
/// Assign a container to this node to facilitate
/// <paramref name="request"/>. If node does
/// not have enough memory, create a reservation. This is called once we are
/// sure the particular request should be facilitated by this node.
/// </summary>
/// <param name="node">The node to try placing the container on.</param>
/// <param name="request">The ResourceRequest we're trying to satisfy.</param>
/// <param name="type">The locality of the assignment.</param>
/// <param name="reserved">Whether there's already a container reserved for this app on the node.</param>
/// <returns>
/// If an assignment was made, returns the resources allocated to the
/// container. If a reservation was made, returns
/// FairScheduler.CONTAINER_RESERVED. If no assignment or reservation was
/// made, returns an empty resource.
/// </returns>
private Org.Apache.Hadoop.Yarn.Api.Records.Resource AssignContainer(FSSchedulerNode node,
    ResourceRequest request, NodeType type, bool reserved) {
  // How much does this request need?
  Org.Apache.Hadoop.Yarn.Api.Records.Resource capability = request.GetCapability();
  // How much does the node have?
  Org.Apache.Hadoop.Yarn.Api.Records.Resource available = node.GetAvailableResource();
  Container container = null;
  if (reserved) {
    container = node.GetReservedContainer().GetContainer();
  } else {
    container = CreateContainer(node, capability, request.GetPriority());
  }
  // Can we allocate a container on this node?
  if (Resources.FitsIn(capability, available)) {
    // Inform the application of the new container for this request
    RMContainer allocatedContainer = Allocate(type, node, request.GetPriority(), request, container);
    if (allocatedContainer == null) {
      // Did the application need this resource?
      if (reserved) {
        Unreserve(request.GetPriority(), node);
      }
      return Resources.None();
    }
    // If we had previously made a reservation, delete it
    if (reserved) {
      Unreserve(request.GetPriority(), node);
    }
    // Inform the node
    node.AllocateContainer(allocatedContainer);
    // If this container is used to run AM, update the leaf queue's AM usage
    if (GetLiveContainers().Count == 1 && !GetUnmanagedAM()) {
      ((FSLeafQueue)GetQueue()).AddAMResourceUsage(container.GetResource());
      SetAmRunning(true);
    }
    return container.GetResource();
  } else {
    if (!FairScheduler.FitsInMaxShare(((FSLeafQueue)GetQueue()), capability)) {
      return Resources.None();
    }
    // The desired container won't fit here, so reserve
    Reserve(request.GetPriority(), node, container, reserved);
    return FairScheduler.ContainerReserved;
  }
}
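// Illustrative only: a standalone sketch (plain MB values, hypothetical names) of the
// three-way outcome above — allocate when the request fits the node, reserve when it does
// not fit the node but is still within the queue's max share, otherwise give up.
public static class AssignOutcomeSketch {
  public enum Outcome { Allocated, Reserved, None }
  public static Outcome Decide(int requestMb, int nodeAvailableMb, bool fitsInQueueMaxShare) {
    if (requestMb <= nodeAvailableMb) {
      return Outcome.Allocated;
    }
    return fitsInQueueMaxShare ? Outcome.Reserved : Outcome.None;
  }
}
// Example: a 4096 MB request on a node with 2048 MB free is reserved only if the queue's
// max share still has room for it; otherwise nothing happens.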
public virtual void TestHeadroom() {
  FairScheduler mockScheduler = Org.Mockito.Mockito.Mock<FairScheduler>();
  Org.Mockito.Mockito.When(mockScheduler.GetClock()).ThenReturn(scheduler.GetClock());
  FSLeafQueue mockQueue = Org.Mockito.Mockito.Mock<FSLeafQueue>();
  Org.Apache.Hadoop.Yarn.Api.Records.Resource queueMaxResources =
      Org.Apache.Hadoop.Yarn.Api.Records.Resource.NewInstance(5 * 1024, 3);
  Org.Apache.Hadoop.Yarn.Api.Records.Resource queueFairShare = Resources.CreateResource(4096, 2);
  Org.Apache.Hadoop.Yarn.Api.Records.Resource queueUsage =
      Org.Apache.Hadoop.Yarn.Api.Records.Resource.NewInstance(2048, 2);
  Org.Apache.Hadoop.Yarn.Api.Records.Resource queueStarvation =
      Resources.Subtract(queueFairShare, queueUsage);
  Org.Apache.Hadoop.Yarn.Api.Records.Resource queueMaxResourcesAvailable =
      Resources.Subtract(queueMaxResources, queueUsage);
  Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterResource = Resources.CreateResource(8192, 8);
  Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterUsage = Resources.CreateResource(2048, 2);
  Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterAvailable =
      Resources.Subtract(clusterResource, clusterUsage);
  QueueMetrics fakeRootQueueMetrics = Org.Mockito.Mockito.Mock<QueueMetrics>();
  Org.Mockito.Mockito.When(mockQueue.GetMaxShare()).ThenReturn(queueMaxResources);
  Org.Mockito.Mockito.When(mockQueue.GetFairShare()).ThenReturn(queueFairShare);
  Org.Mockito.Mockito.When(mockQueue.GetResourceUsage()).ThenReturn(queueUsage);
  Org.Mockito.Mockito.When(mockScheduler.GetClusterResource()).ThenReturn(clusterResource);
  Org.Mockito.Mockito.When(fakeRootQueueMetrics.GetAllocatedResources()).ThenReturn(clusterUsage);
  Org.Mockito.Mockito.When(mockScheduler.GetRootQueueMetrics()).ThenReturn(fakeRootQueueMetrics);
  ApplicationAttemptId applicationAttemptId = CreateAppAttemptId(1, 1);
  RMContext rmContext = resourceManager.GetRMContext();
  FSAppAttempt schedulerApp = new FSAppAttempt(mockScheduler, applicationAttemptId, "user1",
      mockQueue, null, rmContext);
  // Min of Memory and CPU across cluster and queue is used in
  // DominantResourceFairnessPolicy
  Org.Mockito.Mockito.When(mockQueue.GetPolicy()).ThenReturn(
      SchedulingPolicy.GetInstance(typeof(DominantResourceFairnessPolicy)));
  VerifyHeadroom(schedulerApp,
      Min(queueStarvation.GetMemory(), clusterAvailable.GetMemory(),
          queueMaxResourcesAvailable.GetMemory()),
      Min(queueStarvation.GetVirtualCores(), clusterAvailable.GetVirtualCores(),
          queueMaxResourcesAvailable.GetVirtualCores()));
  // Fair and Fifo ignore CPU of queue, so use cluster available CPU
  Org.Mockito.Mockito.When(mockQueue.GetPolicy()).ThenReturn(
      SchedulingPolicy.GetInstance(typeof(FairSharePolicy)));
  VerifyHeadroom(schedulerApp,
      Min(queueStarvation.GetMemory(), clusterAvailable.GetMemory(),
          queueMaxResourcesAvailable.GetMemory()),
      Math.Min(clusterAvailable.GetVirtualCores(), queueMaxResourcesAvailable.GetVirtualCores()));
  Org.Mockito.Mockito.When(mockQueue.GetPolicy()).ThenReturn(
      SchedulingPolicy.GetInstance(typeof(FifoPolicy)));
  VerifyHeadroom(schedulerApp,
      Min(queueStarvation.GetMemory(), clusterAvailable.GetMemory(),
          queueMaxResourcesAvailable.GetMemory()),
      Math.Min(clusterAvailable.GetVirtualCores(), queueMaxResourcesAvailable.GetVirtualCores()));
}
public virtual void ResetPreemptedResources() {
  preemptedResources = Resources.CreateResource(0);
  foreach (RMContainer container in GetPreemptionContainers()) {
    Resources.AddTo(preemptedResources, container.GetAllocatedResource());
  }
}
public virtual void AddAMResourceUsage(Org.Apache.Hadoop.Yarn.Api.Records.Resource amResource) {
  if (amResource != null) {
    Resources.AddTo(amResourceUsage, amResource);
  }
}
public override void ComputeSteadyShares<_T0>(ICollection<_T0> queues,
    Org.Apache.Hadoop.Yarn.Api.Records.Resource totalResources) {
  foreach (ResourceType type in ResourceType.Values()) {
    ComputeFairShares.ComputeSteadyShares(queues, totalResources, type);
  }
}
public virtual void SetHeadroom(Org.Apache.Hadoop.Yarn.Api.Records.Resource globalLimit) {
  lock (this) {
    this.resourceLimit = globalLimit;
  }
}
private bool IsSameAsNext(long key, Org.Apache.Hadoop.Yarn.Api.Records.Resource capacity) {
  KeyValuePair<long, Org.Apache.Hadoop.Yarn.Api.Records.Resource> next =
      cumulativeCapacity.HigherEntry(key);
  return next != null && next.Value.Equals(capacity);
}