protected internal virtual void VerifyHeadroom(FSAppAttempt schedulerApp, int expectedMemory, int expectedCPU)
{
    Org.Apache.Hadoop.Yarn.Api.Records.Resource headroom = schedulerApp.GetHeadroom();
    NUnit.Framework.Assert.AreEqual(expectedMemory, headroom.GetMemory());
    NUnit.Framework.Assert.AreEqual(expectedCPU, headroom.GetVirtualCores());
}
internal virtual AggregateAppResourceUsage GetRunningAggregateAppResourceUsage()
{
    lock (this)
    {
        long currentTimeMillis = Runtime.CurrentTimeMillis();
        // Don't walk the whole container list if the resources were computed
        // recently.
        if ((currentTimeMillis - lastMemoryAggregateAllocationUpdateTime) > MemAggregateAllocationCacheMsecs)
        {
            long memorySeconds = 0;
            long vcoreSeconds = 0;
            foreach (RMContainer rmContainer in this.liveContainers.Values)
            {
                long usedMillis = currentTimeMillis - rmContainer.GetCreationTime();
                Org.Apache.Hadoop.Yarn.Api.Records.Resource resource = rmContainer.GetContainer().GetResource();
                memorySeconds += resource.GetMemory() * usedMillis / DateUtils.MillisPerSecond;
                vcoreSeconds += resource.GetVirtualCores() * usedMillis / DateUtils.MillisPerSecond;
            }
            lastMemoryAggregateAllocationUpdateTime = currentTimeMillis;
            lastMemorySeconds = memorySeconds;
            lastVcoreSeconds = vcoreSeconds;
        }
        return new AggregateAppResourceUsage(lastMemorySeconds, lastVcoreSeconds);
    }
}
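// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the scheduler code): the aggregation above
// credits each live container with resource * elapsed-milliseconds / 1000.
// The container numbers below are hypothetical.
using System;

static class AggregateSketch
{
    static void Main()
    {
        long nowMillis = 100_000;
        // (creationTime, memoryMb, vcores) for two hypothetical live containers
        var containers = new (long created, long memoryMb, long vcores)[]
        {
            (40_000, 2048, 2),   // running for 60 seconds
            (70_000, 1024, 1)    // running for 30 seconds
        };
        long memorySeconds = 0;
        long vcoreSeconds = 0;
        foreach (var c in containers)
        {
            long usedMillis = nowMillis - c.created;
            memorySeconds += c.memoryMb * usedMillis / 1000;
            vcoreSeconds += c.vcores * usedMillis / 1000;
        }
        // memorySeconds = 2048*60 + 1024*30 = 153600; vcoreSeconds = 2*60 + 1*30 = 150
        Console.WriteLine($"memorySeconds={memorySeconds}, vcoreSeconds={vcoreSeconds}");
    }
}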
private void VerifyMaximumResourceCapability(Org.Apache.Hadoop.Yarn.Api.Records.Resource expectedMaximumResource, AbstractYarnScheduler scheduler)
{
    Org.Apache.Hadoop.Yarn.Api.Records.Resource schedulerMaximumResourceCapability = scheduler.GetMaximumResourceCapability();
    NUnit.Framework.Assert.AreEqual(expectedMaximumResource.GetMemory(), schedulerMaximumResourceCapability.GetMemory());
    NUnit.Framework.Assert.AreEqual(expectedMaximumResource.GetVirtualCores(), schedulerMaximumResourceCapability.GetVirtualCores());
}
/// <summary>
/// Get the per queue setting for the maximum limit to allocate to
/// each container request.
/// </summary>
/// <param name="queue">name of the queue</param>
/// <returns>the per-queue setting if specified, otherwise the cluster-wide setting</returns>
public virtual Org.Apache.Hadoop.Yarn.Api.Records.Resource GetMaximumAllocationPerQueue(string queue)
{
    string queuePrefix = GetQueuePrefix(queue);
    int maxAllocationMbPerQueue = GetInt(queuePrefix + MaximumAllocationMb, (int)Undefined);
    int maxAllocationVcoresPerQueue = GetInt(queuePrefix + MaximumAllocationVcores, (int)Undefined);
    if (Log.IsDebugEnabled())
    {
        Log.Debug("max alloc mb per queue for " + queue + " is " + maxAllocationMbPerQueue);
        Log.Debug("max alloc vcores per queue for " + queue + " is " + maxAllocationVcoresPerQueue);
    }
    Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterMax = GetMaximumAllocation();
    if (maxAllocationMbPerQueue == (int)Undefined)
    {
        Log.Info("max alloc mb per queue for " + queue + " is undefined");
        maxAllocationMbPerQueue = clusterMax.GetMemory();
    }
    if (maxAllocationVcoresPerQueue == (int)Undefined)
    {
        Log.Info("max alloc vcore per queue for " + queue + " is undefined");
        maxAllocationVcoresPerQueue = clusterMax.GetVirtualCores();
    }
    Org.Apache.Hadoop.Yarn.Api.Records.Resource result = Resources.CreateResource(maxAllocationMbPerQueue, maxAllocationVcoresPerQueue);
    if (maxAllocationMbPerQueue > clusterMax.GetMemory() || maxAllocationVcoresPerQueue > clusterMax.GetVirtualCores())
    {
        throw new ArgumentException("Queue maximum allocation cannot be larger than the cluster setting"
            + " for queue " + queue
            + " max allocation per queue: " + result
            + " cluster setting: " + clusterMax);
    }
    return result;
}
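// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the configuration class): the rule above is
// "fall back to the cluster maximum when the per-queue value is undefined, and
// reject per-queue values larger than the cluster maximum". The Undefined
// sentinel of -1 and all numbers below are assumptions for the sketch only.
using System;

static class PerQueueMaxSketch
{
    const int Undefined = -1;

    static int Resolve(int perQueueMax, int clusterMax)
    {
        if (perQueueMax == Undefined)
        {
            // no per-queue setting: fall back to the cluster-wide maximum
            return clusterMax;
        }
        if (perQueueMax > clusterMax)
        {
            throw new ArgumentException("Queue maximum allocation cannot be larger than the cluster setting");
        }
        return perQueueMax;
    }

    static void Main()
    {
        Console.WriteLine(Resolve(Undefined, 8192));  // 8192 (falls back)
        Console.WriteLine(Resolve(4096, 8192));       // 4096 (per-queue setting wins)
    }
}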
public override Org.Apache.Hadoop.Yarn.Api.Records.Resource GetHeadroom(Org.Apache.Hadoop.Yarn.Api.Records.Resource queueFairShare, Org.Apache.Hadoop.Yarn.Api.Records.Resource queueUsage, Org.Apache.Hadoop.Yarn.Api.Records.Resource maxAvailable)
{
    int queueAvailableMemory = Math.Max(queueFairShare.GetMemory() - queueUsage.GetMemory(), 0);
    int queueAvailableCPU = Math.Max(queueFairShare.GetVirtualCores() - queueUsage.GetVirtualCores(), 0);
    Org.Apache.Hadoop.Yarn.Api.Records.Resource headroom = Resources.CreateResource(
        Math.Min(maxAvailable.GetMemory(), queueAvailableMemory),
        Math.Min(maxAvailable.GetVirtualCores(), queueAvailableCPU));
    return headroom;
}
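// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the policy class): the headroom above is a
// component-wise min(maxAvailable, max(fairShare - usage, 0)). Plain ints
// stand in for the YARN Resource type; the numbers are hypothetical.
using System;

static class HeadroomSketch
{
    static (int memory, int vcores) Headroom(
        (int memory, int vcores) fairShare,
        (int memory, int vcores) usage,
        (int memory, int vcores) maxAvailable)
    {
        int mem = Math.Min(maxAvailable.memory, Math.Max(fairShare.memory - usage.memory, 0));
        int cpu = Math.Min(maxAvailable.vcores, Math.Max(fairShare.vcores - usage.vcores, 0));
        return (mem, cpu);
    }

    static void Main()
    {
        // fair share <4096 MB, 2 vcores>, usage <2048 MB, 2 vcores>, <3072 MB, 1 vcore> still available
        var h = Headroom((4096, 2), (2048, 2), (3072, 1));
        Console.WriteLine($"headroom = <{h.memory} MB, {h.vcores} vcores>");  // <2048 MB, 0 vcores>
    }
}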
/// <summary>
/// Utility method to validate a resource request, by ensuring that the
/// requested memory/vcore is non-negative and not greater than max
/// </summary>
/// <exception cref="Org.Apache.Hadoop.Yarn.Exceptions.InvalidResourceRequestException">when there is an invalid request</exception>
private static void ValidateResourceRequest(ResourceRequest resReq, Org.Apache.Hadoop.Yarn.Api.Records.Resource maximumResource, QueueInfo queueInfo, RMContext rmContext)
{
    if (resReq.GetCapability().GetMemory() < 0 || resReq.GetCapability().GetMemory() > maximumResource.GetMemory())
    {
        throw new InvalidResourceRequestException("Invalid resource request"
            + ", requested memory < 0"
            + ", or requested memory > max configured"
            + ", requestedMemory=" + resReq.GetCapability().GetMemory()
            + ", maxMemory=" + maximumResource.GetMemory());
    }
    if (resReq.GetCapability().GetVirtualCores() < 0 || resReq.GetCapability().GetVirtualCores() > maximumResource.GetVirtualCores())
    {
        throw new InvalidResourceRequestException("Invalid resource request"
            + ", requested virtual cores < 0"
            + ", or requested virtual cores > max configured"
            + ", requestedVirtualCores=" + resReq.GetCapability().GetVirtualCores()
            + ", maxVirtualCores=" + maximumResource.GetVirtualCores());
    }
    string labelExp = resReq.GetNodeLabelExpression();
    // we don't allow specifying a label expression other than resourceName=ANY for now
    if (!ResourceRequest.Any.Equals(resReq.GetResourceName()) && labelExp != null && !labelExp.Trim().IsEmpty())
    {
        throw new InvalidResourceRequestException("Invalid resource request, queue=" + queueInfo.GetQueueName()
            + " specified node label expression in a resource request has resource name = " + resReq.GetResourceName());
    }
    // we don't allow specifying a label expression with more than one node label for now
    if (labelExp != null && labelExp.Contains("&&"))
    {
        throw new InvalidResourceRequestException("Invalid resource request, queue=" + queueInfo.GetQueueName()
            + " specified more than one node label in a node label expression, node label expression = " + labelExp);
    }
    if (labelExp != null && !labelExp.Trim().IsEmpty() && queueInfo != null)
    {
        if (!CheckQueueLabelExpression(queueInfo.GetAccessibleNodeLabels(), labelExp, rmContext))
        {
            throw new InvalidResourceRequestException("Invalid resource request"
                + ", queue=" + queueInfo.GetQueueName()
                + " doesn't have permission to access all labels in resource request."
                + " labelExpression of resource request=" + labelExp
                + ". Queue labels=" + (queueInfo.GetAccessibleNodeLabels() == null
                    ? string.Empty
                    : StringUtils.Join(queueInfo.GetAccessibleNodeLabels().GetEnumerator(), ',')));
        }
    }
}
private static int GetResourceValue(Org.Apache.Hadoop.Yarn.Api.Records.Resource resource, ResourceType type)
{
    switch (type)
    {
        case ResourceType.Memory:
        {
            return resource.GetMemory();
        }
        case ResourceType.Cpu:
        {
            return resource.GetVirtualCores();
        }
        default:
        {
            throw new ArgumentException("Invalid resource");
        }
    }
}
/// <summary>
/// Calculates and orders a resource's share of a pool in terms of two vectors.
/// </summary>
/// <remarks>
/// Calculates and orders a resource's share of a pool in terms of two vectors.
/// The shares vector contains, for each resource, the fraction of the pool that
/// it takes up. The resourceOrder vector contains an ordering of resources
/// by largest share. So if resource=<10 MB, 5 CPU>, and pool=<100 MB, 10 CPU>,
/// shares will be [.1, .5] and resourceOrder will be [CPU, MEMORY].
/// </remarks>
internal virtual void CalculateShares(Org.Apache.Hadoop.Yarn.Api.Records.Resource resource, Org.Apache.Hadoop.Yarn.Api.Records.Resource pool, ResourceWeights shares, ResourceType[] resourceOrder, ResourceWeights weights)
{
    shares.SetWeight(ResourceType.Memory, (float)resource.GetMemory() / (pool.GetMemory() * weights.GetWeight(ResourceType.Memory)));
    shares.SetWeight(ResourceType.Cpu, (float)resource.GetVirtualCores() / (pool.GetVirtualCores() * weights.GetWeight(ResourceType.Cpu)));
    // sort order vector by resource share
    if (resourceOrder != null)
    {
        if (shares.GetWeight(ResourceType.Memory) > shares.GetWeight(ResourceType.Cpu))
        {
            resourceOrder[0] = ResourceType.Memory;
            resourceOrder[1] = ResourceType.Cpu;
        }
        else
        {
            resourceOrder[0] = ResourceType.Cpu;
            resourceOrder[1] = ResourceType.Memory;
        }
    }
}
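// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the policy class): the worked example from
// the remark above, assuming both resource weights are 1.0. Plain floats stand
// in for ResourceWeights and strings for ResourceType.
using System;

static class ShareSketch
{
    static void Main()
    {
        // resource = <10 MB, 5 CPU>, pool = <100 MB, 10 CPU>, weights = 1.0
        float memShare = 10f / (100f * 1.0f);   // 0.1
        float cpuShare = 5f / (10f * 1.0f);     // 0.5
        // order the resources by largest share: CPU first, then MEMORY
        string[] resourceOrder = memShare > cpuShare
            ? new[] { "MEMORY", "CPU" }
            : new[] { "CPU", "MEMORY" };
        Console.WriteLine($"shares = [{memShare}, {cpuShare}], order = [{string.Join(", ", resourceOrder)}]");
    }
}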
protected internal virtual void RefreshMaximumAllocation(Org.Apache.Hadoop.Yarn.Api.Records.Resource newMaxAlloc)
{
    maxAllocWriteLock.Lock();
    try
    {
        configuredMaximumAllocation = Resources.Clone(newMaxAlloc);
        int maxMemory = newMaxAlloc.GetMemory();
        if (maxNodeMemory != -1)
        {
            maxMemory = Math.Min(maxMemory, maxNodeMemory);
        }
        int maxVcores = newMaxAlloc.GetVirtualCores();
        if (maxNodeVCores != -1)
        {
            maxVcores = Math.Min(maxVcores, maxNodeVCores);
        }
        maximumAllocation = Resources.CreateResource(maxMemory, maxVcores);
    }
    finally
    {
        maxAllocWriteLock.Unlock();
    }
}
protected internal virtual void UpdateMaximumAllocation(SchedulerNode node, bool add)
{
    Org.Apache.Hadoop.Yarn.Api.Records.Resource totalResource = node.GetTotalResource();
    maxAllocWriteLock.Lock();
    try
    {
        if (add)
        {
            // added node
            int nodeMemory = totalResource.GetMemory();
            if (nodeMemory > maxNodeMemory)
            {
                maxNodeMemory = nodeMemory;
                maximumAllocation.SetMemory(Math.Min(configuredMaximumAllocation.GetMemory(), maxNodeMemory));
            }
            int nodeVCores = totalResource.GetVirtualCores();
            if (nodeVCores > maxNodeVCores)
            {
                maxNodeVCores = nodeVCores;
                maximumAllocation.SetVirtualCores(Math.Min(configuredMaximumAllocation.GetVirtualCores(), maxNodeVCores));
            }
        }
        else
        {
            // removed node
            if (maxNodeMemory == totalResource.GetMemory())
            {
                maxNodeMemory = -1;
            }
            if (maxNodeVCores == totalResource.GetVirtualCores())
            {
                maxNodeVCores = -1;
            }
            // We only have to iterate through the nodes if the current max memory
            // or vcores was equal to the removed node's
            if (maxNodeMemory == -1 || maxNodeVCores == -1)
            {
                foreach (KeyValuePair<NodeId, N> nodeEntry in nodes)
                {
                    int nodeMemory = nodeEntry.Value.GetTotalResource().GetMemory();
                    if (nodeMemory > maxNodeMemory)
                    {
                        maxNodeMemory = nodeMemory;
                    }
                    int nodeVCores = nodeEntry.Value.GetTotalResource().GetVirtualCores();
                    if (nodeVCores > maxNodeVCores)
                    {
                        maxNodeVCores = nodeVCores;
                    }
                }
                if (maxNodeMemory == -1)
                {
                    // no nodes
                    maximumAllocation.SetMemory(configuredMaximumAllocation.GetMemory());
                }
                else
                {
                    maximumAllocation.SetMemory(Math.Min(configuredMaximumAllocation.GetMemory(), maxNodeMemory));
                }
                if (maxNodeVCores == -1)
                {
                    // no nodes
                    maximumAllocation.SetVirtualCores(configuredMaximumAllocation.GetVirtualCores());
                }
                else
                {
                    maximumAllocation.SetVirtualCores(Math.Min(configuredMaximumAllocation.GetVirtualCores(), maxNodeVCores));
                }
            }
        }
    }
    finally
    {
        maxAllocWriteLock.Unlock();
    }
}
public virtual void TestHeadroom()
{
    FairScheduler mockScheduler = Org.Mockito.Mockito.Mock<FairScheduler>();
    Org.Mockito.Mockito.When(mockScheduler.GetClock()).ThenReturn(scheduler.GetClock());
    FSLeafQueue mockQueue = Org.Mockito.Mockito.Mock<FSLeafQueue>();
    Resource queueMaxResources = Resource.NewInstance(5 * 1024, 3);
    Org.Apache.Hadoop.Yarn.Api.Records.Resource queueFairShare = Resources.CreateResource(4096, 2);
    Org.Apache.Hadoop.Yarn.Api.Records.Resource queueUsage = Org.Apache.Hadoop.Yarn.Api.Records.Resource.NewInstance(2048, 2);
    Org.Apache.Hadoop.Yarn.Api.Records.Resource queueStarvation = Resources.Subtract(queueFairShare, queueUsage);
    Org.Apache.Hadoop.Yarn.Api.Records.Resource queueMaxResourcesAvailable = Resources.Subtract(queueMaxResources, queueUsage);
    Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterResource = Resources.CreateResource(8192, 8);
    Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterUsage = Resources.CreateResource(2048, 2);
    Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterAvailable = Resources.Subtract(clusterResource, clusterUsage);
    QueueMetrics fakeRootQueueMetrics = Org.Mockito.Mockito.Mock<QueueMetrics>();
    Org.Mockito.Mockito.When(mockQueue.GetMaxShare()).ThenReturn(queueMaxResources);
    Org.Mockito.Mockito.When(mockQueue.GetFairShare()).ThenReturn(queueFairShare);
    Org.Mockito.Mockito.When(mockQueue.GetResourceUsage()).ThenReturn(queueUsage);
    Org.Mockito.Mockito.When(mockScheduler.GetClusterResource()).ThenReturn(clusterResource);
    Org.Mockito.Mockito.When(fakeRootQueueMetrics.GetAllocatedResources()).ThenReturn(clusterUsage);
    Org.Mockito.Mockito.When(mockScheduler.GetRootQueueMetrics()).ThenReturn(fakeRootQueueMetrics);
    ApplicationAttemptId applicationAttemptId = CreateAppAttemptId(1, 1);
    RMContext rmContext = resourceManager.GetRMContext();
    FSAppAttempt schedulerApp = new FSAppAttempt(mockScheduler, applicationAttemptId, "user1", mockQueue, null, rmContext);
    // Min of Memory and CPU across cluster and queue is used in
    // DominantResourceFairnessPolicy
    Org.Mockito.Mockito.When(mockQueue.GetPolicy()).ThenReturn(SchedulingPolicy.GetInstance(typeof(DominantResourceFairnessPolicy)));
    VerifyHeadroom(schedulerApp,
        Min(queueStarvation.GetMemory(), clusterAvailable.GetMemory(), queueMaxResourcesAvailable.GetMemory()),
        Min(queueStarvation.GetVirtualCores(), clusterAvailable.GetVirtualCores(), queueMaxResourcesAvailable.GetVirtualCores()));
    // Fair and Fifo ignore CPU of queue, so use cluster available CPU
    Org.Mockito.Mockito.When(mockQueue.GetPolicy()).ThenReturn(SchedulingPolicy.GetInstance(typeof(FairSharePolicy)));
    VerifyHeadroom(schedulerApp,
        Min(queueStarvation.GetMemory(), clusterAvailable.GetMemory(), queueMaxResourcesAvailable.GetMemory()),
        Math.Min(clusterAvailable.GetVirtualCores(), queueMaxResourcesAvailable.GetVirtualCores()));
    Org.Mockito.Mockito.When(mockQueue.GetPolicy()).ThenReturn(SchedulingPolicy.GetInstance(typeof(FifoPolicy)));
    VerifyHeadroom(schedulerApp,
        Min(queueStarvation.GetMemory(), clusterAvailable.GetMemory(), queueMaxResourcesAvailable.GetMemory()),
        Math.Min(clusterAvailable.GetVirtualCores(), queueMaxResourcesAvailable.GetVirtualCores()));
}
public virtual void Subtract(Org.Apache.Hadoop.Yarn.Api.Records.Resource r)
{
    memory -= r.GetMemory();
    vcores -= r.GetVirtualCores();
}
public virtual void Add(Org.Apache.Hadoop.Yarn.Api.Records.Resource r)
{
    memory += r.GetMemory();
    vcores += r.GetVirtualCores();
}
public IntegralResource(Org.Apache.Hadoop.Yarn.Api.Records.Resource resource)
{
    this.memory = resource.GetMemory();
    this.vcores = resource.GetVirtualCores();
}