/// <summary>
/// Creates a command that launches a process inside the given container.
/// </summary>
/// <param name="container">Container the process will run in; forwarded to the base command.</param>
/// <param name="arguments">Raw command arguments; forwarded to the base command.</param>
/// <param name="shouldImpersonate">Stored for later use when the process is started (presumably controls running under the container user's identity — confirm against callers).</param>
/// <param name="rlimits">Resource limits stored for later use when the process is started.</param>
public ProcessCommand(Container container, string[] arguments, bool shouldImpersonate, ResourceLimits rlimits)
    : base(container, arguments)
{
    // Independent assignments; only recorded here, applied when the command runs.
    this.rlimits = rlimits;
    this.shouldImpersonate = shouldImpersonate;
}
/// <summary>
/// Creates a command that hosts a web application inside the container.
/// The first argument is the port to listen on; an optional second argument
/// selects the CLR runtime version ('2.0' or '4.0').
/// </summary>
/// <param name="container">Container the application runs in; forwarded to the base command.</param>
/// <param name="arguments">Command arguments: [0] = port (required), [1] = runtime version (optional).</param>
/// <param name="shouldImpersonate">Forwarded to the base command.</param>
/// <param name="rlimits">Forwarded to the base command.</param>
/// <exception cref="ArgumentException">
/// Thrown when no arguments are given, the port argument is blank, or the
/// runtime version is not one of the supported values.
/// </exception>
public WebApplicationCommand(Container container, string[] arguments, bool shouldImpersonate, ResourceLimits rlimits)
    : base(container, arguments, shouldImpersonate, rlimits)
{
    if (arguments.IsNullOrEmpty())
    {
        throw new ArgumentException("Expected one or more arguments");
    }

    if (String.IsNullOrWhiteSpace(arguments[0]))
    {
        throw new ArgumentException("Expected port as first argument");
    }

    port = arguments[0];

    if (arguments.Length > 1)
    {
        // Validate the optional runtime version; the redundant else-after-throw
        // in the original has been flattened.
        if (arguments[1] != RuntimeVersionTwo && arguments[1] != RuntimeVersionFour)
        {
            throw new ArgumentException("Expected runtime version value of '2.0' or '4.0', default is '4.0'.");
        }

        runtimeVersion = arguments[1];
    }
}
/// <summary>
/// Creates a command that runs PowerShell inside the container.
/// </summary>
/// <param name="container">Container the command runs in; forwarded to the base command.</param>
/// <param name="arguments">Script arguments; at least one is required.</param>
/// <param name="shouldImpersonate">Forwarded to the base command.</param>
/// <param name="rlimits">Forwarded to the base command.</param>
/// <exception cref="ArgumentException">Thrown when no arguments were supplied.</exception>
public PowershellCommand(Container container, string[] arguments, bool shouldImpersonate, ResourceLimits rlimits)
    : base(container, arguments, shouldImpersonate, rlimits)
{
    // Checks the arguments stored by the base constructor rather than the
    // local parameter — presumably equivalent; confirm the base does not
    // transform the array.
    if (base.arguments.IsNullOrEmpty())
    {
        throw new ArgumentException("powershell: command must have at least one argument.");
    }
}
/// <summary>
/// Creates a factory that builds task commands bound to a single container.
/// </summary>
/// <param name="container">The container created commands will target; must not be null.</param>
/// <param name="shouldImpersonate">Stored for use by the commands this factory creates.</param>
/// <param name="rlimits">Resource limits stored for use by created commands.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="container"/> is null.</exception>
public TaskCommandFactory(Container container, bool shouldImpersonate, ResourceLimits rlimits)
{
    if (container == null)
    {
        // nameof keeps the parameter name refactor-safe; the runtime string
        // is identical to the original literal "container".
        throw new ArgumentNullException(nameof(container));
    }

    this.container = container;
    this.shouldImpersonate = shouldImpersonate;
    this.rlimits = rlimits;
}
/// <summary>
/// LimitMemory should change both the Area and Memory limits away from their
/// current values. The original restore code ran after the asserts, so a
/// failing assertion leaked mutated process-wide limits into other tests;
/// the restore now happens in a finally block.
/// </summary>
public void ShouldChangeAreaAndMemory()
{
    // Snapshot the global limits so they can be restored afterwards.
    var area = ResourceLimits.Area;
    var memory = ResourceLimits.Memory;

    try
    {
        ResourceLimits.LimitMemory((Percentage)80);

        Assert.NotEqual(area, ResourceLimits.Area);
        Assert.NotEqual(memory, ResourceLimits.Memory);
    }
    finally
    {
        // Restore even when an assertion throws, so other tests see the
        // original process-wide limits.
        ResourceLimits.Area = area;
        ResourceLimits.Memory = memory;
    }
}
/// <summary>
/// LimitMemory(100%) is expected to roughly double Area and Memory (within an
/// 8192-byte tolerance). The original restored the limits after the asserts,
/// so a failing assertion leaked mutated process-wide limits; the restore now
/// happens in a finally block.
/// </summary>
public void ShouldSetMemoryAndAreaToTheCorrectValues()
{
    var area = ResourceLimits.Area;
    var memory = ResourceLimits.Memory;

    try
    {
        ResourceLimits.LimitMemory((Percentage)100);

        Assert.InRange(ResourceLimits.Area, (area * 2) - 8192, (area * 2) + 8192);
        Assert.InRange(ResourceLimits.Memory, (memory * 2) - 8192, (memory * 2) + 8192);
    }
    finally
    {
        // Always restore the global limits for subsequent tests.
        ResourceLimits.Area = area;
        ResourceLimits.Memory = memory;
    }
}
/// <summary>
/// One-time project initialization: enables OpenCL for ImageMagick, raises the
/// ImageMagick memory limit to 100%, creates the initial empty layer state and
/// checkerboard backgrounds, and subscribes auto-save to layer events.
/// </summary>
private void InitializeProject()
{
    // Allow ImageMagick to use OpenCL acceleration and up to 100% of memory.
    OpenCL.IsEnabled = true;
    ResourceLimits.LimitMemory(new Percentage(100));

    // Start with a single empty layer selected and an empty layer collection.
    selectedLayer = new EmptyLayer(this);
    layers = new ObservableCollection<Layer>();

    // Two checkerboard backgrounds at different cell sizes (90 and 10) —
    // presumably full-size canvas vs. thumbnail; TODO confirm.
    checkerBoardLarge = size.GetCheckerBoard(90);
    checkerBoardSmall = size.GetCheckerBoard(10);

    // Auto-save on every layer mutation; LayerActionChanged additionally
    // notifies OnLayerActionChanged.
    EventBus.LayerActionChanged += OnLayerActionChanged;
    EventBus.LayerActionChanged += OnAutoSave;
    EventBus.LayerActionAdded += OnAutoSave;
    EventBus.LayerActionRemoved += OnAutoSave;
    EventBus.LayerBlendChanged += OnAutoSave;
}
/// <summary>
/// LimitMemory should change both Area and Memory, executed under the shared
/// test lock because the limits are process-wide state. The original restored
/// the limits after the asserts, so a failing assertion leaked mutated global
/// limits; the restore now happens in a finally block.
/// </summary>
public void ShouldChangeAreaAndMemory()
{
    TestHelper.ExecuteInsideLock(() =>
    {
        var area = ResourceLimits.Area;
        var memory = ResourceLimits.Memory;

        try
        {
            ResourceLimits.LimitMemory((Percentage)80);

            Assert.NotEqual(area, ResourceLimits.Area);
            Assert.NotEqual(memory, ResourceLimits.Memory);
        }
        finally
        {
            // Restore even when an assertion throws.
            ResourceLimits.Area = area;
            ResourceLimits.Memory = memory;
        }
    });
}
/// <summary>
/// LimitMemory(100%) is expected to roughly double Area and Memory (8192
/// tolerance), executed under the shared test lock because the limits are
/// process-wide. The original restored the limits after the asserts, so a
/// failing assertion leaked mutated global limits; the restore now happens in
/// a finally block.
/// </summary>
public void ShouldSetMemoryAndAreaToTheCorrectValues()
{
    ExecuteInsideLock(() =>
    {
        var area = ResourceLimits.Area;
        var memory = ResourceLimits.Memory;

        try
        {
            ResourceLimits.LimitMemory((Percentage)100);

            Assert.AreEqual(area * 2, ResourceLimits.Area, 8192);
            Assert.AreEqual(memory * 2, ResourceLimits.Memory, 8192);
        }
        finally
        {
            // Restore even when an assertion throws.
            ResourceLimits.Area = area;
            ResourceLimits.Memory = memory;
        }
    });
}
/// <summary>
/// Serializes the object to JSON.
/// </summary>
/// <param name="writer">The <see cref="T: Newtonsoft.Json.JsonWriter" /> to write to.</param>
/// <param name="obj">The object to serialize to JSON.</param>
internal static void Serialize(JsonWriter writer, ResourceLimits obj)
{
    writer.WriteStartObject();

    // Optional properties are emitted only when present.
    var memoryInGB = obj.MemoryInGB;
    if (memoryInGB != null)
    {
        writer.WriteProperty(memoryInGB, "memoryInGB", JsonWriterExtensions.WriteDoubleValue);
    }

    var cpu = obj.Cpu;
    if (cpu != null)
    {
        writer.WriteProperty(cpu, "cpu", JsonWriterExtensions.WriteDoubleValue);
    }

    writer.WriteEndObject();
}
/// <summary>
/// Offers the node's resources to child queues in their current (sorted)
/// order and returns the first non-empty assignment. When a child accepts an
/// allocation it is removed and re-inserted into the child collection so the
/// ordering reflects its new usage.
/// NOTE(review): this is machine-translated Java (Hadoop capacity scheduler);
/// lock(this) mirrors the original synchronized method.
/// </summary>
private CSAssignment AssignContainersToChildQueues(Org.Apache.Hadoop.Yarn.Api.Records.Resource cluster, FiCaSchedulerNode node, ResourceLimits limits)
{
    lock (this)
    {
        // Default result: an empty, node-local assignment.
        CSAssignment assignment = new CSAssignment(Resources.CreateResource(0, 0), NodeType.NodeLocal);
        PrintChildQueues();
        // Try to assign to most 'under-served' sub-queue
        for (IEnumerator<CSQueue> iter = childQueues.GetEnumerator(); iter.HasNext();)
        {
            CSQueue childQueue = iter.Next();
            if (Log.IsDebugEnabled())
            {
                Log.Debug("Trying to assign to queue: " + childQueue.GetQueuePath() + " stats: " + childQueue);
            }
            // Get ResourceLimits of child queue before assign containers
            ResourceLimits childLimits = GetResourceLimitsOfChild(childQueue, cluster, limits);
            assignment = childQueue.AssignContainers(cluster, node, childLimits);
            if (Log.IsDebugEnabled())
            {
                Log.Debug("Assigned to queue: " + childQueue.GetQueuePath() + " stats: " + childQueue + " --> " + assignment.GetResource() + ", " + assignment.GetType());
            }
            // If we do assign, remove the queue and re-insert in-order to re-sort
            if (Resources.GreaterThan(resourceCalculator, cluster, assignment.GetResource(), Resources.None()))
            {
                // Remove and re-insert to sort; removal goes through the
                // iterator so the ongoing enumeration stays valid.
                iter.Remove();
                Log.Info("Re-sorting assigned queue: " + childQueue.GetQueuePath() + " stats: " + childQueue);
                childQueues.AddItem(childQueue);
                if (Log.IsDebugEnabled())
                {
                    PrintChildQueues();
                }
                // Stop after the first successful assignment in this pass.
                break;
            }
        }
        return (assignment);
    }
}
/// <summary>
/// Propagates a cluster-resource change to every child queue (each with its
/// freshly computed child limit) and then refreshes this queue's statistics.
/// NOTE(review): lock(this) mirrors the original Java synchronized method.
/// </summary>
public override void UpdateClusterResource(Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterResource, ResourceLimits resourceLimits)
{
    lock (this)
    {
        // Update all children
        foreach (CSQueue childQueue in childQueues)
        {
            // Get ResourceLimits of child queue before assign containers
            ResourceLimits childLimits = GetResourceLimitsOfChild(childQueue, clusterResource, resourceLimits);
            childQueue.UpdateClusterResource(clusterResource, childLimits);
        }
        // Update metrics
        CSQueueUtils.UpdateQueueStatistics(resourceCalculator, this, parent, clusterResource, minimumAllocation);
    }
}
/// <summary>
/// Computes the resource limit handed down to a child queue:
/// min(parent-limit - parent-used + child-used, child-configured-max),
/// rounded down to a multiple of the minimum allocation.
/// </summary>
/// <param name="child">The child queue to compute a limit for.</param>
/// <param name="clusterResource">Total cluster resource, used for normalization and comparison.</param>
/// <param name="parentLimits">This (parent) queue's own limit.</param>
/// <returns>A new <see cref="ResourceLimits"/> wrapping the child's limit.</returns>
private ResourceLimits GetResourceLimitsOfChild(CSQueue child, Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterResource, ResourceLimits parentLimits)
{
    // Set resource-limit of a given child, child.limit =
    // min(my.limit - my.used + child.used, child.max)
    // Parent available resource = parent-limit - parent-used-resource
    Org.Apache.Hadoop.Yarn.Api.Records.Resource parentMaxAvailableResource = Resources.Subtract(parentLimits.GetLimit(), GetUsedResources());
    // Child's limit = parent-available-resource + child-used
    Org.Apache.Hadoop.Yarn.Api.Records.Resource childLimit = Resources.Add(parentMaxAvailableResource, child.GetUsedResources());
    // Get child's max resource, scaled from its absolute maximum capacity
    // against the no-label cluster resource.
    Org.Apache.Hadoop.Yarn.Api.Records.Resource childConfiguredMaxResource = Resources.MultiplyAndNormalizeDown(resourceCalculator, labelManager.GetResourceByLabel(RMNodeLabelsManager.NoLabel, clusterResource), child.GetAbsoluteMaximumCapacity(), minimumAllocation);
    // Child's limit should be capped by child configured max resource
    childLimit = Resources.Min(resourceCalculator, clusterResource, childLimit, childConfiguredMaxResource);
    // Normalize before return
    childLimit = Resources.RoundDown(resourceCalculator, childLimit, minimumAllocation);
    return (new ResourceLimits(childLimit));
}
/// <summary>
/// Creates a command that runs an executable inside the container. The first
/// argument is the executable name; any remaining arguments are joined into a
/// single space-separated argument string.
/// </summary>
/// <param name="container">Container the executable runs in; forwarded to the base command.</param>
/// <param name="arguments">[0] = executable name (required, non-blank), [1..] = its arguments.</param>
/// <param name="shouldImpersonate">Forwarded to the base command.</param>
/// <param name="rlimits">Forwarded to the base command.</param>
/// <exception cref="ArgumentNullException">
/// Thrown when <paramref name="arguments"/> is null/empty or the first
/// argument is blank.
/// </exception>
public ExeCommand(Container container, string[] arguments, bool shouldImpersonate, ResourceLimits rlimits)
    : base(container, arguments, shouldImpersonate, rlimits)
{
    if (arguments.IsNullOrEmpty())
    {
        throw new ArgumentNullException("arguments");
    }

    this.executable = arguments[0];
    if (this.executable.IsNullOrWhiteSpace())
    {
        // The original passed the message text to the single-string
        // ArgumentNullException constructor, which treats it as the parameter
        // name and renders a garbled message. Use the (paramName, message)
        // overload so the message displays correctly; the exception type is
        // unchanged for callers that catch it.
        throw new ArgumentNullException("arguments", "First argument must be executable name.");
    }

    if (arguments.Length > 1)
    {
        this.args = String.Join(" ", arguments.Skip(1));
    }
}
/// <summary>
/// Decides whether this queue may absorb <paramref name="nowRequired"/> more
/// resource. For the first label this queue can access on the node it compares
/// (used + required) against the current limit; if over limit it may still
/// return true when continuous-reservation looking shows enough reserved
/// resource could be unreserved (recording the amount needed via
/// SetAmountNeededUnreserve). NOTE(review): only the FIRST accessible label is
/// ever evaluated — every branch of the foreach body returns; this mirrors the
/// translated Hadoop Java source. lock(this) mirrors the original synchronized
/// method.
/// </summary>
/// <returns>true when the queue can take the additional resource (possibly after unreserving), false otherwise.</returns>
internal virtual bool CanAssignToThisQueue(Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterResource, ICollection<string> nodeLabels, ResourceLimits currentResourceLimits, Org.Apache.Hadoop.Yarn.Api.Records.Resource nowRequired, Org.Apache.Hadoop.Yarn.Api.Records.Resource resourceCouldBeUnreserved)
{
    lock (this)
    {
        // Get label of this queue can access, it's (nodeLabel AND queueLabel)
        ICollection<string> labelCanAccess;
        if (null == nodeLabels || nodeLabels.IsEmpty())
        {
            labelCanAccess = new HashSet<string>();
            // Any queue can always access any node without label
            labelCanAccess.AddItem(RMNodeLabelsManager.NoLabel);
        }
        else
        {
            labelCanAccess = new HashSet<string>(accessibleLabels.Contains(CommonNodeLabelsManager.Any) ? nodeLabels : Sets.Intersection(accessibleLabels, nodeLabels));
        }
        foreach (string label in labelCanAccess)
        {
            // New total resource = used + required
            Org.Apache.Hadoop.Yarn.Api.Records.Resource newTotalResource = Resources.Add(queueUsage.GetUsed(label), nowRequired);
            Org.Apache.Hadoop.Yarn.Api.Records.Resource currentLimitResource = GetCurrentLimitResource(label, clusterResource, currentResourceLimits);
            if (Resources.GreaterThan(resourceCalculator, clusterResource, newTotalResource, currentLimitResource))
            {
                // if reservation continous looking enabled, check to see if could we
                // potentially use this node instead of a reserved node if the application
                // has reserved containers.
                // TODO, now only consider reservation cases when the node has no label
                if (this.reservationsContinueLooking && label.Equals(RMNodeLabelsManager.NoLabel) && Resources.GreaterThan(resourceCalculator, clusterResource, resourceCouldBeUnreserved, Resources.None()))
                {
                    // resource-without-reserved = used - reserved
                    Org.Apache.Hadoop.Yarn.Api.Records.Resource newTotalWithoutReservedResource = Resources.Subtract(newTotalResource, resourceCouldBeUnreserved);
                    // when total-used-without-reserved-resource < currentLimit, we still
                    // have chance to allocate on this node by unreserving some containers
                    if (Resources.LessThan(resourceCalculator, clusterResource, newTotalWithoutReservedResource, currentLimitResource))
                    {
                        if (Log.IsDebugEnabled())
                        {
                            Log.Debug("try to use reserved: " + GetQueueName() + " usedResources: " + queueUsage.GetUsed() + ", clusterResources: " + clusterResource + ", reservedResources: " + resourceCouldBeUnreserved + ", capacity-without-reserved: " + newTotalWithoutReservedResource + ", maxLimitCapacity: " + currentLimitResource);
                        }
                        // Record how much must be unreserved for this allocation
                        // to fit within the limit.
                        currentResourceLimits.SetAmountNeededUnreserve(Resources.Subtract(newTotalResource, currentLimitResource));
                        return (true);
                    }
                }
                if (Log.IsDebugEnabled())
                {
                    Log.Debug(GetQueueName() + "Check assign to queue, label=" + label + " usedResources: " + queueUsage.GetUsed(label) + " clusterResources: " + clusterResource + " currentUsedCapacity " + Resources.Divide(resourceCalculator, clusterResource, queueUsage.GetUsed(label), labelManager.GetResourceByLabel(label, clusterResource)) + " max-capacity: " + queueCapacities.GetAbsoluteMaximumCapacity(label) + ")");
                }
                return (false);
            }
            // Within limit for this label.
            return (true);
        }
        // Actually, this will not happen, since labelCanAccess will be always
        // non-empty
        return (false);
    }
}
/// <summary>
/// Attempts to assign containers on the given node, subject to the supplied
/// resource limits, and returns the resulting assignment. Implemented by
/// concrete queue types. (Generated parameter names: arg1 = cluster resource,
/// arg2 = scheduler node, arg3 = resource limits — see the overrides.)
/// </summary>
public abstract CSAssignment AssignContainers(Org.Apache.Hadoop.Yarn.Api.Records.Resource arg1, FiCaSchedulerNode arg2, ResourceLimits arg3);
/// <summary>
/// Notifies the queue that the total cluster resource (arg1) and its resource
/// limits (arg2) have changed. Implemented by concrete queue types; parent
/// queues propagate the update to their children.
/// </summary>
public abstract void UpdateClusterResource(Org.Apache.Hadoop.Yarn.Api.Records.Resource arg1, ResourceLimits arg2);
/// <summary>
/// Registers the process with the underlying process manager.
/// NOTE(review): the <paramref name="rlimits"/> parameter is currently unused —
/// confirm whether resource limits are meant to be applied here or are handled
/// elsewhere.
/// </summary>
/// <param name="process">The process to register.</param>
/// <param name="rlimits">Resource limits (presently ignored).</param>
public void AddProcess(Process process, ResourceLimits rlimits)
{
    processManager.AddProcess(process);
}
/// <summary>
/// Returns the effective limit for this queue under the given node label:
/// the queue's maximum resource for that label, additionally capped by the
/// parent-supplied limit when the label is the no-label partition.
/// </summary>
/// <param name="nodeLabel">Node label partition being evaluated.</param>
/// <param name="clusterResource">Total cluster resource, used for scaling and comparison.</param>
/// <param name="currentResourceLimits">Limit handed down by the parent queue.</param>
private Org.Apache.Hadoop.Yarn.Api.Records.Resource GetCurrentLimitResource(string nodeLabel, Org.Apache.Hadoop.Yarn.Api.Records.Resource clusterResource, ResourceLimits currentResourceLimits)
{
    /*
     * Current limit resource: For labeled resource: limit = queue-max-resource
     * (TODO, this part need update when we support labeled-limit) For
     * non-labeled resource: limit = min(queue-max-resource,
     * limit-set-by-parent)
     */
    Org.Apache.Hadoop.Yarn.Api.Records.Resource queueMaxResource = Resources.MultiplyAndNormalizeDown(resourceCalculator, labelManager.GetResourceByLabel(nodeLabel, clusterResource), queueCapacities.GetAbsoluteMaximumCapacity(nodeLabel), minimumAllocation);
    if (nodeLabel.Equals(RMNodeLabelsManager.NoLabel))
    {
        // No-label partition: also respect the parent's limit.
        return (Resources.Min(resourceCalculator, clusterResource, queueMaxResource, currentResourceLimits.GetLimit()));
    }
    return (queueMaxResource);
}
/// <summary>
/// LimitMemory must reject a negative percentage with an
/// ArgumentOutOfRangeException naming the 'percentage' parameter.
/// </summary>
public void ShouldThrowExceptionWhenValueIsNegative()
{
    Action limitWithNegativePercentage = () => ResourceLimits.LimitMemory(new Percentage(-0.99));

    Assert.Throws<ArgumentOutOfRangeException>("percentage", limitWithNegativePercentage);
}
/// <summary>
/// Converts a triplet of single-channel RAW files (R, G, B) into one
/// interleaved 16-bit RGB image and writes it as EXR (via Magick.NET, with
/// optional write-verify-retry integrity checking) or as TIFF (via LibTiff).
/// </summary>
/// <param name="srcRGBTriplet">Paths of the red, green and blue source files, in that order.</param>
/// <param name="shotSettings">Per-shot settings; its length determines how many source buffers are read and merged.</param>
/// <param name="targetFilename">Output file path.</param>
/// <param name="targetFormat">Output container: EXR or TIF.</param>
/// <param name="inputFormat">Source encoding; MONO12p input is unpacked to 16-bit first.</param>
/// <param name="maxThreads">Thread cap handed to ImageMagick for EXR encoding.</param>
/// <param name="HDRClippingPoint">Forwarded to HDRMerge.</param>
/// <param name="HDRFeatherMultiplier">Forwarded to HDRMerge.</param>
/// <param name="EXRIntegrityVerification">When true, each EXR is re-read and verified, retrying on mismatch.</param>
private void ProcessRAW(string[] srcRGBTriplet, ShotSetting[] shotSettings, string targetFilename, TARGETFORMAT targetFormat, FORMAT inputFormat, int maxThreads, float HDRClippingPoint, float HDRFeatherMultiplier, bool EXRIntegrityVerification)
{
    // Read every source file; unpack 12-bit-packed data to 16 bit if needed.
    int groupLength = shotSettings.Length;
    byte[][] buffers = new byte[groupLength][];
    for (int i = 0; i < groupLength; i++)
    {
        buffers[i] = File.ReadAllBytes(srcRGBTriplet[i]);
        if (inputFormat == FORMAT.MONO12p)
        {
            buffers[i] = convert12pto16bit(buffers[i]);
        }
    }

    // Dimensions come from UI text boxes, read on the dispatcher thread.
    // NOTE(review): the width/height presets (then 1) are overwritten by
    // int.TryParse, which sets the out variable to 0 on failure — a failed
    // parse yields 0, not the preset. Confirm intended fallback behavior.
    int width = 4096;
    int height = 3000;
    this.Dispatcher.Invoke(() =>
    {
        width = 1;
        int.TryParse(rawWidth.Text, out width);
        height = 1;
        int.TryParse(rawHeight.Text, out height);
    });

    // Merge the exposure group into one R, G and B plane each.
    byte[][] RGBBuffers = HDRMerge(buffers, shotSettings, HDRClippingPoint, HDRFeatherMultiplier);
    byte[] buffR = RGBBuffers[0];
    byte[] buffG = RGBBuffers[1];
    byte[] buffB = RGBBuffers[2];

    // Interleave
    int pixelCount = width * height;
    int totalLength = width * height * 3;
    byte[] buff = new byte[totalLength * 2];

    // Abort (with a UI message) if any plane is shorter than 2 bytes/pixel.
    if (buffR.Count() < pixelCount * 2)
    {
        this.Dispatcher.Invoke(() => { MessageBox.Show("Red file too short: " + srcRGBTriplet[0]); });
        return;
    }
    if (buffG.Count() < pixelCount * 2)
    {
        this.Dispatcher.Invoke(() => { MessageBox.Show("Green file too short: " + srcRGBTriplet[1]); });
        return;
    }
    if (buffB.Count() < pixelCount * 2)
    {
        this.Dispatcher.Invoke(() => { MessageBox.Show("Blue file too short: " + srcRGBTriplet[2]); });
        return;
    }

    // Interleave the three 16-bit planes into RGB triplets (little pairs of
    // bytes per channel).
    for (int pixelIndex = 0; pixelIndex < pixelCount; pixelIndex++)
    {
        /*
         * // BGR
         * buff[pixelIndex * 3 * 2] = buffB[pixelIndex * 2];
         * buff[pixelIndex * 3 * 2 + 1] = buffB[pixelIndex * 2 + 1];
         * buff[pixelIndex * 3 * 2 +4] = buffR[pixelIndex*2];
         * buff[pixelIndex * 3 * 2 +5] = buffR[pixelIndex * 2 + 1];
         * buff[pixelIndex * 3 * 2 +2] = buffG[pixelIndex * 2];
         * buff[pixelIndex * 3 * 2 +3] = buffG[pixelIndex * 2 + 1];
         */
        // RGB
        buff[pixelIndex * 3 * 2] = buffR[pixelIndex * 2];
        buff[pixelIndex * 3 * 2 + 1] = buffR[pixelIndex * 2 + 1];
        buff[pixelIndex * 3 * 2 + 2] = buffG[pixelIndex * 2];
        buff[pixelIndex * 3 * 2 + 3] = buffG[pixelIndex * 2 + 1];
        buff[pixelIndex * 3 * 2 + 4] = buffB[pixelIndex * 2];
        buff[pixelIndex * 3 * 2 + 5] = buffB[pixelIndex * 2 + 1];
    }

    string fileName = targetFilename;
    if (targetFormat == TARGETFORMAT.EXR)
    {
        // Configure ImageMagick's global resource limits for the encode.
        ResourceLimits.Thread = (ulong)maxThreads;
        ResourceLimits.LimitMemory(new Percentage(90));
        MagickReadSettings settings = new MagickReadSettings();
        settings.Width = width;
        settings.Height = height;
        settings.Format = MagickFormat.Rgb;
        // Correction, this is actually right, I had flipped RGB to BGR elsewhere in the code before. Fixed now.
        /*ColorManager.ICC.ICCProfileWriter iccWriter = new ColorManager.ICC.ICCProfileWriter();
         * iccWriter.WriteProfile(new ColorManager.ICC.ICCProfile()); */
        if (EXRIntegrityVerification)
        {
            /*
             * Info on half float format: https://www.openexr.com/about.html
             */
            // What does this mean for precision of converting 16 bit integers to 16 bit floating point?
            // We need to know the maximum precision achievable to be able to tell rounding errors from actual integrity fails.
            // More info here: https://en.wikipedia.org/wiki/Half-precision_floating-point_format
            // Basically, precision at any given value is 11 bits or 2048 values.
            int integrityCheckFailCountLocal = 0;
            bool integrityCheckPassed = false;
            bool retriesExhausted = false;
            // Encode, re-read and verify; retry until the round-trip matches or
            // retries run out.
            while (!integrityCheckPassed && !retriesExhausted)
            {
                using (var image = new MagickImage(buff, settings))
                {
                    //ExifProfile profile = new ExifProfile();
                    //profile.SetValue(ExifTag.UserComment, Encoding.ASCII.GetBytes(srcRGBTriplet[0] + "," + srcRGBTriplet[1] + "," + srcRGBTriplet[2]));
                    //image.SetProfile(profile);
                    image.Format = MagickFormat.Exr;
                    image.Settings.Compression = CompressionMethod.Piz;
                    //image.Write(fileName);
                    byte[] exrFile = image.ToByteArray();
                    bool integrityCheckFailed = false;
                    // Round-trip: decode the EXR we just produced and compare
                    // against the original 16-bit data.
                    using (var reloadedImage = new MagickImage(exrFile))
                    {
                        reloadedImage.Depth = 16;
                        reloadedImage.ColorSpace = ColorSpace.Undefined;
                        byte[] reloadedImageBytes = reloadedImage.ToByteArray(MagickFormat.Rgb);
                        integrityCheckFailed = integrityCheckFailed | !IntegrityChecker.VerifyIntegrityUInt16InHalfPrecisionFloat(buff, reloadedImageBytes);
                    }
                    if (integrityCheckFailed)
                    {
                        // Count both globally and for this file, then retry.
                        integrityCheckFailCount++;
                        integrityCheckFailCountLocal++;
                        continue;
                    }
                    else
                    {
                        integrityCheckPassed = true;
                        File.WriteAllBytes(fileName, exrFile);
                    }
                    if (integrityCheckFailCountLocal > integrityCheckRetries)
                    {
                        retriesExhausted = true;
                        // At this point just write it into a subfolder and be done with it.
                        string failedFolder = Path.GetDirectoryName(fileName) + Path.DirectorySeparatorChar + "FAILED" + Path.DirectorySeparatorChar;
                        Directory.CreateDirectory(failedFolder);
                        string failedFile = failedFolder + Path.GetFileName(fileName);
                        File.WriteAllBytes(failedFile, exrFile);
                    }
                }
            }
        }
        else
        {
            // No verification: encode once and write directly.
            using (var image = new MagickImage(buff, settings))
            {
                //ExifProfile profile = new ExifProfile();
                //profile.SetValue(ExifTag.UserComment, Encoding.ASCII.GetBytes(srcRGBTriplet[0] + "," + srcRGBTriplet[1] + "," + srcRGBTriplet[2]));
                //image.SetProfile(profile);
                image.Format = MagickFormat.Exr;
                image.Settings.Compression = CompressionMethod.Piz;
                //image.Write(fileName);
                byte[] exrFile = image.ToByteArray();
                File.WriteAllBytes(fileName, exrFile);
            }
        }
    }
    else if (targetFormat == TARGETFORMAT.TIF)
    {
        // Write a 16-bit/sample contiguous RGB TIFF with Deflate compression
        // as a single strip.
        using (Tiff output = Tiff.Open(fileName, "w"))
        {
            output.SetField(TiffTag.SUBFILETYPE, 0);
            //output.SetField(TiffTag.ORIGINALRAWFILENAME, srcRGBTriplet[0]+","+srcRGBTriplet[1]+","+srcRGBTriplet[2]);
            output.SetField(TiffTag.IMAGEWIDTH, width);
            output.SetField(TiffTag.IMAGELENGTH, height);
            output.SetField(TiffTag.SAMPLESPERPIXEL, 3);
            output.SetField(TiffTag.BITSPERSAMPLE, 16);
            output.SetField(TiffTag.ORIENTATION, Orientation.TOPLEFT);
            output.SetField(TiffTag.PHOTOMETRIC, Photometric.RGB);
            output.SetField(TiffTag.FILLORDER, FillOrder.MSB2LSB);
            output.SetField(TiffTag.COMPRESSION, Compression.DEFLATE);
            output.SetField(TiffTag.PLANARCONFIG, PlanarConfig.CONTIG);
            output.WriteEncodedStrip(0, buff, width * height * 2 * 3);
        }
    }
}
/// <summary>
/// Initializes a new instance of the <see cref="LimitDetailsForm" /> class.
/// </summary>
/// <param name="limits">Limits to display; may be null, in which case nothing is shown.</param>
/// <param name="name">Caption used for the root node of the limits tree.</param>
public LimitDetailsForm(ResourceLimits limits, string name)
{
    InitializeComponent();
    GetLimitsInfo(limits, name);
}
/// <summary>
/// The GetLimitsInfo method. Populates the limits tree view with one category
/// node per non-empty limit collection (boolean, float, integer, list, object,
/// string), each holding one entry per limit field.
/// NOTE(review): this method disposes the <paramref name="limits"/> argument
/// when done — callers must not use it afterwards; confirm this ownership
/// transfer is intended.
/// </summary>
/// <param name="limits">The <paramref name="limits"/> to get the info for.</param>
/// <param name="name">Caption for the root tree node.</param>
public void GetLimitsInfo(ResourceLimits limits, string name)
{
    // Nothing to display.
    if (limits == null)
    {
        return;
    }
    TreeNode topNode = tvLimits.Nodes.Add(name);
    topNode.Expand();
    if (limits.BooleanLimits.Count > 0)
    {
        var node = topNode.Nodes.Add("Boolean Limits");
        foreach (var booleanLimit in limits.BooleanLimits)
        {
            AddPropertyInfo(booleanLimit, booleanLimit.FieldName, node);
        }
    }
    if (limits.FloatLimits.Count > 0)
    {
        var node = topNode.Nodes.Add("Float Limits");
        foreach (var floatLimit in limits.FloatLimits)
        {
            AddPropertyInfo(floatLimit, floatLimit.FieldName, node);
        }
    }
    if (limits.IntegerLimits.Count > 0)
    {
        var node = topNode.Nodes.Add("Integer Limits");
        foreach (var integerLimit in limits.IntegerLimits)
        {
            AddPropertyInfo(integerLimit, integerLimit.FieldName, node);
        }
    }
    if (limits.ListLimits.Count > 0)
    {
        var node = topNode.Nodes.Add("List Limits");
        foreach (var listLimit in limits.ListLimits)
        {
            AddPropertyInfo(listLimit, listLimit.FieldName, node);
        }
    }
    if (limits.ObjectLimits.Count > 0)
    {
        var node = topNode.Nodes.Add("Object Limits");
        foreach (var objectLimit in limits.ObjectLimits)
        {
            AddPropertyInfo(objectLimit, objectLimit.FieldName, node);
        }
    }
    if (limits.StringLimits.Count > 0)
    {
        var node = topNode.Nodes.Add("String Limits");
        foreach (var stringLimit in limits.StringLimits)
        {
            AddPropertyInfo(stringLimit, stringLimit.FieldName, node);
        }
    }
    // Ownership transfer: the form is done with the limits object.
    limits.Dispose();
}
/// <summary>
/// LimitMemory must reject a percentage above 100 with an
/// ArgumentOutOfRangeException naming the 'percentage' parameter.
/// </summary>
public void ShouldThrowExceptionWhenValueIsTooHigh()
{
    Action limitWithExcessivePercentage = () => ResourceLimits.LimitMemory(new Percentage(100.1));

    Assert.Throws<ArgumentOutOfRangeException>("percentage", limitWithExcessivePercentage);
}
/// <summary>
/// Parent-queue scheduling pass: while the node can take more work and this
/// queue is within its (possibly reservation-adjusted) limits, delegates
/// assignment to child queues and accumulates the results. Stops after one
/// allocation unless this is the root queue, and always stops after an
/// off-switch assignment.
/// NOTE(review): machine-translated Hadoop Java; lock(this) mirrors the
/// original synchronized method.
/// </summary>
public override CSAssignment AssignContainers(Resource clusterResource, FiCaSchedulerNode node, ResourceLimits resourceLimits)
{
    lock (this)
    {
        // Default result: an empty, node-local assignment.
        CSAssignment assignment = new CSAssignment(Resources.CreateResource(0, 0), NodeType.NodeLocal);
        ICollection<string> nodeLabels = node.GetLabels();
        // if our queue cannot access this node, just return
        if (!SchedulerUtils.CheckQueueAccessToNode(accessibleLabels, nodeLabels))
        {
            return (assignment);
        }
        while (CanAssign(clusterResource, node))
        {
            if (Log.IsDebugEnabled())
            {
                Log.Debug("Trying to assign containers to child-queue of " + GetQueueName());
            }
            // Are we over maximum-capacity for this queue?
            // This will also consider parent's limits and also continuous reservation
            // looking
            if (!base.CanAssignToThisQueue(clusterResource, nodeLabels, resourceLimits, minimumAllocation, Resources.CreateResource(GetMetrics().GetReservedMB(), GetMetrics().GetReservedVirtualCores())))
            {
                break;
            }
            // Schedule
            CSAssignment assignedToChild = AssignContainersToChildQueues(clusterResource, node, resourceLimits);
            assignment.SetType(assignedToChild.GetType());
            // Done if no child-queue assigned anything
            if (Resources.GreaterThan(resourceCalculator, clusterResource, assignedToChild.GetResource(), Resources.None()))
            {
                // Track resource utilization for the parent-queue
                base.AllocateResource(clusterResource, assignedToChild.GetResource(), nodeLabels);
                // Track resource utilization in this pass of the scheduler
                Resources.AddTo(assignment.GetResource(), assignedToChild.GetResource());
                Log.Info("assignedContainer" + " queue=" + GetQueueName() + " usedCapacity=" + GetUsedCapacity() + " absoluteUsedCapacity=" + GetAbsoluteUsedCapacity() + " used=" + queueUsage.GetUsed() + " cluster=" + clusterResource);
            }
            else
            {
                break;
            }
            if (Log.IsDebugEnabled())
            {
                Log.Debug("ParentQ=" + GetQueueName() + " assignedSoFarInThisIteration=" + assignment.GetResource() + " usedCapacity=" + GetUsedCapacity() + " absoluteUsedCapacity=" + GetAbsoluteUsedCapacity());
            }
            // Do not assign more than one container if this isn't the root queue
            // or if we've already assigned an off-switch container
            if (!rootQueue || assignment.GetType() == NodeType.OffSwitch)
            {
                if (Log.IsDebugEnabled())
                {
                    if (rootQueue && assignment.GetType() == NodeType.OffSwitch)
                    {
                        Log.Debug("Not assigning more than one off-switch container," + " assignments so far: " + assignment);
                    }
                }
                break;
            }
        }
        return (assignment);
    }
}