Example #1
        public currentOwnerStat[] getFairnessStatsForBlades(disposingList <lockableBladeSpec> blades)
        {
            Dictionary <string, currentOwnerStat> ownershipByOwnerIP = new Dictionary <string, currentOwnerStat>();

            // TODO: check .state and omit release-requested blades
            foreach (lockableBladeSpec blade in blades)
            {
                if (!string.IsNullOrEmpty(blade.spec.currentOwner) && !ownershipByOwnerIP.ContainsKey(blade.spec.currentOwner))
                {
                    ownershipByOwnerIP.Add(blade.spec.currentOwner, new currentOwnerStat(blade.spec.currentOwner, 0));
                }

                if (!string.IsNullOrEmpty(blade.spec.nextOwner) && !ownershipByOwnerIP.ContainsKey(blade.spec.nextOwner))
                {
                    ownershipByOwnerIP.Add(blade.spec.nextOwner, new currentOwnerStat(blade.spec.nextOwner, 0));
                }

                // We don't count any blades which are in 'release requested' as owned by the current owner - we count them as owned
                // by the queued owner.
                if (blade.spec.state == bladeStatus.releaseRequested)
                {
                    if (string.IsNullOrEmpty(blade.spec.nextOwner))
                    {
                        throw new Exception("Blade has no .nextOwner but is in releaseRequested state");
                    }
                    ownershipByOwnerIP[blade.spec.nextOwner].allocatedBlades++;
                }
                else if (blade.spec.state == bladeStatus.inUse || blade.spec.state == bladeStatus.inUseByDirector)
                {
                    ownershipByOwnerIP[blade.spec.currentOwner].allocatedBlades++;
                }
            }

            return(ownershipByOwnerIP.Values.ToArray());
        }
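A minimal usage sketch, not from the original source: it assumes getFairnessStatsForBlades and the getAllBladeInfo helper shown in Example #4 are both methods on the hostDB class that the checkFairness_blades overrides receive as db, and that currentOwnerStat exposes the ownerName and allocatedBlades members used above. The lock choices are a guess.

        // Hypothetical caller; 'db' is assumed to be a hostDB instance.
        using (disposingList <lockableBladeSpec> blades = db.getAllBladeInfo(
                   b => true,                       // no filtering
                   bladeLockType.lockOwnership,     // read lock (assumed sufficient for ownership stats)
                   bladeLockType.lockNone))
        {
            foreach (currentOwnerStat stat in db.getFairnessStatsForBlades(blades))
                Debug.WriteLine(stat.ownerName + " owns " + stat.allocatedBlades + " blade(s)");
        }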
        public override void checkFairness_blades(hostDB db, disposingList <lockableBladeSpec> blades)
        {
            // If anyone is queued, promote them.
            foreach (lockableBladeSpec blade in blades)
            {
                if (blade.spec.currentlyBeingAVMServer)
                {
                    using (disposingList <lockableVMSpec> childVMs = db.getVMByVMServerIP(blade,
                                                                                          bladeLockType.lockNone, bladeLockType.lockOwnership))
                    {
                        foreach (lockableVMSpec VM in childVMs)
                        {
                            Debug.WriteLine("Requesting release for VM " + VM.spec.VMIP);
                            VM.spec.state = bladeStatus.releaseRequested;
                        }
                    }
                }
                else
                {
                    if (blade.spec.currentOwner != "vmserver" && blade.spec.nextOwner != null)
                    {
                        Debug.WriteLine("Requesting release for blade " + blade.spec.bladeIP);
                        blade.spec.state = bladeStatus.releaseRequested;
                    }
                }
            }
        }
Example #3
        public disposingList <lockableVMSpec> getAllVMInfo(Func <vmSpec, bool> filter, bladeLockType lockTypeRead, bladeLockType lockTypeWrite)
        {
            disposingList <lockableVMSpec> toRet = new disposingList <lockableVMSpec>();

            foreach (string bladeIP in getAllVMIP())
            {
                lockableVMSpec VM = getVMByIP(bladeIP, lockTypeRead, lockTypeWrite);
                if (filter(VM.spec))
                {
                    toRet.Add(VM);
                }
                else
                {
                    VM.Dispose();
                }
            }
            return(toRet);
        }
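A hedged calling sketch for getAllVMInfo, not in the original source: 'db' and 'requestorIP' are assumed variables, the lock flags are a guess, and disposing the returned list via using mirrors what Example #5 does with its disposingList.

        // Hypothetical caller: list the IPs of all VMs currently owned by a given requestor.
        using (disposingList <lockableVMSpec> myVMs = db.getAllVMInfo(
                   v => v.currentOwner == requestorIP,    // filter on the vmSpec, per the signature above
                   bladeLockType.lockOwnership,           // read lock (assumed sufficient)
                   bladeLockType.lockNone))
        {
            foreach (lockableVMSpec vm in myVMs)
                Debug.WriteLine("Owned VM: " + vm.spec.VMIP);
        }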
Example #4
        public disposingList <lockableBladeSpec> getAllBladeInfo(Func <bladeSpec, bool> filter, bladeLockType lockTypeRead, bladeLockType lockTypeWrite, bool permitAccessDuringBIOS = false, bool permitAccessDuringDeployment = false, int max = Int32.MaxValue)
        {
            disposingList <lockableBladeSpec> toRet = new disposingList <lockableBladeSpec>();

            foreach (string bladeIP in getAllBladeIP())
            {
                lockableBladeSpec blade = getBladeByIP(bladeIP, lockTypeRead, lockTypeWrite, true, true);
                // Filter out anything the caller's filter rejects
                if (!filter(blade.spec))
                {
                    blade.Dispose();
                    continue;
                }
                // Filter out anything we don't have access to right now, due to BIOS or VM deployments
                if ((!permitAccessDuringDeployment) &&
                    blade.spec.vmDeployState != VMDeployStatus.notBeingDeployed &&
                    blade.spec.vmDeployState != VMDeployStatus.failed &&
                    blade.spec.vmDeployState != VMDeployStatus.readyForDeployment)
                {
                    blade.Dispose();
                    continue;
                }
                if ((!permitAccessDuringBIOS) && blade.spec.currentlyHavingBIOSDeployed)
                {
                    blade.Dispose();
                    continue;
                }

                // Have we hit our maximum yet?
                if (toRet.Count == max)
                {
                    blade.Dispose();
                    continue;
                }

                // Otherwise, okay.
                toRet.Add(blade);
            }
            return(toRet);
        }
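For completeness, a hedged sketch of calling getAllBladeInfo with its optional parameters, not in the original source; the filter, lock flags, and limit are illustrative assumptions.

        // Hypothetical caller: take up to two unowned blades, skipping anything that is
        // mid-BIOS-flash or mid-VM-deployment (the defaults in the signature above).
        using (disposingList <lockableBladeSpec> freeBlades = db.getAllBladeInfo(
                   b => string.IsNullOrEmpty(b.currentOwner),   // unowned blades only
                   bladeLockType.lockOwnership,                 // read lock (assumed sufficient)
                   bladeLockType.lockOwnership,                 // write lock, in case we go on to claim them
                   permitAccessDuringBIOS: false,
                   permitAccessDuringDeployment: false,
                   max: 2))
        {
            foreach (lockableBladeSpec blade in freeBlades)
                Debug.WriteLine("Candidate blade: " + blade.spec.bladeIP);
        }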
Example #5
        public currentOwnerStat[] getFairnessStats(disposingList <lockableBladeSpec> blades)
        {
            List <currentOwnerStat> bladeStats = getFairnessStatsForBlades(blades).ToList();

            if (bladeStats.Any(x => bladeStats.Count(y => x.ownerName == y.ownerName) == 0))
            {
                throw new Exception("Not passed enough locks!");
            }

            // Now add VM stats.
            foreach (lockableBladeSpec blade in blades)
            {
                using (disposingList <lockableVMSpec> vms = getVMByVMServerIP(blade,
                                                                              bladeLockType.lockOwnership | bladeLockType.lockVMCreation | bladeLockType.lockVirtualHW,
                                                                              bladeLockType.lockNone))
                {
                    foreach (lockableVMSpec vm in vms)
                    {
                        string owner;
                        //if (vm.spec.state == bladeStatus.inUse || vm.spec.state == bladeStatus.inUseByDirector)
                        {
                            // During deployment, the VM is allocated to the VMServer, with the real requestor queued in the
                            // nextOwner. We count ownership quota against the nextOwner.
                            owner = vm.spec.currentOwner;
                            if (vm.spec.currentOwner == "vmserver")
                            {
                                owner = vm.spec.nextOwner;
                                if (string.IsNullOrEmpty(owner))
                                {
                                    // if this is empty, then this VM is not yet created and thus can't be owned.
                                    // It shouldn't be in the DB if it has no owner, unless the blade is not locked properly.
                                    throw new Exception("VM has 'vmserver' owner but no queued owner");
                                }
                            }
                            if (!vm.spec.ownershipRowID.HasValue)
                            {
                                throw new Exception("VM " + vm.spec.VMIP + " has no ownership row ID!?");
                            }
                            if (string.IsNullOrEmpty(owner))
                            {
                                // Likewise, this should be impossible, because we hold the VMCreation read lock.
                                throw new Exception("VM " + vm.spec.VMIP + " has no owner!?");
                            }
                        }

                        int cpuCount = vm.spec.cpuCount;
                        int memoryMB = vm.spec.memoryMB;

                        float pct = bladeSpec.asPercentageOfCapacity(cpuCount, memoryMB) / 100f;

                        if (bladeStats.Count(x => x.ownerName == owner) == 0)
                        {
                            bladeStats.Add(new currentOwnerStat(owner, 0));
                        }
                        bladeStats.Single(x => x.ownerName == owner).allocatedBlades += pct * 100;
                    }
                }
            }

            bladeStats.RemoveAll(x => x.ownerName == "vmserver");
            return(bladeStats.ToArray());
        }
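The VM accounting above charges each VM to its (queued) owner as a fraction of a blade's capacity. A worked illustration, with an invented VM size and an invented return value from asPercentageOfCapacity:

        // Illustration only: the 2-CPU/4096MB size and the 25% figure are invented.
        // Suppose bladeSpec.asPercentageOfCapacity(2, 4096) reported 25, i.e. the VM
        // uses a quarter of one blade's capacity:
        float pct = 25 / 100f;          // 0.25f, as computed in getFairnessStats above
        float charged = pct * 100;      // 25f is what the loop above adds to that owner's allocatedBlades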
Example #6
        public disposingList <lockableVMSpec> getVMByVMServerIP(lockableBladeSpec blade, bladeLockType readLock,
                                                                bladeLockType writeLock)
        {
            disposingList <lockableVMSpec> toRet = new disposingList <lockableVMSpec>();

            if ((blade.getCurrentLocks().read & bladeLockType.lockVMCreation) == 0)
            {
                throw new Exception("lockVMCreation required on vmserver passed to getVMByVMServerIP");
            }

            // We need to lock IP addresses on the VMs, since we lock by them.
            readLock = readLock | bladeLockType.lockIPAddresses;

            // Since we hold lockVMCreation, we can assume no VMs will be added to or removed from this blade. We also assume that
            // VM IP addresses never change except during initialization, when they move on from null; we simply drop any rows
            // with a NULL IP address.

            Dictionary <string, lockableVMSpec> VMNames = new Dictionary <string, lockableVMSpec>();
            string sqlCommand = "select VMIP from vmConfiguration " +
                                "join bladeConfiguration on parentbladeID = bladeConfigKey " +
                                "where bladeIP = $vmServerIP";

            using (SQLiteCommand cmd = new SQLiteCommand(sqlCommand, conn))
            {
                cmd.Parameters.AddWithValue("$vmServerIP", blade.spec.bladeIP);
                using (SQLiteDataReader reader = cmd.ExecuteReader())
                {
                    while (reader.Read())
                    {
                        string VMName = reader[0].ToString();
                        if (!String.IsNullOrEmpty(VMName))
                        {
                            VMNames.Add(VMName, new lockableVMSpec(VMName, readLock, writeLock));
                        }
                    }
                }
            }

            try
            {
                // Now read each from the DB, now that we hold the lock for each.
                foreach (KeyValuePair <string, lockableVMSpec> kvp in VMNames)
                {
                    string         vmName = kvp.Key;
                    lockableVMSpec vmSpec = kvp.Value;

                    string sql_getVM = "select bladeOwnership.*, vmConfiguration.* from vmConfiguration " +
                                       " join bladeOwnership on bladeOwnership.ownershipKey = vmConfiguration.ownershipID " +
                                       " join bladeConfiguration on parentbladeID = bladeConfigKey " +
                                       " where VMIP = $vmIP";

                    using (SQLiteCommand cmd = new SQLiteCommand(sql_getVM, conn))
                    {
                        cmd.Parameters.AddWithValue("$vmIP", vmName);

                        using (SQLiteDataReader reader = cmd.ExecuteReader())
                        {
                            if (!reader.Read())
                            {
                                throw new Exception("VM disappeared, even though we hold lockVMCreation on the parent!");
                            }

                            vmSpec.setSpec(new vmSpec(conn, reader, readLock, writeLock));
                            toRet.Add(vmSpec);
                        }
                    }
                }
            }
            catch (Exception)
            {
                foreach (KeyValuePair <string, lockableVMSpec> kvp in VMNames)
                {
                    kvp.Value.Dispose();
                }
                throw;
            }
            return(toRet);
        }
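A hedged calling sketch, not in the original source: getVMByVMServerIP throws unless the parent blade already holds a lockVMCreation read lock, so the sketch acquires it up front via getBladeByIP, the same helper getAllBladeInfo uses. The IP address and lock combinations are invented.

        // Hypothetical caller; the blade IP is invented.
        using (lockableBladeSpec vmServer = db.getBladeByIP("10.0.0.1",
                                                            bladeLockType.lockVMCreation | bladeLockType.lockOwnership,
                                                            bladeLockType.lockNone, true, true))
        using (disposingList <lockableVMSpec> vms = db.getVMByVMServerIP(vmServer,
                                                                         bladeLockType.lockOwnership,
                                                                         bladeLockType.lockNone))
        {
            foreach (lockableVMSpec vm in vms)
                Debug.WriteLine(vm.spec.VMIP + " is owned by " + vm.spec.currentOwner);
        }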
        public override void checkFairness_blades(hostDB db, disposingList <lockableBladeSpec> blades)
        {
            // If a blade owner is under its quota, then promote it in any queues where the current owner is over-quota.
            currentOwnerStat[] stats  = db.getFairnessStats(blades);
            string[]           owners = stats.Where(x => x.ownerName != "vmserver").Select(x => x.ownerName).ToArray();
            if (owners.Length == 0)
            {
                return;
            }
            float fairShare = (float)db.getAllBladeIP().Length / (float)owners.Length;

            currentOwnerStat[]      ownersOverQuota  = stats.Where(x => x.allocatedBlades > fairShare).ToArray();
            List <currentOwnerStat> ownersUnderQuota = stats.Where(x => x.allocatedBlades < fairShare).ToList();

            foreach (currentOwnerStat migrateTo in ownersUnderQuota)
            {
                var migratory = blades.Where(x =>
                                             (
                                                 // Migrate if the dest is currently owned by someone over-quota
                                                 (ownersOverQuota.Count(y => y.ownerName == x.spec.currentOwner) > 0) ||
                                                 // Or if it is a VM server, and currently holds VMs that are _all_ allocated to over-quota users
                                                 (
                                                     x.spec.currentOwner == "vmserver" &&

                                                     db.getVMByVMServerIP_nolocking(x.spec.bladeIP).All(vm =>
                                                                                                        (ownersOverQuota.Count(overQuotaUser => overQuotaUser.ownerName == vm.currentOwner) > 0)
                                                                                                        )
                                                 )
                                             )
                                             &&
                                             x.spec.nextOwner == migrateTo.ownerName &&
                                             (x.spec.state == bladeStatus.inUse || x.spec.state == bladeStatus.inUseByDirector)).ToList();
                {
                    if (migratory.Count == 0)
                    {
                        // There is nowhere to migrate this owner from. Try another owner.
                        continue;
                    }

                    // Since migration cannot fail, we just take the first candidate.
                    // TODO: should we prefer non VM-servers here?
                    lockableBladeSpec newHost = migratory.First();

                    if (newHost.spec.currentlyBeingAVMServer)
                    {
                        // It's a VM server. Migrate all the VMs off it (ie, request them to be destroyed).
                        newHost.spec.nextOwner = migrateTo.ownerName;
                        using (disposingList <lockableVMSpec> childVMs = db.getVMByVMServerIP(newHost,
                                                                                              bladeLockType.lockNone, bladeLockType.lockOwnership))
                        {
                            foreach (lockableVMSpec VM in childVMs)
                            {
                                Debug.WriteLine("Requesting release for VM " + VM.spec.VMIP);
                                VM.spec.state = bladeStatus.releaseRequested;
                            }
                        }
                        newHost.spec.nextOwner = migrateTo.ownerName;
                        newHost.spec.state     = bladeStatus.releaseRequested;
                    }
                    else
                    {
                        // It's a physical server. Just mark it as .releaseRequested.
                        Debug.WriteLine("Requesting release for blade " + newHost.spec.bladeIP);
                        newHost.spec.nextOwner = migrateTo.ownerName;
                        newHost.spec.state     = bladeStatus.releaseRequested;
                    }
                }
            }
        }
 public abstract void checkFairness_blades(hostDB db, disposingList <lockableBladeSpec> blades);
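Finally, a hedged sketch of how the abstract hook above might be driven, not in the original source: 'fairnessChecker' stands for an instance of whichever concrete class supplies the checkFairness_blades override, 'db' is the hostDB, and the lock choices are assumptions; lockVMCreation is included because checkFairness_blades calls getVMByVMServerIP, which requires that lock on the blade.

        // Hypothetical driver for a fairness pass over every blade.
        using (disposingList <lockableBladeSpec> blades = db.getAllBladeInfo(
                   b => true,
                   bladeLockType.lockOwnership | bladeLockType.lockVMCreation,   // read locks (assumed)
                   bladeLockType.lockOwnership))                                 // write lock: blade states get modified
        {
            fairnessChecker.checkFairness_blades(db, blades);
        }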