        public override void checkFairness_blades(hostDB db, disposingList<lockableBladeSpec> blades)
        {
            // If anyone is queued for a resource, request its release so the queued owner can take over.
            foreach (lockableBladeSpec blade in blades)
            {
                if (blade.spec.currentlyBeingAVMServer)
                {
                    // A VM server is freed by requesting release of each of its child VMs.
                    using (disposingList<lockableVMSpec> childVMs = db.getVMByVMServerIP(blade,
                                                                                         bladeLockType.lockNone, bladeLockType.lockOwnership))
                    {
                        foreach (lockableVMSpec VM in childVMs)
                        {
                            Debug.WriteLine("Requesting release for VM " + VM.spec.VMIP);
                            VM.spec.state = bladeStatus.releaseRequested;
                        }
                    }
                }
                else if (blade.spec.currentOwner != "vmserver" && blade.spec.nextOwner != null)
                {
                    Debug.WriteLine("Requesting release for blade " + blade.spec.bladeIP);
                    blade.spec.state = bladeStatus.releaseRequested;
                }
            }
        }
Example #2
        public void testDBObjectCanDoDelayedDisposalWhileUpgraded()
        {
            using (hostDB db = new hostDB())
            {
                bladeDirectorWCF.vmSpec toDB = new bladeDirectorWCF.vmSpec(db.conn, "1.1.1.3", bladeLockType.lockAll, bladeLockType.lockAll);
                db.addNode(toDB);

                using (lockableVMSpec refA = db.getVMByIP("1.1.1.3", bladeLockType.lockNone, bladeLockType.lockNone))
                {
                    using (new tempLockElevation(refA,
                                                 bladeLockType.lockVirtualHW,
                                                 bladeLockType.lockOwnership))
                    {
                        Assert.AreEqual(bladeLockType.lockIPAddresses | bladeLockType.lockVirtualHW | bladeLockType.lockOwnership, refA.spec.permittedAccessRead);
                        Assert.AreEqual(bladeLockType.lockOwnership, refA.spec.permittedAccessWrite);
                        refA.Dispose();
                        refA.inhibitNextDisposal();
                    }
                }

                // As a final check, re-acquire the locks to verify that none were 'leaked'.
                using (lockableVMSpec refA = db.getVMByIP("1.1.1.3", bladeLockType.lockAll, bladeLockType.lockAll))
                {
                    // ..
                }
            }
        }
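
The test above deliberately disposes mid-scope to exercise delayed disposal. For contrast, here is a minimal sketch of what ordinary tempLockElevation usage presumably looks like, assuming (as Examples #3 and #7 suggest) that friendlyName is guarded by lockVirtualHW; the IP, method name, and restore-on-dispose behaviour are illustrative assumptions rather than confirmed API contract:

public void sketchTempElevationTypicalUse()
{
    using (hostDB db = new hostDB())
    {
        db.addNode(new bladeDirectorWCF.vmSpec(db.conn, "1.1.1.9", bladeLockType.lockAll, bladeLockType.lockAll));

        // Acquire with no locks, then elevate only for the duration of the write.
        using (lockableVMSpec vm = db.getVMByIP("1.1.1.9", bladeLockType.lockNone, bladeLockType.lockNone))
        {
            using (new tempLockElevation(vm, bladeLockType.lockNone, bladeLockType.lockVirtualHW))
            {
                vm.spec.friendlyName = "renamed while elevated"; // assumed to need the lockVirtualHW write lock
            }
            // On dispose, tempLockElevation presumably restores the original (lockNone) access.
        }
    }
}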
Example #3
        public void testDBObjectIsUpdatedFromDBOnPermissionUpgrade()
        {
            using (hostDB db = new hostDB())
            {
                bladeDirectorWCF.vmSpec toDB = new bladeDirectorWCF.vmSpec(db.conn, "1.1.1.2", bladeLockType.lockAll, bladeLockType.lockAll);
                db.addNode(toDB);

                using (lockableVMSpec refB = db.getVMByIP("1.1.1.2", bladeLockType.lockNone, bladeLockType.lockNone))
                {
                    Thread innerThread = new Thread(() =>
                    {
                        using (lockableVMSpec refA = db.getVMByIP("1.1.1.2", bladeLockType.lockNone,
                                                                  bladeLockType.lockVirtualHW | bladeLockType.lockOwnership))
                        {
                            refA.spec.friendlyName = "test data";
                            refA.spec.currentOwner = "Dave_Lister";
                        }
                    });
                    innerThread.Start();
                    innerThread.Join();

                    refB.upgradeLocks(bladeLockType.lockVirtualHW | bladeLockType.lockOwnership, bladeLockType.lockNone);
                    Assert.AreEqual("test data", refB.spec.friendlyName);
                    Assert.AreEqual("Dave_Lister", refB.spec.currentOwner);
                }
            }
        }
Example #4
        public void testDBObjectThrowsAfterDowngradeToReadOnlyAccess()
        {
            using (hostDB db = new hostDB())
            {
                bladeDirectorWCF.vmSpec toDB = new bladeDirectorWCF.vmSpec(db.conn, "1.1.1.4", bladeLockType.lockAll, bladeLockType.lockAll);
                db.addNode(toDB);

                // Lock with write access to a field, and then downgrade to read-only access. Then, try to write to the field we
                // originally locked, and expect an exception to be thrown.
                using (lockableVMSpec refA = db.getVMByIP("1.1.1.4", bladeLockType.lockNone,
                                                          bladeLockType.lockVirtualHW | bladeLockType.lockOwnership))
                {
                    refA.downgradeLocks(
                        bladeLockType.lockNone,
                        bladeLockType.lockVirtualHW | bladeLockType.lockOwnership);

                    // We have released the write lock, so we should be holding the read lock only.
                    Assert.AreEqual(bladeLockType.lockIPAddresses | bladeLockType.lockOwnership | bladeLockType.lockVirtualHW, refA.spec.permittedAccessRead);
                    Assert.AreEqual(bladeLockType.lockNone, refA.spec.permittedAccessWrite);

                    // We should not be permitted to write fields
                    failIfNoThrow(() => { refA.spec.friendlyName = "test data"; });
                    failIfNoThrow(() => { refA.spec.currentOwner = "Dave_Lister"; });
                    // but should be permitted to read them.
                    failIfThrow(() => { Debug.WriteLine(refA.spec.currentOwner); });
                    failIfThrow(() => { Debug.WriteLine(refA.spec.friendlyName); });
                }
            }
        }
Example #5
        public void testDBObjectFlushesToDBOnLockDowngrade()
        {
            using (hostDB db = new hostDB())
            {
                bladeDirectorWCF.vmSpec toDB = new bladeDirectorWCF.vmSpec(db.conn, "1.1.1.7", bladeLockType.lockAll, bladeLockType.lockAll);
                db.addNode(toDB);

                ManualResetEvent canCheckRefB = new ManualResetEvent(false);
                ManualResetEvent testEnded    = new ManualResetEvent(false);

                Thread innerThread = new Thread(() =>
                {
                    using (lockableVMSpec refA = db.getVMByIP("1.1.1.7", bladeLockType.lockNone,
                                                              bladeLockType.lockVirtualHW | bladeLockType.lockOwnership))
                    {
                        // Set some data, and then downgrade to a read-only lock.
                        // The data should be flushed to the DB at that point, so we set a ManualResetEvent and the main thread
                        // will check that the data has indeed been flushed, by reading from the DB.

                        refA.spec.friendlyName = "test data";
                        refA.spec.currentOwner = "Dave_Lister";

                        refA.downgradeLocks(
                            bladeLockType.lockNone,
                            bladeLockType.lockVirtualHW | bladeLockType.lockOwnership);

                        Assert.AreEqual(bladeLockType.lockIPAddresses | bladeLockType.lockOwnership | bladeLockType.lockVirtualHW, refA.spec.permittedAccessRead);
                        Assert.AreEqual(bladeLockType.lockNone, refA.spec.permittedAccessWrite);

                        canCheckRefB.Set();
                        testEnded.WaitOne();
                    }
                });
                innerThread.Start();
                canCheckRefB.WaitOne();
                try
                {
                    using (lockableVMSpec refB = db.getVMByIP("1.1.1.7",
                                                              bladeLockType.lockVirtualHW | bladeLockType.lockOwnership,
                                                              bladeLockType.lockNone))
                    {
                        Assert.AreEqual("Dave_Lister", refB.spec.currentOwner);
                        Assert.AreEqual("test data", refB.spec.friendlyName);
                    }
                }
                finally
                {
                    testEnded.Set();
                    innerThread.Join();
                }
            }
        }
Example #6
        public lockableVMSpec createChildVM(SQLiteConnection conn, hostDB db, VMHardwareSpec reqhw, VMSoftwareSpec reqsw, string newOwner)
        {
            if ((permittedAccessRead & bladeLockType.lockVMCreation) == bladeLockType.lockNone)
            {
                throw new Exception("lockVMCreation is needed when calling .createChildVM");
            }

            vmserverTotals totals        = db.getVMServerTotals(this);
            int            indexOnServer = totals.VMs + 1;
            string         newBladeName  = xdlClusterNaming.makeVMName(bladeIP, indexOnServer);

            // If we set the debugger port automatically, make sure we reset it to zero before we return.
            bool needToResetReqSWDebugPort = false;

            if (reqsw.debuggerPort == 0)
            {
                reqsw.debuggerPort        = xdlClusterNaming.makeVMKernelDebugPort(bladeIP, indexOnServer);
                needToResetReqSWDebugPort = true;
            }

            vmSpec newVM = new vmSpec(conn, newBladeName, reqsw, bladeLockType.lockAll, bladeLockType.lockAll);

            newVM.parentBladeIP = bladeIP;
            newVM.state         = bladeStatus.inUseByDirector;
            newVM.currentOwner  = "vmserver"; // We own the blade until we are done setting it up
            newVM.nextOwner     = newOwner;
            newVM.parentBladeID = bladeID.Value;
            newVM.memoryMB      = reqhw.memoryMB;
            newVM.cpuCount      = reqhw.cpuCount;
            newVM.indexOnServer = indexOnServer;

            newVM.VMIP    = xdlClusterNaming.makeVMIP(bladeIP, newVM);
            newVM.iscsiIP = xdlClusterNaming.makeiSCSIIP(bladeIP, newVM);
            newVM.eth0MAC = xdlClusterNaming.makeEth0MAC(bladeIP, newVM);
            newVM.eth1MAC = xdlClusterNaming.makeEth1MAC(bladeIP, newVM);

            // VMs always have this implicit snapshot.
            newVM.currentSnapshot = "vm";

            if (needToResetReqSWDebugPort)
            {
                reqsw.debuggerPort = 0;
            }

            lockableVMSpec toRet = new lockableVMSpec(newVM.VMIP, bladeLockType.lockAll, bladeLockType.lockAll);

            toRet.setSpec(newVM);
            return(toRet);
        }
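
The guard at the top of createChildVM (and of canAccommodate in Example #8) works by AND-ing permittedAccessRead against a single flag, which only makes sense if bladeLockType is a [Flags]-style enum. A minimal self-contained sketch of that pattern; the enum name and numeric values here are illustrative assumptions, not the project's actual definitions:

using System;

[Flags]
enum lockFlags   // hypothetical stand-in for bladeLockType
{
    lockNone        = 0,
    lockIPAddresses = 1 << 0,
    lockVirtualHW   = 1 << 1,
    lockOwnership   = 1 << 2,
    lockVMCreation  = 1 << 3,
    lockAll         = lockIPAddresses | lockVirtualHW | lockOwnership | lockVMCreation
}

static class flagDemo
{
    static void Main()
    {
        lockFlags permitted = lockFlags.lockIPAddresses | lockFlags.lockOwnership;

        // The AND isolates one bit; comparing with lockNone tests whether that bit is clear.
        if ((permitted & lockFlags.lockVMCreation) == lockFlags.lockNone)
            Console.WriteLine("lockVMCreation not held - the real code throws here.");
    }
}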
Example #7
        public void testDBObjectDoesNotThrowAfterUpgradeToWriteAccess()
        {
            using (hostDB db = new hostDB())
            {
                bladeDirectorWCF.vmSpec toDB = new bladeDirectorWCF.vmSpec(db.conn, "1.1.1.6", bladeLockType.lockAll, bladeLockType.lockAll);
                db.addNode(toDB);

                using (lockableVMSpec refA = db.getVMByIP("1.1.1.6", bladeLockType.lockNone, bladeLockType.lockNone))
                {
                    refA.upgradeLocks(
                        bladeLockType.lockVirtualHW | bladeLockType.lockOwnership,
                        bladeLockType.lockVirtualHW | bladeLockType.lockOwnership);

                    Assert.AreEqual(bladeLockType.lockIPAddresses | bladeLockType.lockOwnership | bladeLockType.lockVirtualHW, refA.spec.permittedAccessRead);
                    Assert.AreEqual(bladeLockType.lockOwnership | bladeLockType.lockVirtualHW, refA.spec.permittedAccessWrite);

                    failIfThrow(() => { refA.spec.friendlyName = "test data"; });
                    failIfThrow(() => { refA.spec.currentOwner = "Dave_Lister"; });
                    failIfThrow(() => { Debug.WriteLine(refA.spec.friendlyName); });
                    failIfThrow(() => { Debug.WriteLine(refA.spec.currentOwner); });
                }
            }
        }
Example #8
        public bool canAccommodate(hostDB db, VMHardwareSpec req)
        {
            if ((permittedAccessRead & bladeLockType.lockVMCreation) == bladeLockType.lockNone)
            {
                throw new Exception("lockVMCreation is needed when calling .canAccommodate");
            }

            vmserverTotals totals = db.getVMServerTotals(this);

            if (totals.VMs + 1 > _VMCapacity.maxVMs)
            {
                return(false);
            }
            if (totals.ram + req.memoryMB > _VMCapacity.maxVMMemoryMB)
            {
                return(false);
            }
            if (totals.cpus + req.cpuCount > _VMCapacity.maxCPUCount)
            {
                return(false);
            }

            return(true);
        }
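
Taken together with createChildVM (Example #6), a caller would check capacity first and only then create the child VM. A sketch of that flow, assuming the caller holds a VM-server blade spec ('server') with at least lockVMCreation read access; the spec values and owner name are illustrative, and whether VMHardwareSpec supports object-initializer syntax is an assumption:

// 'server' exposes the canAccommodate/createChildVM methods shown above.
VMHardwareSpec reqhw = new VMHardwareSpec { memoryMB = 4096, cpuCount = 2 };
VMSoftwareSpec reqsw = new VMSoftwareSpec();

if (server.canAccommodate(db, reqhw))
{
    using (lockableVMSpec newVM = server.createChildVM(db.conn, db, reqhw, reqsw, "Dave_Lister"))
    {
        // createChildVM returns the new VM holding lockAll; configure or persist it here.
    }
}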
Example #9
        public void testDBObjectThrowsAfterDowngradeToNoAccess()
        {
            using (hostDB db = new hostDB())
            {
                bladeDirectorWCF.vmSpec toDB = new bladeDirectorWCF.vmSpec(db.conn, "1.1.1.3", bladeLockType.lockAll, bladeLockType.lockAll);
                db.addNode(toDB);

                // Lock with write access to a field, and then downgrade to no access. Then, access the field we originally
                // locked, and expect an exception to be thrown.
                using (lockableVMSpec refA = db.getVMByIP("1.1.1.3", bladeLockType.lockNone,
                                                          bladeLockType.lockVirtualHW | bladeLockType.lockOwnership))
                {
                    refA.downgradeLocks(
                        bladeLockType.lockVirtualHW | bladeLockType.lockOwnership,
                        bladeLockType.lockVirtualHW | bladeLockType.lockOwnership);

                    Assert.AreEqual(bladeLockType.lockIPAddresses, refA.spec.permittedAccessRead);
                    Assert.AreEqual(bladeLockType.lockNone, refA.spec.permittedAccessWrite);

                    failIfNoThrow(() => { refA.spec.friendlyName = "test data"; });
                    failIfNoThrow(() => { refA.spec.currentOwner = "Dave_Lister"; });
                }
            }
        }
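
Comparing this test with Example #4 pins down the argument semantics of downgradeLocks: the first argument names read locks to release and the second names write locks to release. Releasing only the write half (Example #4) leaves the fields readable, while releasing both halves (this example) drops access entirely, so even reads throw. The two calls side by side:

// Example #4: release write only -> fields stay readable.
refA.downgradeLocks(bladeLockType.lockNone,
                    bladeLockType.lockVirtualHW | bladeLockType.lockOwnership);

// Example #9: release read and write -> fields become inaccessible.
refA.downgradeLocks(bladeLockType.lockVirtualHW | bladeLockType.lockOwnership,
                    bladeLockType.lockVirtualHW | bladeLockType.lockOwnership);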
        public override void checkFairness_blades(hostDB db, disposingList<lockableBladeSpec> blades)
        {
            // If an owner is under quota, promote them in the queue of any blade whose current owner is over quota.
            currentOwnerStat[] stats  = db.getFairnessStats(blades);
            string[]           owners = stats.Where(x => x.ownerName != "vmserver").Select(x => x.ownerName).ToArray();
            if (owners.Length == 0)
            {
                return;
            }
            float fairShare = (float)db.getAllBladeIP().Length / (float)owners.Length;

            currentOwnerStat[]     ownersOverQuota  = stats.Where(x => x.allocatedBlades > fairShare).ToArray();
            List<currentOwnerStat> ownersUnderQuota = stats.Where(x => x.allocatedBlades < fairShare).ToList();

            foreach (currentOwnerStat migrateTo in ownersUnderQuota)
            {
                var migratory = blades.Where(x =>
                                             (
                                                 // Migrate if the dest is currently owned by someone over-quota
                                                 (ownersOverQuota.Count(y => y.ownerName == x.spec.currentOwner) > 0) ||
                                                 // Or if it is a VM server, and currently holds VMs that are _all_ allocated to over-quota users
                                                 (
                                                     x.spec.currentOwner == "vmserver" &&

                                                     db.getVMByVMServerIP_nolocking(x.spec.bladeIP).All(vm =>
                                                                                                        (ownersOverQuota.Count(overQuotaUser => overQuotaUser.ownerName == vm.currentOwner) > 0)
                                                                                                        )
                                                 )
                                             )
                                             &&
                                             x.spec.nextOwner == migrateTo.ownerName &&
                                             (x.spec.state == bladeStatus.inUse || x.spec.state == bladeStatus.inUseByDirector)).ToList();

                if (migratory.Count == 0)
                {
                    // There is no blade we can free up for this owner. Try the next owner.
                    continue;
                }

                // Since migration cannot fail, we just take the first candidate.
                // TODO: should we prefer non VM-servers here?
                lockableBladeSpec newHost = migratory.First();

                if (newHost.spec.currentlyBeingAVMServer)
                {
                    // It's a VM server. Migrate all the VMs off it (i.e., request that they be destroyed).
                    using (disposingList<lockableVMSpec> childVMs = db.getVMByVMServerIP(newHost,
                                                                                         bladeLockType.lockNone, bladeLockType.lockOwnership))
                    {
                        foreach (lockableVMSpec VM in childVMs)
                        {
                            Debug.WriteLine("Requesting release for VM " + VM.spec.VMIP);
                            VM.spec.state = bladeStatus.releaseRequested;
                        }
                    }
                    newHost.spec.nextOwner = migrateTo.ownerName;
                    newHost.spec.state     = bladeStatus.releaseRequested;
                }
                else
                {
                    // It's a physical server. Just mark it as .releaseRequested.
                    Debug.WriteLine("Requesting release for blade " + newHost.spec.bladeIP);
                    newHost.spec.nextOwner = migrateTo.ownerName;
                    newHost.spec.state     = bladeStatus.releaseRequested;
                }
            }
        }
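
As a concrete illustration of the quota arithmetic above: with six blades and two owners left after filtering out "vmserver", fairShare is 3.0, so an owner holding four blades is over quota and one holding two is under quota, and the loop then tries to free a blade for the latter. A self-contained sketch of just that partitioning step, using hypothetical stand-in data in place of currentOwnerStat:

using System;
using System.Linq;

static class fairShareDemo
{
    static void Main()
    {
        // Stand-ins for currentOwnerStat entries: owner name plus allocated-blade count.
        var stats = new[]
        {
            new { ownerName = "Dave_Lister",   allocatedBlades = 4 },
            new { ownerName = "Arnold_Rimmer", allocatedBlades = 2 },
        };

        float fairShare = 6f / stats.Length;   // six blades over two owners -> 3.0

        var overQuota  = stats.Where(x => x.allocatedBlades > fairShare);  // Dave_Lister
        var underQuota = stats.Where(x => x.allocatedBlades < fairShare);  // Arnold_Rimmer

        Console.WriteLine(string.Join(", ", overQuota.Select(x => x.ownerName)) + " over quota; " +
                          string.Join(", ", underQuota.Select(x => x.ownerName)) + " under quota.");
    }
}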
        public abstract void checkFairness_blades(hostDB db, disposingList<lockableBladeSpec> blades);