// Powers on the given blade, using a mocked iLO so no real hardware is touched.
// The mocked hypervisor routes any command execution through callMockedExecutionHandler.
public override void startBladePowerOn(lockableBladeSpec nodeSpec, cancellableDateTime deadline)
{
    using (hypervisor mockedILO = new hypervisor_mocked_ilo(nodeSpec.spec, callMockedExecutionHandler))
    {
        mockedILO.powerOn(deadline);
    }
}
// Reports the allocation status of a blade from the point of view of the given requestor.
// Only the ownership lock is needed (read-only), and access is permitted even while the
// blade is undergoing BIOS or VM deployment, since we only inspect ownership state.
public GetBladeStatusResult getBladeStatus(string nodeIp, string requestorIp)
{
    using (lockableBladeSpec blade = getBladeByIP(nodeIp, bladeLockType.lockOwnership, bladeLockType.lockNone,
        permitAccessDuringBIOS: true, permitAccessDuringDeployment: true))
    {
        bladeStatus currentState = blade.spec.state;

        if (currentState == bladeStatus.unused)
            return GetBladeStatusResult.unused;

        if (currentState == bladeStatus.releaseRequested)
            return GetBladeStatusResult.releasePending;

        if (currentState == bladeStatus.inUse)
        {
            // A blade in use is only "yours" if the requestor is the current owner.
            return blade.spec.currentOwner == requestorIp ? GetBladeStatusResult.yours : GetBladeStatusResult.notYours;
        }

        if (currentState == bladeStatus.inUseByDirector)
            return GetBladeStatusResult.notYours;

        throw new ArgumentOutOfRangeException();
    }
}
// Converts a physical blade into a VM server: removes any stale VM configuration rows
// associated with it, flags it as a VM server owned by the director, and schedules a
// power cycle so it starts from a known-good state.
public void makeIntoAVMServer(lockableBladeSpec toConvert)
{
    // Delete any VM configurations that have been left lying around.
    string sql = "select bladeConfigKey from VMConfiguration " +
                 " join BladeConfiguration on BladeConfigKey = ownershipKey " +
                 "join bladeOwnership on VMConfiguration.parentBladeID = ownershipKey " +
                 " where bladeConfigKey = $bladeIP";
    List<long> toDel = new List<long>();
    using (SQLiteCommand cmd = new SQLiteCommand(sql, conn))
    {
        cmd.Parameters.AddWithValue("$bladeIP", toConvert.spec.bladeIP);
        using (SQLiteDataReader reader = cmd.ExecuteReader())
        {
            while (reader.Read())
                toDel.Add((long)reader[0]);
        }
    }

    // Fix: an empty IN () clause is a syntax error in SQLite, so only issue the delete
    // when there is actually something to remove.
    if (toDel.Count > 0)
    {
        // The joined-in values are longs read straight from the DB, so this is not
        // injectable; parameterising would still be tidier.
        string deleteSQL = "delete from VMConfiguration where id in (" + String.Join(",", toDel) + ")";
        using (SQLiteCommand cmd = new SQLiteCommand(deleteSQL, conn))
        {
            cmd.ExecuteNonQuery();
        }
    }

    // And then mark this blade as being a VM server.
    toConvert.spec.currentlyBeingAVMServer = true;
    toConvert.spec.state = bladeStatus.inUseByDirector;
    // Since we don't know if the blade has been left in a good state (or even if it was a VM server previously) we
    // force a power cycle before we use it.
    toConvert.spec.vmDeployState = VMDeployStatus.needsPowerCycle;
}
// Background thread simulating a BIOS write for the mocked environment.
// Marks the blade as having its BIOS deployed, signals the caller that the thread has
// started, then idles under the long-running-BIOS lock until either the deadline elapses
// (reported as success) or the operation is cancelled.
private static void mockedBiosThread(mockedBiosThreadParams param)
{
    // Take the short-lived BIOS lock only long enough to flag deployment as in progress.
    using (lockableBladeSpec blade = param.db.getBladeByIP(param.nodeIP, bladeLockType.lockBIOS, bladeLockType.lockBIOS, true, true))
    {
        blade.spec.currentlyHavingBIOSDeployed = true;
    }
    // Tell the spawning code we are up and running before entering the long wait.
    param.isStarted = true;
    param.signalOnStart.Set();

    // Hold the long-running BIOS lock for the duration of the simulated flash.
    using (lockableBladeSpec blade = param.db.getBladeByIP(param.nodeIP, bladeLockType.lockLongRunningBIOS, bladeLockType.lockLongRunningBIOS, true, true))
    {
        while (true)
        {
            // NOTE(review): success is reported once the deadline has *passed* — this mock
            // apparently simulates a flash that runs for the full allotted time; confirm intent.
            if (DateTime.Now > param.deadline)
            {
                // Temporarily elevate to the full BIOS lock so we can record the
                // newly-"written" BIOS configuration against the blade.
                using (var tmp = new tempLockElevation(blade, bladeLockType.lockBIOS, bladeLockType.lockBIOS))
                {
                    param.parent.markLastKnownBIOS(blade, param.BIOSToWrite);
                    param.result = new result(resultCode.success);
                    return;
                }
            }
            if (param.isCancelled)
            {
                param.result = new result(resultCode.cancelled);
                return;
            }
            // Poll once a second.
            Thread.Sleep(TimeSpan.FromSeconds(1));
        }
    }
}
// Pushes the BIOS-deployment tooling onto the LTSP-booted blade: the apply/get shell
// scripts, the conrep XML definitions, optionally the new BIOS config, and finally the
// conrep binary itself. Text files have their line endings normalised to unix style.
private void copyDeploymentFilesToBlade(lockableBladeSpec nodeSpec, string biosConfigFile, cancellableDateTime deadline)
{
    using (hypervisor hyp = _hostManager.makeHypervisorForBlade_LTSP(nodeSpec))
    {
        // Assemble the set of text files to push.
        Dictionary<string, string> textFiles = new Dictionary<string, string>();
        textFiles["applyBIOS.sh"] = Resources.applyBIOS.Replace("\r\n", "\n");
        textFiles["getBIOS.sh"] = Resources.getBIOS.Replace("\r\n", "\n");
        textFiles["conrep.xml"] = Resources.conrep_xml.Replace("\r\n", "\n");
        if (biosConfigFile != null)
            textFiles["newbios.xml"] = biosConfigFile.Replace("\r\n", "\n");

        // Copy each, retrying transient failures until the deadline.
        foreach (KeyValuePair<string, string> file in textFiles)
        {
            hypervisor.doWithRetryOnSomeExceptions(() => { hyp.copyToGuestFromBuffer(file.Key, file.Value); }, deadline, TimeSpan.FromSeconds(10));
        }

        // And copy this file specifically as binary.
        hypervisor.doWithRetryOnSomeExceptions(() => { hyp.copyToGuestFromBuffer("conrep", Resources.conrep); }, deadline, TimeSpan.FromSeconds(10));
    }
}
// Builds a VMware hypervisor handle for the given VM, hosted on the given parent blade.
// ESXi credentials come from settings; guest access is via SMB/WMI with the VM credentials.
public override hypervisor makeHypervisorForVM(lockableVMSpec vm, lockableBladeSpec parentBladeSpec)
{
    string hostIP = parentBladeSpec.spec.bladeIP;
    hypSpec_vmware vmwareSpec = new hypSpec_vmware(
        vm.spec.friendlyName, hostIP,
        Settings.Default.esxiUsername, Settings.Default.esxiPassword,
        Settings.Default.vmUsername, Settings.Default.vmPassword,
        null, null,
        vm.spec.kernelDebugPort, vm.spec.kernelDebugKey, vm.spec.VMIP);

    return new hypervisor_vmware(vmwareSpec, clientExecutionMethod.smbWithWMI);
}
// Stops leak-tracking the given disposable for the current session.
// No-op when there is no current session, or when the session has never had anything
// monitored (fix: the previous indexer access threw KeyNotFoundException in that case).
public static void unmonitorDisposable(lockableBladeSpec tomonitor)
{
    string sessionID = getSessionID();
    if (sessionID == null)
        return;

    if (disposablesByID.TryGetValue(sessionID, out var sessionDisposables))
        sessionDisposables.Remove(tomonitor);
}
// Builds an iLO-backed hypervisor handle for a blade running Windows.
// Guest access uses the VM credentials over SMB/WMI; power control uses the blade's iLO.
public override hypervisor makeHypervisorForBlade_windows(lockableBladeSpec bladeSpec)
{
    bladeSpec spec = bladeSpec.spec;
    hypSpec_iLo windowsSpec = new hypSpec_iLo(
        spec.bladeIP,
        Settings.Default.vmUsername, Settings.Default.vmPassword,
        spec.iLOIP,
        Settings.Default.iloUsername, Settings.Default.iloPassword,
        spec.iscsiIP,
        null, null,
        spec.currentSnapshot,
        null,
        spec.kernelDebugPort,
        null);

    return new hypervisor_iLo(windowsSpec, clientExecutionMethod.smbWithWMI);
}
// Builds an iLO-backed hypervisor handle for a blade running ESXi.
// Guest access uses the ESXi credentials over SSH; power control uses the blade's iLO.
// No iSCSI, snapshot, or kernel-debug configuration applies to the ESXi case.
public override hypervisor makeHypervisorForBlade_ESXi(lockableBladeSpec bladeSpec)
{
    bladeSpec spec = bladeSpec.spec;
    hypSpec_iLo esxiSpec = new hypSpec_iLo(
        spec.bladeIP,
        Settings.Default.esxiUsername, Settings.Default.esxiPassword,
        spec.iLOIP,
        Settings.Default.iloUsername, Settings.Default.iloPassword,
        null, null, null, null, null,
        0,
        null);

    return new hypervisor_iLo(esxiSpec, clientExecutionMethod.SSHToBASH);
}
// Powers a blade off via its iLO, repeating the power-off command every five seconds
// until the iLO reports the power is actually off. The sleep is cancellable via the
// supplied deadline, so a cancelled operation aborts the retry loop.
public override void startBladePowerOff(lockableBladeSpec nodeSpec, cancellableDateTime deadline)
{
    using (hypervisor_iLo_HTTP ilo = new hypervisor_iLo_HTTP(nodeSpec.spec.iLOIP, Settings.Default.iloUsername, Settings.Default.iloPassword))
    {
        ilo.connect();

        ilo.powerOff();
        while (ilo.getPowerStatus() != false)
        {
            // Still on - wait a little (honouring cancellation) and try again.
            deadline.doCancellableSleep(TimeSpan.FromSeconds(5));
            ilo.powerOff();
        }
    }
}
// Returns locked specs for every blade matching the given filter, up to 'max' entries.
// Blades that are mid-BIOS-flash or mid-VM-deployment are excluded unless explicitly
// permitted. The returned disposingList owns the locks; callers dispose it to release them.
public disposingList <lockableBladeSpec> getAllBladeInfo(Func <bladeSpec, bool> filter, bladeLockType lockTypeRead, bladeLockType lockTypeWrite, bool permitAccessDuringBIOS = false, bool permitAccessDuringDeployment = false, int max = Int32.MaxValue)
{
    disposingList <lockableBladeSpec> toRet = new disposingList <lockableBladeSpec>();
    foreach (string bladeIP in getAllBladeIP())
    {
        // Fix: stop as soon as we have as many blades as the caller asked for. Previously
        // the loop kept acquiring and immediately disposing locks on every remaining blade.
        if (toRet.Count == max)
            break;

        lockableBladeSpec blade = getBladeByIP(bladeIP, lockTypeRead, lockTypeWrite, true, true);

        // Filter out anything as requested
        if (!filter(blade.spec))
        {
            blade.Dispose();
            continue;
        }

        // Filter out anything we don't have access to right now, due to BIOS or VM deployments
        if ((!permitAccessDuringDeployment) &&
            blade.spec.vmDeployState != VMDeployStatus.notBeingDeployed &&
            blade.spec.vmDeployState != VMDeployStatus.failed &&
            blade.spec.vmDeployState != VMDeployStatus.readyForDeployment)
        {
            blade.Dispose();
            continue;
        }
        if ((!permitAccessDuringBIOS) && blade.spec.currentlyHavingBIOSDeployed)
        {
            blade.Dispose();
            continue;
        }

        // Otherwise, okay.
        toRet.Add(blade);
    }
    return toRet;
}
// Boots a blade into the LTSP image: flags it as mid-BIOS-deployment, power cycles it,
// and waits for either the boot-finished or boot-failed event, invoking the matching
// callback. Boot completion is detected by port 22 (SSH) opening, not by ping, since
// the Ping class can cause a BSoD on the client.
private void _ltspBootThreadStart(biosThreadState param)
{
    using (lockableBladeSpec blade = _hostManager.db.getBladeByIP(param.nodeIP, bladeLockType.lockBIOS, bladeLockType.lockBIOS, permitAccessDuringBIOS: true, permitAccessDuringDeployment: true))
    {
        blade.spec.currentlyHavingBIOSDeployed = true;
    }
    param.connectDeadline = new cancellableDateTime(TimeSpan.FromMinutes(5));
    param.isStarted.Set();

    using (lockableBladeSpec blade = _hostManager.db.getBladeByIP(param.nodeIP, bladeLockType.lockOwnership | bladeLockType.lockSnapshot, bladeLockType.lockNone, permitAccessDuringBIOS: true, permitAccessDuringDeployment: true))
    {
        // Power cycle it
        _hostManager.startBladePowerOff(blade, param.connectDeadline);
        _hostManager.startBladePowerOn(blade, param.connectDeadline);

        param.blade = blade;

        // Wait for it to boot. Note that we don't ping the client repeatedly here - since the Ping class can cause
        // a BSoD.. ;_; Instead, we wait for port 22 (SSH) to be open.
        _hostManager.setCallbackOnTCPPortOpen(22, param.onBootFinishEvent, param.onBootFailureEvent, param.connectDeadline, param);

        // Wait for the boot to either complete or to fail, polling each event in turn.
        // Fix: the wait conditions were inverted ("!WaitOne"), which fired the finish
        // callback on the first 500ms timeout instead of when the event was signalled.
        while (true)
        {
            if (param.onBootFinishEvent.WaitOne(TimeSpan.FromMilliseconds(500)))
            {
                param.onBootFinish(param);
                break;
            }
            if (param.onBootFailureEvent.WaitOne(TimeSpan.FromMilliseconds(500)))
            {
                param.onBootFailure(param);
                break;
            }
        }
    }
}
// Thread-entry wrapper around mockedBiosThread(mockedBiosThreadParams): converts the
// untyped thread argument, records any exception as a generic failure result, and
// always clears the blade's BIOS-deployment flag and marks the operation finished.
private static void mockedBiosThread(Object param)
{
    mockedBiosThreadParams typedParams = (mockedBiosThreadParams)param;
    try
    {
        mockedBiosThread(typedParams);
    }
    catch (Exception e)
    {
        typedParams.result = new result(resultCode.genericFail, e.Message + " @ " + e.StackTrace);
    }
    finally
    {
        // Regardless of outcome, the blade is no longer having its BIOS deployed.
        using (lockableBladeSpec blade = typedParams.db.getBladeByIP(typedParams.nodeIP, bladeLockType.lockBIOS, bladeLockType.lockBIOS, true, true))
        {
            blade.spec.currentlyHavingBIOSDeployed = false;
            typedParams.isFinished = true;
        }
    }
}
// Builds a mocked vmware hypervisor for the given VM and its parent blade; command
// execution is routed through callMockedExecutionHandler rather than real hardware.
public override hypervisor makeHypervisorForVM(lockableVMSpec VM, lockableBladeSpec parentBladeSpec)
{
    hypervisor_mocked_vmware mocked = new hypervisor_mocked_vmware(VM.spec, parentBladeSpec.spec, callMockedExecutionHandler);
    return mocked;
}
// Builds a mocked iLO hypervisor for the given blade; command execution is routed
// through callMockedExecutionHandler rather than real hardware.
public override hypervisor makeHypervisorForBlade_ESXi(lockableBladeSpec newBladeSpec)
{
    hypervisor_mocked_ilo mocked = new hypervisor_mocked_ilo(newBladeSpec.spec, callMockedExecutionHandler);
    return mocked;
}
// Returns locked specs for every VM hosted on the given VM-server blade.
// Caller must already hold lockVMCreation (read) on the blade, which guarantees the set
// of VMs on it cannot change underneath us. VMs with a NULL IP (still initialising) are
// skipped. The returned disposingList owns the VM locks; disposing it releases them.
public disposingList <lockableVMSpec> getVMByVMServerIP(lockableBladeSpec blade, bladeLockType readLock, bladeLockType writeLock)
{
    disposingList <lockableVMSpec> toRet = new disposingList <lockableVMSpec>();

    if ((blade.getCurrentLocks().read & bladeLockType.lockVMCreation) == 0)
    {
        throw new Exception("lockVMCreation required on vmserver passed to getVMByVMServerIP");
    }

    // We need to lock IP addressess on the VMs, since we lock by them.
    readLock = readLock | bladeLockType.lockIPAddresses;

    // Since we hold lockVMCreation, we can assume no new VMs will be added or removed to/from this blade. We assume that
    // VM IP addresses will never change, except during initialization, when they go from null - we just drop any with a
    // NULL IP address.
    // First pass: collect the VM IPs and take a lock on each (lockableVMSpec's ctor
    // acquires the lock).
    Dictionary <string, lockableVMSpec> VMNames = new Dictionary <string, lockableVMSpec>();
    string sqlCommand = "select VMIP from vmConfiguration " +
                        "join bladeConfiguration on parentbladeID = bladeConfigKey " +
                        "where bladeIP = $vmServerIP";
    using (SQLiteCommand cmd = new SQLiteCommand(sqlCommand, conn))
    {
        cmd.Parameters.AddWithValue("$vmServerIP", blade.spec.bladeIP);
        using (SQLiteDataReader reader = cmd.ExecuteReader())
        {
            while (reader.Read())
            {
                string VMName = reader[0].ToString();
                if (!String.IsNullOrEmpty(VMName))
                {
                    // NOTE(review): if this .Add throws (eg duplicate VMIP), locks already
                    // taken in this loop are not released - the catch below only covers the
                    // second pass. Confirm whether duplicate VMIPs are possible here.
                    VMNames.Add(VMName, new lockableVMSpec(VMName, readLock, writeLock));
                }
            }
        }
    }

    try
    {
        // Now read each from the DB, now that we hold the lock for each.
        foreach (KeyValuePair <string, lockableVMSpec> kvp in VMNames)
        {
            string vmName = kvp.Key;
            lockableVMSpec vmSpec = kvp.Value;

            string sql_getVM = "select bladeOwnership.*, vmConfiguration.* from vmConfiguration " +
                               " join bladeOwnership on bladeOwnership.ownershipKey = vmConfiguration.ownershipID " +
                               " join bladeConfiguration on parentbladeID = bladeConfigKey " +
                               " where VMIP = $vmIP";
            using (SQLiteCommand cmd = new SQLiteCommand(sql_getVM, conn))
            {
                cmd.Parameters.AddWithValue("$vmIP", vmName);
                using (SQLiteDataReader reader = cmd.ExecuteReader())
                {
                    if (!reader.Read())
                    {
                        throw new Exception("VM disappeared, even though we hold lockVMCreation on the parent!");
                    }

                    vmSpec.setSpec(new vmSpec(conn, reader, readLock, writeLock));
                    toRet.Add(vmSpec);
                }
            }
        }
    }
    catch (Exception)
    {
        // Release every VM lock we took before propagating - callers will never see toRet.
        foreach (KeyValuePair <string, lockableVMSpec> kvp in VMNames)
        {
            kvp.Value.Dispose();
        }
        throw;
    }
    return toRet;
}
// FIXME: code duplication
// Looks up a blade by IP and returns it locked with (at least) the requested locks.
// Extra read locks (IP addresses, vmDeployState, BIOS) are taken temporarily: IPs because
// the lookup is keyed on them, and the deploy/BIOS locks so the access-permission checks
// below read consistent state. Any of those the caller did not ask for are downgraded
// again before returning. Throws if the blade is mid-deployment/mid-BIOS-flash and the
// corresponding permitAccess* flag is false; throws bladeNotFoundException if the IP is
// unknown. On any failure the partially-constructed lock is disposed.
public lockableBladeSpec getBladeByIP(string IP, bladeLockType readLock, bladeLockType writeLock, bool permitAccessDuringBIOS = false, bool permitAccessDuringDeployment = false)
{
    // Remember what the caller actually asked for (write implies read), so we know which
    // of the extra locks to drop later.
    bladeLockType origReadLock = readLock | writeLock;
    readLock = origReadLock;

    // We need to lock IP addressess, since we're searching by them.
    readLock = readLock | bladeLockType.lockIPAddresses;
    readLock = readLock | bladeLockType.lockvmDeployState;
    readLock = readLock | bladeLockType.lockBIOS;

    lockableBladeSpec toRet = null;
    try
    {
        // Acquires the locks; the spec itself is attached below once read from the DB.
        toRet = new lockableBladeSpec(IP, readLock, writeLock);

        string sqlCommand = "select * from bladeOwnership " +
                            "join bladeConfiguration on ownershipKey = bladeConfiguration.ownershipID " +
                            "where bladeIP = $bladeIP";
        using (SQLiteCommand cmd = new SQLiteCommand(sqlCommand, conn))
        {
            cmd.Parameters.AddWithValue("$bladeIP", IP);
            using (SQLiteDataReader reader = cmd.ExecuteReader())
            {
                if (reader.Read())
                {
                    bladeSpec newSpec = new bladeSpec(conn, reader, readLock, writeLock);
                    toRet.setSpec(newSpec);

                    // Refuse access while a VM deployment is in flight, unless permitted.
                    if ((!permitAccessDuringDeployment) &&
                        newSpec.vmDeployState != VMDeployStatus.notBeingDeployed &&
                        newSpec.vmDeployState != VMDeployStatus.failed &&
                        newSpec.vmDeployState != VMDeployStatus.readyForDeployment)
                    {
                        throw new Exception("Attempt to access blade during VM deployment");
                    }
                    // Refuse access while a BIOS flash is in flight, unless permitted.
                    if ((!permitAccessDuringBIOS) && newSpec.currentlyHavingBIOSDeployed)
                    {
                        throw new Exception("Attempt to access blade during BIOS deployment");
                    }

                    // Drop the locks we only took for the checks above, if the caller
                    // didn't ask for them itself.
                    if ((origReadLock & bladeLockType.lockvmDeployState) == 0 &&
                        (writeLock & bladeLockType.lockvmDeployState) == 0)
                    {
                        toRet.downgradeLocks(bladeLockType.lockvmDeployState, bladeLockType.lockNone);
                    }
                    if ((origReadLock & bladeLockType.lockBIOS) == 0 &&
                        (writeLock & bladeLockType.lockBIOS) == 0)
                    {
                        toRet.downgradeLocks(bladeLockType.lockBIOS, bladeLockType.lockNone);
                    }

                    // Track the lock so leaks can be reported against the current session.
                    leakCheckerInspector.monitorDisposable(toRet);
                    return toRet;
                }
                // No records returned.
                throw new bladeNotFoundException();
            }
        }
    }
    catch (Exception)
    {
        // Release the locks before propagating; the caller never sees toRet.
        if (toRet != null)
        {
            toRet.Dispose();
        }
        throw;
    }
}
// Rebalances blade allocation: for each owner holding fewer blades than its fair share,
// finds a blade whose release would benefit it (queued for that owner, and currently
// held by an over-quota owner - or a VM server whose VMs all belong to over-quota
// owners) and requests its release. The "vmserver" pseudo-owner is excluded from quota
// arithmetic.
public override void checkFairness_blades(hostDB db, disposingList <lockableBladeSpec> blades)
{
    // If a blade owner is under its quota, then promote it in any queues where the current owner is over-quota.
    currentOwnerStat[] stats = db.getFairnessStats(blades);
    string[] owners = stats.Where(x => x.ownerName != "vmserver").Select(x => x.ownerName).ToArray();
    if (owners.Length == 0)
        return;

    float fairShare = (float)db.getAllBladeIP().Length / (float)owners.Length;
    currentOwnerStat[] ownersOverQuota = stats.Where(x => x.allocatedBlades > fairShare).ToArray();
    List <currentOwnerStat> ownersUnderQuota = stats.Where(x => x.allocatedBlades < fairShare).ToList();

    foreach (currentOwnerStat migrateTo in ownersUnderQuota)
    {
        var migratory = blades.Where(x =>
            (
                // Migrate if the dest is currently owned by someone over-quota
                (ownersOverQuota.Count(y => y.ownerName == x.spec.currentOwner) > 0) ||
                // Or if it is a VM server, and currently holds VMs that are _all_ allocated to over-quota users
                (
                    x.spec.currentOwner == "vmserver" &&
                    db.getVMByVMServerIP_nolocking(x.spec.bladeIP).All(vm =>
                        (ownersOverQuota.Count(overQuotaUser => overQuotaUser.ownerName == vm.currentOwner) > 0))
                )
            ) &&
            x.spec.nextOwner == migrateTo.ownerName &&
            (x.spec.state == bladeStatus.inUse || x.spec.state == bladeStatus.inUseByDirector)).ToList();

        if (migratory.Count == 0)
        {
            // There is nowhere to migrate this owner from. Try another owner.
            continue;
        }

        // Since migration cannot fail, we just take the first potential.
        // TODO: should we prefer non VM-servers here?
        lockableBladeSpec newHost = migratory.First();
        newHost.spec.nextOwner = migrateTo.ownerName;
        if (newHost.spec.currentlyBeingAVMServer)
        {
            // It's a VM server. Migrate all the VMs off it (ie, request them to be destroyed).
            // (Fix: nextOwner was previously assigned a second, redundant time after this loop.)
            using (disposingList <lockableVMSpec> childVMs = db.getVMByVMServerIP(newHost, bladeLockType.lockNone, bladeLockType.lockOwnership))
            {
                foreach (lockableVMSpec VM in childVMs)
                {
                    Debug.WriteLine("Requesting release for VM " + VM.spec.VMIP);
                    VM.spec.state = bladeStatus.releaseRequested;
                }
            }
            newHost.spec.state = bladeStatus.releaseRequested;
        }
        else
        {
            // It's a physical server. Just mark it as .releaseRequested.
            Debug.WriteLine("Requesting release for blade " + newHost.spec.bladeIP);
            newHost.spec.state = bladeStatus.releaseRequested;
        }
    }
}