/// <summary>
/// Puts the resolved host into maintenance mode: disables it and, if any VMs are
/// running on it, live-migrates them away via <c>Host.async_evacuate</c>.
/// </summary>
/// <param name="session">XenAPI session; passed by ref because task polling may replace it.</param>
/// <exception cref="Exception">
/// Thrown with a user-friendly message when evacuation fails with
/// HOST_NOT_ENOUGH_FREE_MEMORY; any other <c>Failure</c> is rethrown unchanged.
/// </exception>
protected void EvacuateHost(ref Session session)
{
    var hostObj = GetResolvedHost();
    var vms = hostObj.GetRunningVMs();

    AddProgressStep(string.Format(Messages.UPDATES_WIZARD_ENTERING_MAINTENANCE_MODE, hostObj.Name()));

    log.DebugFormat("Disabling host {0}", hostObj.Name());
    Host.disable(session, HostXenRef.opaque_ref);

    if (vms.Count > 0)
    {
        // Make sure the VMs' storage is plugged before attempting migration.
        PBD.CheckPlugPBDsForVMs(Connection, vms);
        try
        {
            AddProgressStep(string.Format(Messages.PLANACTION_VMS_MIGRATING, hostObj.Name()));
            log.DebugFormat("Migrating VMs from host {0}", hostObj.Name());

            XenRef<Task> task = Host.async_evacuate(session, HostXenRef.opaque_ref);
            PollTaskForResultAndDestroy(Connection, ref session, task);
        }
        catch (Failure f)
        {
            if (f.ErrorDescription.Count > 0 && f.ErrorDescription[0] == Failure.HOST_NOT_ENOUGH_FREE_MEMORY)
            {
                // Typo corrected in the log message ("avacuated" -> "evacuated").
                log.WarnFormat("Host {0} cannot be evacuated: {1}", hostObj.Name(), f.Message);
                // Surface a friendlier message; keep the original failure as the inner exception.
                throw new Exception(string.Format(Messages.PLAN_ACTION_FAILURE_NOT_ENOUGH_MEMORY, hostObj.Name()), f);
            }
            throw;
        }
    }
}
/// <summary>
/// Disables the resolved host (entering maintenance mode) and, when it hosts
/// running VMs, migrates them all away with a single evacuate task.
/// </summary>
/// <param name="session">XenAPI session; passed by ref because task polling may replace it.</param>
protected void EvacuateHost(ref Session session)
{
    var hostObj = GetResolvedHost();
    var runningVms = hostObj.GetRunningVMs();

    AddProgressStep(string.Format(Messages.UPDATES_WIZARD_ENTERING_MAINTENANCE_MODE, hostObj.Name()));

    log.DebugFormat("Disabling host {0}", hostObj.Name());
    Host.disable(session, HostXenRef.opaque_ref);

    // Nothing to migrate; the host is disabled and we are done.
    if (runningVms.Count == 0)
        return;

    // Ensure the VMs' storage is plugged before attempting to move them.
    PBD.CheckPlugPBDsForVMs(Connection, runningVms);

    AddProgressStep(string.Format(Messages.PLANACTION_VMS_MIGRATING, hostObj.Name()));
    log.DebugFormat("Migrating VMs from host {0}", hostObj.Name());

    var evacuateTask = Host.async_evacuate(session, HostXenRef.opaque_ref);
    PollTaskForResultAndDestroy(Connection, ref session, evacuateTask);
}
/// <summary>
/// Executes the per-VM async task (DoPerVM) for every VM in <c>_vms</c> in order,
/// mapping each task's 0-100 progress onto an equal slice of the overall
/// PercentComplete, and always destroys the task server-side when done.
/// </summary>
/// <param name="session">XenAPI session; passed by ref because polling may replace it.</param>
protected override void RunWithSession(ref Session session)
{
    // Verify the VMs' storage is plugged before starting any work.
    PBD.CheckPlugPBDsForVMs(Connection, _vms);

    int total = _vms.Count;

    for (int index = 0; index < _vms.Count; index++)
    {
        XenRef<Task> task = DoPerVM(session, _vms[index]);

        try
        {
            // Capture the counter in a local so the progress lambda sees a
            // stable value rather than the mutating loop variable.
            int completed = index;
            PollTaskForResult(Connection, ref session, task,
                progress => PercentComplete = (progress + 100 * completed) / total);
        }
        finally
        {
            // The task must be cleaned up even if polling throws.
            Task.destroy(session, task);
        }
    }
}
/// <summary>
/// Waits for the host to be re-enabled, then migrates the given VMs back onto it.
/// The first migration is retried (storage may not be fully plugged yet); once one
/// migration succeeds, later failures are treated as real errors and rethrown.
/// </summary>
/// <param name="session">XenAPI session; passed by ref because task polling may replace it.</param>
/// <param name="vmrefs">VMs previously evacuated from this host that should return to it.</param>
/// <param name="enableOnly">When true, only wait for the host to become enabled; do not migrate.</param>
protected void BringBabiesBack(ref Session session, List<XenRef<VM>> vmrefs, bool enableOnly)
{
    // CA-17428: Apply hotfixes to a pool of hosts through XenCenter fails.
    // Hosts do reenable themselves anyway, so just wait 1 min for that,
    // occasionally poking it.
    WaitForHostToBecomeEnabled(session, true);

    // Nothing more to do if the caller only wanted the host enabled, or there
    // are no VMs to repatriate.
    if (enableOnly || vmrefs.Count == 0)
        return;

    // vmCount/vmNumber drive the progress slices passed to the task poller;
    // vmNumber also gates the retry logic below (0 == no VM migrated back yet).
    int vmCount = vmrefs.Count;
    int vmNumber = 0;

    var hostObj = GetResolvedHost();
    AddProgressStep(string.Format(Messages.PLAN_ACTION_STATUS_REPATRIATING_VMS, hostObj.Name()));

    // Verify the VMs' storage is plugged before attempting any migration.
    PBD.CheckPlugPBDsForVMs(Connection, vmrefs, true);

    foreach (var vmRef in vmrefs)
    {
        var vm = Connection.Resolve(vmRef);
        if (vm == null)
        {
            // The VM object has disappeared from the cache; skip it.
            continue;
        }

        int tries = 0;

        if (vm.power_state != vm_power_state.Running)
        {
            continue; // vm may have been shutdown or suspended.
        }

        do
        {
            tries++;

            try
            {
                log.DebugFormat("Migrating VM '{0}' back to Host '{1}'", vm.Name(), hostObj.Name());

                // Each VM gets an equal slice of the 0-100 progress range.
                PollTaskForResultAndDestroy(Connection, ref session,
                    VM.async_live_migrate(session, vm.opaque_ref, HostXenRef.opaque_ref),
                    (vmNumber * 100) / vmCount, ((vmNumber + 1) * 100) / vmCount);

                vmNumber++;
            }
            catch (Failure e)
            {
                // When trying to put the first vm back, we get all sorts
                // of errors ie storage not plugged yet etc. Just ignore them for now

                // Once one VM has made it back (vmNumber > 0), or we have retried
                // for ~2 minutes (24 tries x 5s), give up and propagate the failure.
                if (vmNumber > 0 || tries > 24)
                    throw;

                log.Debug(string.Format("Error migrating VM '{0}' back to Host '{1}'",
                    vm.Name(), hostObj.Name()), e);

                Thread.Sleep(5000);
            }
        } while (vmNumber == 0); // keep retrying only until the first VM succeeds
    }

    log.DebugFormat("Cleaning up evacuated VMs from Host '{0}'", hostObj.Name());
    // Clear the host's record of which VMs were evacuated from it.
    Host.ClearEvacuatedVMs(session, HostXenRef);
}