protected override void RunWithSession(ref Session session)
{
    Host host = TryResolveWithTimeout(_host);

    // On Ely or greater, livepatching may remove the need for a reboot, in which
    // case the evacuate-reboot-bringbabiesback sequence can be skipped. We only
    // track this when the caller opted in by initializing AvoidRestartHosts.
    if (Helpers.ElyOrGreater(host) && AvoidRestartHosts != null)
    {
        log.DebugFormat("Checking host.patches_requiring_reboot now on '{0}'...", host);

        if (host.patches_requiring_reboot.Count == 0)
        {
            // Livepatching succeeded: remember this host so its scheduled restart
            // (and this evacuation) can be skipped, then bail out early.
            if (!AvoidRestartHosts.Contains(host.uuid))
            {
                AvoidRestartHosts.Add(host.uuid);
            }

            log.Debug("Will skip scheduled restart (livepatching succeeded), because hostObject.patches_requiring_reboot is empty.");
            return;
        }

        // A reboot is still required, so the host must not be marked as skippable.
        AvoidRestartHosts.Remove(host.uuid);
        log.DebugFormat("Restart is needed now (hostObject.patches_requiring_reboot has {0} items in it). Evacuating now. Will restart after.", host.patches_requiring_reboot.Count);
    }

    visible = true;

    // Make sure the resident VMs' storage is plugged before migrating them away.
    PBD.CheckAndPlugPBDsFor(Connection.ResolveAll(host.resident_VMs));

    log.DebugFormat("Disabling host {0}", host.Name);
    Host.disable(session, _host.opaque_ref);

    Status = Messages.PLAN_ACTION_STATUS_MIGRATING_VMS_FROM_HOST;
    log.DebugFormat("Migrating VMs from host {0}", host.Name);

    XenRef<Task> evacuateTask = Host.async_evacuate(session, _host.opaque_ref);
    PollTaskForResultAndDestroy(Connection, ref session, evacuateTask);
}
/// <summary>
/// Re-enables the host and migrates the evacuated VMs (_vms) back onto it.
/// Skipped entirely when livepatching made the host's restart unnecessary
/// (the host's uuid is then present in AvoidRestartHosts).
/// </summary>
protected override void RunWithSession(ref Session session)
{
    // If there are no patches that require reboot, we skip the evacuate-reboot-bringbabiesback sequence
    if (Helpers.ElyOrGreater(currentHost) && AvoidRestartHosts != null && AvoidRestartHosts.Contains(currentHost.uuid))
    {
        log.Debug("Skipped scheduled restart (livepatching succeeded), BringBabiesBackAction is skipped.");
        return;
    }

    visible = true;

    // Reconnect storage first; VM migration below depends on the PBDs being plugged.
    // Best-effort: failures here are tolerated (the migration loop retries on Failure).
    Status = Messages.PLAN_ACTION_STATUS_RECONNECTING_STORAGE;
    PBD.CheckAndBestEffortPlugPBDsFor(Connection, _vms);

    //
    // CA-17428: Apply hotfixes to a pool of hosts through XenCenter fails.
    //
    // Host do reenable themselves anyway, so just wait 1 min for that,
    // occasionally poking it.
    //
    // NOTE(review): the cap below is 60 retries x 5 s sleep, i.e. ~5 minutes,
    // not 1 minute as the comment above says — confirm which is intended.
    int retries = 0;

    Status = Messages.PLAN_ACTION_STATUS_REENABLING_HOST;
    while (!Host.get_enabled(session, _host.opaque_ref))
    {
        retries++;

        Thread.Sleep(5000);

        try
        {
            Host.enable(session, _host.opaque_ref);
        }
        catch (Exception e)
        {
            // The retry limit is only enforced when the enable call itself throws;
            // if get_enabled keeps returning false without enable throwing, the
            // loop keeps polling.
            if (retries > 60)
            {
                throw;
            }

            log.Debug(string.Format("Cannot enable host {0}. Retrying in 5 sec.", _host.opaque_ref), e);
        }
    }

    // In enable-only mode the VMs are intentionally left where they are.
    if (_enableOnly)
    {
        return;
    }

    int vmCount = _vms.Count;
    int vmNumber = 0;

    foreach (VM vm in Connection.ResolveAll(_vms))
    {
        int tries = 0;

        if (vm.power_state != vm_power_state.Running)
        {
            continue; // vm may have been shutdown or suspended.
        }

        do
        {
            tries++;

            try
            {
                // Progress is reported as a window within 0..100, proportional to
                // how many of the VMs have been migrated back so far.
                Status = string.Format(Messages.PLAN_ACTION_STATUS_MIGRATING_VM_X_OF_Y, vmNumber + 1, vmCount);

                log.DebugFormat("Migrating VM '{0}' back to Host '{1}'", Helpers.GetName(vm), Helpers.GetName(Connection.Resolve(_host)));

                PollTaskForResultAndDestroy(Connection, ref session,
                    VM.async_live_migrate(session, vm.opaque_ref, _host.opaque_ref),
                    (vmNumber * 100) / vmCount, ((vmNumber + 1) * 100) / vmCount);

                vmNumber++;
            }
            catch (Failure e)
            {
                // When trying to put the first vm back, we get all sorts
                // of errors ie storage not plugged yet etc. Just ignore them for now

                // Only the FIRST migration is retried (up to 24 attempts, 5 s apart);
                // once one VM has made it back (vmNumber > 0) failures propagate.
                if (vmNumber > 0 || tries > 24)
                {
                    throw;
                }

                log.Debug(string.Format("Error migrating VM '{0}' back to Host '{1}'", Helpers.GetName(vm), Helpers.GetName(Connection.Resolve(_host))), e);

                Thread.Sleep(5000);
            }
        } while (vmNumber == 0); // keep retrying until the first VM succeeds
    }

    // Clear the host's record of evacuated VMs now that they have been returned.
    Host hostModelObject = Connection.Resolve(_host);
    if (hostModelObject != null)
    {
        log.DebugFormat("Cleaning up evacuated VMs from Host '{0}'", hostModelObject.Name());
        hostModelObject.ClearEvacuatedVMs(session);
    }
}