diff --git a/lib/cmdlib.py b/lib/cmdlib.py
index 4dfd6688f11e76e16ebb6f6e9a84441fc575c546..935827ba6bae211cd40ad46c848948220250b58e 100644
--- a/lib/cmdlib.py
+++ b/lib/cmdlib.py
@@ -13856,47 +13856,16 @@ class LUInstanceSetParams(LogicalUnit):
                                   self.op.disk_template))
       raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
 
-  def CheckPrereq(self):
-    """Check prerequisites.
+  def _PreCheckDisks(self, ispec):
+    """CheckPrereq checks related to disk changes.
 
-    This only checks the instance list against the existing names.
+    @type ispec: dict
+    @param ispec: instance specs to be updated with the new disks
 
     """
-    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
-    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
-
-    cluster = self.cluster = self.cfg.GetClusterInfo()
-    assert self.instance is not None, \
-      "Cannot retrieve locked instance %s" % self.op.instance_name
-
-    pnode = instance.primary_node
-
-    self.warn = []
-
-    if (self.op.pnode is not None and self.op.pnode != pnode and
-        not self.op.force):
-      # verify that the instance is not up
-      instance_info = self.rpc.call_instance_info(pnode, instance.name,
-                                                  instance.hypervisor)
-      if instance_info.fail_msg:
-        self.warn.append("Can't get instance runtime information: %s" %
-                         instance_info.fail_msg)
-      elif instance_info.payload:
-        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
-                                   errors.ECODE_STATE)
-
-    assert pnode in self.owned_locks(locking.LEVEL_NODE)
-    nodelist = list(instance.all_nodes)
-    pnode_info = self.cfg.GetNodeInfo(pnode)
+    instance = self.instance
     self.diskparams = self.cfg.GetInstanceDiskParams(instance)
 
-    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
-    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
-    group_info = self.cfg.GetNodeGroup(pnode_info.group)
-
-    # dictionary with instance information after the modification
-    ispec = {}
-
     # Check disk modifications. This is done here and not in CheckArguments
     # (as with NICs), because we need to know the instance's disk template
     if instance.disk_template == constants.DT_EXT:
@@ -13906,9 +13875,7 @@ class LUInstanceSetParams(LogicalUnit):
       self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
                       self._VerifyDiskModification)
 
-    # Prepare disk/NIC modifications
     self.diskmod = PrepareContainerMods(self.op.disks, None)
-    self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
 
     # Check the validity of the `provider' parameter
     if instance.disk_template in constants.DT_EXT:
@@ -13937,6 +13904,75 @@ class LUInstanceSetParams(LogicalUnit):
                                                   constants.DT_EXT),
                                   errors.ECODE_INVAL)
 
+    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
+      raise errors.OpPrereqError("Disk operations not supported for"
+                                 " diskless instances", errors.ECODE_INVAL)
+
+    def _PrepareDiskMod(_, disk, params, __):
+      disk.name = params.get(constants.IDISK_NAME, None)
+
+    # Verify disk changes (operating on a copy)
+    disks = copy.deepcopy(instance.disks)
+    ApplyContainerMods("disk", disks, None, self.diskmod, None, _PrepareDiskMod,
+                       None)
+    utils.ValidateDeviceNames("disk", disks)
+    if len(disks) > constants.MAX_DISKS:
+      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
+                                 " more" % constants.MAX_DISKS,
+                                 errors.ECODE_STATE)
+    disk_sizes = [disk.size for disk in instance.disks]
+    disk_sizes.extend(params["size"] for (op, idx, params, private) in
+                      self.diskmod if op == constants.DDM_ADD)
+    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
+    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
+
+    if self.op.offline is not None and self.op.offline:
+      _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
+                          msg="can't change to offline")
+
+  def CheckPrereq(self):
+    """Check prerequisites.
+
+    This only checks the instance list against the existing names.
+
+    """
+    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
+    instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
+
+    cluster = self.cluster = self.cfg.GetClusterInfo()
+    assert self.instance is not None, \
+      "Cannot retrieve locked instance %s" % self.op.instance_name
+
+    pnode = instance.primary_node
+
+    self.warn = []
+
+    if (self.op.pnode is not None and self.op.pnode != pnode and
+        not self.op.force):
+      # verify that the instance is not up
+      instance_info = self.rpc.call_instance_info(pnode, instance.name,
+                                                  instance.hypervisor)
+      if instance_info.fail_msg:
+        self.warn.append("Can't get instance runtime information: %s" %
+                         instance_info.fail_msg)
+      elif instance_info.payload:
+        raise errors.OpPrereqError("Instance is still running on %s" % pnode,
+                                   errors.ECODE_STATE)
+
+    assert pnode in self.owned_locks(locking.LEVEL_NODE)
+    nodelist = list(instance.all_nodes)
+    pnode_info = self.cfg.GetNodeInfo(pnode)
+
+    #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups)
+    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
+    group_info = self.cfg.GetNodeGroup(pnode_info.group)
+
+    # dictionary with instance information after the modification
+    ispec = {}
+
+    # Prepare NIC modifications
+    self.nicmod = PrepareContainerMods(self.op.nics, _InstNicModPrivate)
+
     # OS change
     if self.op.os_name and not self.op.force:
       _CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
@@ -13951,6 +13987,8 @@ class LUInstanceSetParams(LogicalUnit):
     if self.op.disk_template:
       self._PreCheckDiskTemplate(pnode_info)
 
+    self._PreCheckDisks(ispec)
+
     # hvparams processing
     if self.op.hvparams:
       hv_type = instance.hypervisor
@@ -14105,10 +14143,6 @@ class LUInstanceSetParams(LogicalUnit):
                                "ballooning memory for instance %s" %
                                instance.name, delta, instance.hypervisor)
 
-    if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
-      raise errors.OpPrereqError("Disk operations not supported for"
-                                 " diskless instances", errors.ECODE_INVAL)
-
     def _PrepareNicCreate(_, params, private):
       self._PrepareNicModification(params, private, None, None,
                                    {}, cluster, pnode)
@@ -14134,28 +14168,6 @@ class LUInstanceSetParams(LogicalUnit):
                                  " (%d), cannot add more" % constants.MAX_NICS,
                                  errors.ECODE_STATE)
 
-    def _PrepareDiskMod(_, disk, params, __):
-      disk.name = params.get(constants.IDISK_NAME, None)
-
-    # Verify disk changes (operating on a copy)
-    disks = copy.deepcopy(instance.disks)
-    ApplyContainerMods("disk", disks, None, self.diskmod, None, _PrepareDiskMod,
-                       None)
-    utils.ValidateDeviceNames("disk", disks)
-    if len(disks) > constants.MAX_DISKS:
-      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
-                                 " more" % constants.MAX_DISKS,
-                                 errors.ECODE_STATE)
-    disk_sizes = [disk.size for disk in instance.disks]
-    disk_sizes.extend(params["size"] for (op, idx, params, private) in
-                      self.diskmod if op == constants.DDM_ADD)
-    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
-    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
-
-    if self.op.offline is not None and self.op.offline:
-      _CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
-                          msg="can't change to offline")
-
     # Pre-compute NIC changes (necessary to use result in hooks)
     self._nic_chgdesc = []
     if self.nicmod: