Package ganeti :: Package cmdlib :: Module instance_set_params
[hide private]
[frames] | [no frames]

Source Code for Module ganeti.cmdlib.instance_set_params

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30  """Logical unit setting parameters of a single instance.""" 
  31   
  32  import copy 
  33  import logging 
  34  import os 
  35   
  36  from ganeti import compat 
  37  from ganeti import constants 
  38  from ganeti import errors 
  39  from ganeti import ht 
  40  from ganeti import hypervisor 
  41  from ganeti import locking 
  42  from ganeti.masterd import iallocator 
  43  from ganeti import netutils 
  44  from ganeti import objects 
  45  from ganeti import utils 
  46  import ganeti.rpc.node as rpc 
  47   
  48  from ganeti.cmdlib.base import LogicalUnit 
  49   
  50  from ganeti.cmdlib.common import INSTANCE_DOWN, \ 
  51    INSTANCE_NOT_RUNNING, CheckNodeOnline, \ 
  52    CheckParamsNotGlobal, \ 
  53    IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \ 
  54    GetUpdatedParams, CheckInstanceState, ExpandNodeUuidAndName, \ 
  55    IsValidDiskAccessModeCombination, AnnotateDiskParams, \ 
  56    CheckIAllocatorOrNode 
  57  from ganeti.cmdlib.instance_storage import CalculateFileStorageDir, \ 
  58    CheckDiskExtProvider, CheckNodesFreeDiskPerVG, CheckRADOSFreeSpace, \ 
  59    CheckSpindlesExclusiveStorage, ComputeDiskSizePerVG, ComputeDisksInfo, \ 
  60    CreateDisks, CreateSingleBlockDev, GenerateDiskTemplate, \ 
  61    IsExclusiveStorageEnabledNodeUuid, ShutdownInstanceDisks, \ 
  62    WaitForSync, WipeOrCleanupDisks, AssembleInstanceDisks 
  63  from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \ 
  64    NICToTuple, CheckNodeNotDrained, CopyLockList, \ 
  65    ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \ 
  66    GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \ 
  67    UpdateMetadata, CheckForConflictingIp, \ 
  68    PrepareContainerMods, ComputeInstanceCommunicationNIC, \ 
  69    ApplyContainerMods, ComputeIPolicyInstanceSpecViolation, \ 
  70    CheckNodesPhysicalCPUs 
  71  import ganeti.masterd.instance 
class InstNicModPrivate(object):
  """Per-NIC scratch space for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    # Requested NIC parameters and their cluster-filled counterpart; both
    # are populated later, during the modification's prereq phase.
    self.params = self.filled = None
84 85 -class LUInstanceSetParams(LogicalUnit):
86 """Modifies an instances's parameters. 87 88 """ 89 HPATH = "instance-modify" 90 HTYPE = constants.HTYPE_INSTANCE 91 REQ_BGL = False 92
93 - def GenericGetDiskInfo(self, uuid=None, name=None):
94 """Find a disk object using the provided params. 95 96 Accept arguments as keywords and use the GetDiskInfo/GetDiskInfoByName 97 config functions to retrieve the disk info based on these arguments. 98 99 In case of an error, raise the appropriate exceptions. 100 """ 101 if uuid: 102 disk = self.cfg.GetDiskInfo(uuid) 103 if disk is None: 104 raise errors.OpPrereqError("No disk was found with this UUID: %s" % 105 uuid, errors.ECODE_INVAL) 106 elif name: 107 disk = self.cfg.GetDiskInfoByName(name) 108 if disk is None: 109 raise errors.OpPrereqError("No disk was found with this name: %s" % 110 name, errors.ECODE_INVAL) 111 else: 112 raise errors.ProgrammerError("No disk UUID or name was given") 113 114 return disk
115 116 @staticmethod
117 - def _UpgradeDiskNicMods(kind, mods, verify_fn):
118 assert ht.TList(mods) 119 assert not mods or len(mods[0]) in (2, 3) 120 121 if mods and len(mods[0]) == 2: 122 result = [] 123 124 addremove = 0 125 for op, params in mods: 126 if op in (constants.DDM_ADD, constants.DDM_ATTACH, 127 constants.DDM_REMOVE, constants.DDM_DETACH): 128 result.append((op, -1, params)) 129 addremove += 1 130 131 if addremove > 1: 132 raise errors.OpPrereqError("Only one %s add/attach/remove/detach " 133 "operation is supported at a time" % 134 kind, errors.ECODE_INVAL) 135 else: 136 result.append((constants.DDM_MODIFY, op, params)) 137 138 assert verify_fn(result) 139 else: 140 result = mods 141 return result
  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    Note that the 'attach' action needs a way to refer to the UUID of the disk,
    since the disk name is not unique cluster-wide. However, the UUID of the
    disk is not settable but rather generated by Ganeti automatically,
    therefore it cannot be passed as an IDISK parameter. For this reason, this
    function will override the checks to accept uuid parameters solely for the
    attach action.

    @type kind: string
    @param kind: "disk" or "NIC", used only in error messages
    @type mods: list
    @param mods: (op, identifier, params) modification tuples to validate
    @type key_types: dict
    @param key_types: allowed parameter names mapped to their expected types;
        an empty dict skips type enforcement (used for 'ext' templates)
    @type item_fn: callable
    @param item_fn: per-item verification hook, invoked as item_fn(op, params)
        for add/attach/modify operations

    @raise errors.OpPrereqError: if parameters are passed to remove/detach
        operations, or params fail type checking

    """
    # Create a key_types copy with the 'uuid' as a valid key type.
    key_types_attach = key_types.copy()
    key_types_attach['uuid'] = 'string'

    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, (key_types if op != constants.DDM_ATTACH
                                     else key_types_attach))

      if op in (constants.DDM_REMOVE, constants.DDM_DETACH):
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing or detaching a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_ATTACH,
                  constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)
177
  def _VerifyDiskModification(self, op, params, excl_stor, group_access_types):
    """Verifies a disk modification.

    Depending on C{op}, this validates (and normalizes in-place) the
    parameters of an add, attach or modify disk operation.

    @param op: one of the C{constants.DDM_*} disk operations
    @type params: dict
    @param params: disk parameters of the modification; may be mutated
        (defaults filled in, C{VALUE_NONE} names cleared)
    @type excl_stor: boolean
    @param excl_stor: whether exclusive storage is in use on any instance node
    @type group_access_types: dict
    @param group_access_types: per-template default disk access type of the
        primary node's group

    @raise errors.OpPrereqError: if the requested modification is invalid

    """
    # Fall back to the instance's current template when no explicit disk
    # type is requested.
    disk_type = params.get(
        constants.IDISK_TYPE,
        self.cfg.GetInstanceDiskTemplate(self.instance.uuid))

    if op == constants.DDM_ADD:
      params[constants.IDISK_TYPE] = disk_type

      if disk_type == constants.DT_DISKLESS:
        raise errors.OpPrereqError(
            "Must specify disk type on diskless instance", errors.ECODE_INVAL)

      # 'ext' disks accept provider-specific keys, so skip strict typing.
      if disk_type != constants.DT_EXT:
        utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)

      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)
      size = int(size)

      params[constants.IDISK_SIZE] = size
      # A name of "none" means "clear the name".
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

    # This check is necessary both when adding and attaching disks
    if op in (constants.DDM_ADD, constants.DDM_ATTACH):
      CheckSpindlesExclusiveStorage(params, excl_stor, True)
      CheckDiskExtProvider(params, disk_type)

      # Make sure we do not add syncing disks to instances with inactive disks
      if not self.op.wait_for_sync and not self.instance.disks_active:
        raise errors.OpPrereqError("Can't %s a disk to an instance with"
                                   " deactivated disks and --no-wait-for-sync"
                                   " given" % op, errors.ECODE_INVAL)

      # Check disk access param (only for specific disks)
      if disk_type in constants.DTS_HAVE_ACCESS:
        access_type = params.get(constants.IDISK_ACCESS,
                                 group_access_types[disk_type])
        if not IsValidDiskAccessModeCombination(self.instance.hypervisor,
                                                disk_type, access_type):
          raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                     " used with %s disk access param" %
                                     (self.instance.hypervisor, access_type),
                                     errors.ECODE_STATE)

    if op == constants.DDM_ATTACH:
      # An attach targets exactly one existing disk, identified either by
      # uuid or by name.
      if len(params) != 1 or ('uuid' not in params and
                              constants.IDISK_NAME not in params):
        raise errors.OpPrereqError("Only one argument is permitted in %s op,"
                                   " either %s or uuid" % (constants.DDM_ATTACH,
                                                           constants.IDISK_NAME,
                                                           ),
                                   errors.ECODE_INVAL)
      self._CheckAttachDisk(params)

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)

      disk_info = self.cfg.GetInstanceDisks(self.instance.uuid)

      # Disk modification supports changing only the disk name and mode.
      # Changing arbitrary parameters is allowed only for ext disk template.
      if not utils.AllDiskOfType(disk_info, [constants.DT_EXT]):
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
      else:
        # We have to check that the 'access' and 'disk_provider' parameters
        # cannot be modified
        for param in [constants.IDISK_ACCESS, constants.IDISK_PROVIDER]:
          if param in params:
            raise errors.OpPrereqError("Disk '%s' parameter change is"
                                       " not possible" % param,
                                       errors.ECODE_INVAL)

      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None
  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    Normalizes C{params} in place: "none" strings become C{None}, a missing
    MAC on add becomes C{VALUE_AUTO}, and invalid combinations raise.

    @param op: one of the C{constants.DDM_*} operations
    @type params: dict
    @param params: NIC parameters of the modification; may be mutated

    @raise errors.OpPrereqError: if the requested modification is invalid

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          # A network brings its own link/mode via the group's netparams.
          raise errors.OpPrereqError("If network is given"
                                     " mode or link should not",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            # Pool-allocated IPs require a network to draw from.
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)
318
319 - def _LookupDiskIndex(self, idx):
320 """Looks up uuid or name of disk if necessary.""" 321 try: 322 return int(idx) 323 except ValueError: 324 pass 325 for i, d in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)): 326 if d.name == idx or d.uuid == idx: 327 return i 328 raise errors.OpPrereqError("Lookup of disk %r failed" % idx)
329
330 - def _LookupDiskMods(self):
331 """Looks up uuid or name of disk if necessary.""" 332 return [(op, self._LookupDiskIndex(idx), params) 333 for op, idx, params in self.op.disks]
334
  def CheckArguments(self):
    """Verify the opcode's arguments before taking any locks.

    Rejects no-op requests, upgrades legacy disk/NIC mods, performs the
    disk-template conversion pre-checks and the NIC modification checks.

    @raise errors.OpPrereqError: if the opcode arguments are invalid

    """
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
            self.op.instance_communication is not None):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    # Upgrade legacy (op, params) mods to (op, identifier, params).
    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks,
      ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    # Check disk template modifications
    if self.op.disk_template:
      if self.op.disks:
        raise errors.OpPrereqError("Disk template conversion and other disk"
                                   " changes not supported at the same time",
                                   errors.ECODE_INVAL)

      # mirrored template node checks
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        CheckIAllocatorOrNode(self, "iallocator", "remote_node")
      elif self.op.remote_node:
        self.LogWarning("Changing the disk template to a non-mirrored one,"
                        " the secondary node will be ignored")
        # the secondary node must be cleared in order to be ignored, otherwise
        # the operation will fail, in the GenerateDiskTemplate method
        self.op.remote_node = None

      # file-based template checks
      if self.op.disk_template in constants.DTS_FILEBASED:
        self._FillFileDriver()

    # Check NIC modifications (disk mods are checked later, in _PreCheckDisks,
    # because they need the instance's disk template).
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
381
  def _CheckAttachDisk(self, params):
    """Check if disk can be attached to an instance.

    Check if the disk and instance have the same template. Also, check if the
    disk nodes are visible from the instance.

    @type params: dict
    @param params: attach parameters; contains 'uuid' or
        C{constants.IDISK_NAME} identifying the disk

    @raise errors.OpPrereqError: if the templates do not match or the disk's
        nodes do not cover the instance's nodes

    """
    uuid = params.get("uuid", None)
    name = params.get(constants.IDISK_NAME, None)

    disk = self.GenericGetDiskInfo(uuid, name)
    instance_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
    # A diskless instance may take a disk of any template.
    if (disk.dev_type != instance_template and
        instance_template != constants.DT_DISKLESS):
      raise errors.OpPrereqError("Instance has '%s' template while disk has"
                                 " '%s' template" %
                                 (instance_template, disk.dev_type),
                                 errors.ECODE_INVAL)

    instance_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
    # Make sure we do not attach disks to instances on wrong nodes. If the
    # instance is diskless, that instance is associated only to the primary
    # node, whereas the disk can be associated to two nodes in the case of
    # DRBD, hence, we have a subset check here.
    if disk.nodes and not set(instance_nodes).issubset(set(disk.nodes)):
      raise errors.OpPrereqError("Disk nodes are %s while the instance's nodes"
                                 " are %s" %
                                 (disk.nodes, instance_nodes),
                                 errors.ECODE_INVAL)
    # Make sure a DRBD disk has the same primary node as the instance where it
    # will be attached to.
    # NOTE(review): this branch raises OpExecError in a prereq-phase check,
    # unlike the OpPrereqError used above — confirm this is intentional.
    disk_primary = disk.GetPrimaryNode(self.instance.primary_node)
    if self.instance.primary_node != disk_primary:
      raise errors.OpExecError("The disks' primary node is %s whereas the "
                               "instance's primary node is %s."
                               % (disk_primary, self.instance.primary_node))
417
  def ExpandNames(self):
    """Expand the instance name and declare the locks this LU needs.

    Node and node group lock lists start empty here and are filled in
    per-level by L{DeclareLocks}.

    """
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    # Look node group to look up the ipolicy
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
    self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
    self.dont_collate_locks[locking.LEVEL_NODE] = True
    self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
431
  def DeclareLocks(self, level):
    """Compute the lock lists for the given locking level.

    @param level: the locking level being acquired (one of
        C{locking.LEVEL_*})

    """
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically. Needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        # An explicit secondary for the conversion: lock it too.
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
      elif self.op.disk_template in constants.DTS_INT_MIRROR:
        # If we have to find the secondary node for a conversion to DRBD,
        # close node locks to the whole node group.
        self.needed_locks[locking.LEVEL_NODE] = \
          list(self.cfg.GetNodeGroupMembersByNodes(
            self.needed_locks[locking.LEVEL_NODE]))
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
456
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on the master, primary and secondaries.

    @rtype: dict
    @return: hook environment with the modified memory/vcpu/NIC settings

    """
    args = {}
    if constants.BE_MINMEM in self.be_new:
      args["minmem"] = self.be_new[constants.BE_MINMEM]
    if constants.BE_MAXMEM in self.be_new:
      args["maxmem"] = self.be_new[constants.BE_MAXMEM]
    if constants.BE_VCPUS in self.be_new:
      args["vcpus"] = self.be_new[constants.BE_VCPUS]
    # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk
    # information at all.

    if self._new_nics is not None:
      nics = []

      for nic in self._new_nics:
        # Deep-copy so filling in cluster defaults does not touch the
        # original NIC objects.
        n = copy.deepcopy(nic)
        nicparams = self.cluster.SimpleFillNIC(n.nicparams)
        n.nicparams = nicparams
        nics.append(NICToTuple(self, n))

      args["nics"] = nics

    env = BuildInstanceHookEnvByObject(self, self.instance, override=args)
    if self.op.disk_template:
      env["NEW_DISK_TEMPLATE"] = self.op.disk_template
    if self.op.runtime_mem:
      env["RUNTIME_MEMORY"] = self.op.runtime_mem

    return env
491
492 - def BuildHooksNodes(self):
493 """Build hooks nodes. 494 495 """ 496 nl = [self.cfg.GetMasterNode()] + \ 497 list(self.cfg.GetInstanceNodes(self.instance.uuid)) 498 return (nl, nl)
499
  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):
    """Validate a NIC modification and compute its new parameters.

    Handles network switches (netparams lookup, MAC prefix changes, IP
    reservation/release) and stores the results on C{private}.

    @type params: dict
    @param params: requested NIC parameters; may be mutated (MAC/IP
        generation)
    @type private: L{InstNicModPrivate}
    @param private: scratch object; C{params} and C{filled} are set here
    @param old_ip: the NIC's current IP address (or None)
    @param old_net_uuid: the NIC's current network UUID (or None)
    @type old_params: dict
    @param old_params: the NIC's current parameters
    @param cluster: the cluster configuration object
    @param pnode_uuid: UUID of the instance's primary node

    @raise errors.OpPrereqError: if the requested modification is invalid

    """
    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      # A network dictates link/mode through the group's netparams.
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      # Verify the bridge exists on the primary node; --force downgrades
      # the failure to a warning.
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
              (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None and not new_net_uuid:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC if not attached to a"
                                   " network", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        # Returns the network's MAC prefix, or None when no network is set.
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      # Changing networks may require a new MAC if the prefixes differ.
      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve new IP if in the new network if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
                               check=self.op.conflicts_check)
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params
642
  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template.

    Validates the requested disk template conversion: convertibility,
    cluster/group policy, secondary node suitability and node capacity.
    Also computes L{self.disks_info} for the new template.

    @param pnode_info: node object of the instance's primary node

    @raise errors.OpPrereqError: if the conversion is not allowed

    """
    # Arguments are passed to avoid configuration lookups
    pnode_uuid = self.instance.primary_node

    # TODO make sure heterogeneous disk types can be converted.
    disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
    if disk_template == constants.DT_MIXED:
      raise errors.OpPrereqError(
        "Conversion from mixed is not yet supported.")

    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    if utils.AnyDiskOfType(inst_disks, constants.DTS_NOT_CONVERTIBLE_FROM):
      raise errors.OpPrereqError(
        "Conversion from the '%s' disk template is not supported"
        % self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
        errors.ECODE_INVAL)

    elif self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO:
      raise errors.OpPrereqError("Conversion to the '%s' disk template is"
                                 " not supported" % self.op.disk_template,
                                 errors.ECODE_INVAL)

    if (self.op.disk_template != constants.DT_EXT and
        utils.AllDiskOfType(inst_disks, [self.op.disk_template])):
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.op.disk_template, errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      enabled_dts = utils.CommaJoin(self.cluster.enabled_disk_templates)
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster (enabled templates: %s)" %
                                 (self.op.disk_template, enabled_dts),
                                 errors.ECODE_STATE)

    default_vg = self.cfg.GetVGName()
    if (not default_vg and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Disk template conversions to lvm-based"
                                 " instances are not supported by the cluster",
                                 errors.ECODE_STATE)

    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")

    # compute new disks' information
    self.disks_info = ComputeDisksInfo(inst_disks, self.op.disk_template,
                                       default_vg, self.op.ext_params)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR \
        and self.op.remote_node_uuid:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      CheckNodeVmCapable(self, self.op.remote_node_uuid)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    # check that the template is in the primary node group's allowed templates
    pnode_group = self.cfg.GetNodeGroup(pnode_info.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                            pnode_group)
    allowed_dts = ipolicy[constants.IPOLICY_DTS]
    if self.op.disk_template not in allowed_dts:
      raise errors.OpPrereqError("Disk template '%s' in not allowed (allowed"
                                 " templates: %s)" % (self.op.disk_template,
                                                      utils.CommaJoin(
                                                        allowed_dts)),
                                 errors.ECODE_STATE)

    if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR \
          and self.op.remote_node_uuid:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (
                    self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
                    self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

    # TODO remove setting the disk template after DiskSetParams exists.
    # node capacity checks
    if (self.op.disk_template == constants.DT_PLAIN and
        utils.AllDiskOfType(inst_disks, [constants.DT_DRBD8])):
      # we ensure that no capacity checks will be made for conversions from
      # the 'drbd' to the 'plain' disk template
      pass
    elif (self.op.disk_template == constants.DT_DRBD8 and
          utils.AllDiskOfType(inst_disks, [constants.DT_PLAIN])):
      # for conversions from the 'plain' to the 'drbd' disk template, check
      # only the remote node's capacity
      if self.op.remote_node_uuid:
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template,
                                         self.disks_info)
        CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], req_sizes)
    elif self.op.disk_template in constants.DTS_LVM:
      # rest lvm-based capacity checks
      node_uuids = [pnode_uuid]
      if self.op.remote_node_uuid:
        node_uuids.append(self.op.remote_node_uuid)
      req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info)
      CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
    elif self.op.disk_template == constants.DT_RBD:
      # CheckRADOSFreeSpace() is simply a placeholder
      CheckRADOSFreeSpace()
    elif self.op.disk_template == constants.DT_EXT:
      # FIXME: Capacity checks for extstorage template, if exists
      pass
    else:
      # FIXME: Checks about other non lvm-based disk templates
      pass
771
  def _PreCheckDisks(self, ispec):
    """CheckPrereq checks related to disk changes.

    Verifies the requested disk modifications, pre-parses them into
    C{self.diskmod} and fills C{ispec} with the resulting disk count
    and sizes.

    @type ispec: dict
    @param ispec: instance specs to be updated with the new disks

    """
    self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)

    # Exclusive storage counts as enabled if any node used by the
    # instance has it enabled
    inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
    excl_stor = compat.any(
      rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes).values()
      )

    # Get the group access type
    node_info = self.cfg.GetNodeInfo(self.instance.primary_node)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    group_disk_params = self.cfg.GetGroupDiskParams(node_group)

    # Map every disk template to its access type; defaults to kernelspace
    group_access_types = dict(
      (dt, group_disk_params[dt].get(
        constants.RBD_ACCESS, constants.DISK_KERNELSPACE))
      for dt in constants.DISK_TEMPLATES)

    # Check disk modifications. This is done here and not in CheckArguments
    # (as with NICs), because we need to know the instance's disk template
    ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor,
                                                          group_access_types)
    # Don't enforce param types here in case it's an ext disk added. The check
    # happens inside _VerifyDiskModification.
    self._CheckMods("disk", self.op.disks, {}, ver_fn)

    self.diskmod = PrepareContainerMods(self.op.disks, None)

    def _PrepareDiskMod(_, disk, params, __):
      # Only the device name is applied here; used for the validation below
      disk.name = params.get(constants.IDISK_NAME, None)

    # Verify disk changes (operating on a copy)
    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    disks = copy.deepcopy(inst_disks)
    ApplyContainerMods("disk", disks, None, self.diskmod, None, None,
                       _PrepareDiskMod, None, None)
    utils.ValidateDeviceNames("disk", disks)
    if len(disks) > constants.MAX_DISKS:
      raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
                                 " more" % constants.MAX_DISKS,
                                 errors.ECODE_STATE)
    # Sizes of the current disks plus those of all disks being added
    disk_sizes = [disk.size for disk in inst_disks]
    disk_sizes.extend(params["size"] for (op, idx, params, private) in
                      self.diskmod if op == constants.DDM_ADD)
    ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
    ispec[constants.ISPEC_DISK_SIZE] = disk_sizes

    # either --online or --offline was passed
    if self.op.offline is not None:
      if self.op.offline:
        msg = "can't change to offline without being down first"
      else:
        msg = "can't change to online (down) without being offline first"
      CheckInstanceState(self, self.instance, INSTANCE_NOT_RUNNING,
                         msg=msg)

  @staticmethod
835 - def _InstanceCommunicationDDM(cfg, instance_communication, instance):
836 """Create a NIC mod that adds or removes the instance 837 communication NIC to a running instance. 838 839 The NICS are dynamically created using the Dynamic Device 840 Modification (DDM). This function produces a NIC modification 841 (mod) that inserts an additional NIC meant for instance 842 communication in or removes an existing instance communication NIC 843 from a running instance, using DDM. 844 845 @type cfg: L{config.ConfigWriter} 846 @param cfg: cluster configuration 847 848 @type instance_communication: boolean 849 @param instance_communication: whether instance communication is 850 enabled or disabled 851 852 @type instance: L{objects.Instance} 853 @param instance: instance to which the NIC mod will be applied to 854 855 @rtype: (L{constants.DDM_ADD}, -1, parameters) or 856 (L{constants.DDM_REMOVE}, -1, parameters) or 857 L{None} 858 @return: DDM mod containing an action to add or remove the NIC, or 859 None if nothing needs to be done 860 861 """ 862 nic_name = ComputeInstanceCommunicationNIC(instance.name) 863 864 instance_communication_nic = None 865 866 for nic in instance.nics: 867 if nic.name == nic_name: 868 instance_communication_nic = nic 869 break 870 871 if instance_communication and not instance_communication_nic: 872 action = constants.DDM_ADD 873 params = {constants.INIC_NAME: nic_name, 874 constants.INIC_MAC: constants.VALUE_GENERATE, 875 constants.INIC_IP: constants.NIC_IP_POOL, 876 constants.INIC_NETWORK: 877 cfg.GetInstanceCommunicationNetwork()} 878 elif not instance_communication and instance_communication_nic: 879 action = constants.DDM_REMOVE 880 params = None 881 else: 882 action = None 883 params = None 884 885 if action is not None: 886 return (action, -1, params) 887 else: 888 return None
889
890 - def _GetInstanceInfo(self, cluster_hvparams):
891 pnode_uuid = self.instance.primary_node 892 instance_info = self.rpc.call_instance_info( 893 pnode_uuid, self.instance.name, self.instance.hypervisor, 894 cluster_hvparams) 895 return instance_info
896
897 - def _CheckHotplug(self):
898 if self.op.hotplug or self.op.hotplug_if_possible: 899 result = self.rpc.call_hotplug_supported(self.instance.primary_node, 900 self.instance) 901 if result.fail_msg: 902 if self.op.hotplug: 903 result.Raise("Hotplug is not possible: %s" % result.fail_msg, 904 prereq=True, ecode=errors.ECODE_STATE) 905 else: 906 self.LogWarning(result.fail_msg) 907 self.op.hotplug = False 908 self.LogInfo("Modification will take place without hotplugging.") 909 else: 910 self.op.hotplug = True
911
912 - def _PrepareNicCommunication(self):
913 # add or remove NIC for instance communication 914 if self.op.instance_communication is not None: 915 mod = self._InstanceCommunicationDDM(self.cfg, 916 self.op.instance_communication, 917 self.instance) 918 if mod is not None: 919 self.op.nics.append(mod) 920 921 self.nicmod = PrepareContainerMods(self.op.nics, InstNicModPrivate)
922
923 - def _ProcessHVParams(self, node_uuids):
924 if self.op.hvparams: 925 hv_type = self.instance.hypervisor 926 i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams) 927 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES) 928 hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict) 929 930 # local check 931 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new) 932 CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new) 933 self.hv_proposed = self.hv_new = hv_new # the new actual values 934 self.hv_inst = i_hvdict # the new dict (without defaults) 935 else: 936 self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor, 937 self.instance.os, 938 self.instance.hvparams) 939 self.hv_new = self.hv_inst = {}
940
941 - def _ProcessBeParams(self):
942 if self.op.beparams: 943 i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams, 944 use_none=True) 945 objects.UpgradeBeParams(i_bedict) 946 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES) 947 be_new = self.cluster.SimpleFillBE(i_bedict) 948 self.be_proposed = self.be_new = be_new # the new actual values 949 self.be_inst = i_bedict # the new dict (without defaults) 950 else: 951 self.be_new = self.be_inst = {} 952 self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams) 953 return self.cluster.FillBE(self.instance)
954
  def _ValidateCpuParams(self):
    """Cross-check the proposed CPU mask against the proposed vCPU count
    and the physical CPUs of the instance's nodes.

    """
    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new and cpu_list:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs to
        # satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                    .hvparams[self.instance.hypervisor])]
        CheckNodesPhysicalCPUs(self,
                               self.cfg.GetInstanceNodes(self.instance.uuid),
                               max_requested_cpu + 1,
                               hvspecs)
988 - def _ProcessOsParams(self, node_uuids):
989 # osparams processing 990 instance_os = (self.op.os_name 991 if self.op.os_name and not self.op.force 992 else self.instance.os) 993 994 if self.op.osparams or self.op.osparams_private: 995 public_parms = self.op.osparams or {} 996 private_parms = self.op.osparams_private or {} 997 dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms) 998 999 if dupe_keys: 1000 raise errors.OpPrereqError("OS parameters repeated multiple times: %s" % 1001 utils.CommaJoin(dupe_keys)) 1002 1003 self.os_inst = GetUpdatedParams(self.instance.osparams, 1004 public_parms) 1005 self.os_inst_private = GetUpdatedParams(self.instance.osparams_private, 1006 private_parms) 1007 1008 CheckOSParams(self, True, node_uuids, instance_os, 1009 objects.FillDict(self.os_inst, 1010 self.os_inst_private), 1011 self.op.force_variant) 1012 1013 else: 1014 self.os_inst = {} 1015 self.os_inst_private = {}
1016
  def _ProcessMem(self, cluster_hvparams, be_old, pnode_uuid):
    """Memory-related prerequisite checks.

    Verifies that an increase of BE_MAXMEM still allows the instance to
    start on its primary node (and, when auto_balance is set, to fail
    over to its secondaries), and that a requested runtime memory
    change is within the instance's limits and fits on the primary
    node.

    @param cluster_hvparams: cluster-level hvparams of the instance's
        hypervisor
    @param be_old: the instance's current (filled) backend parameters
    @param pnode_uuid: UUID of the instance's primary node

    """
    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        self.be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if self.be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(
          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid))
      instance_info = self._GetInstanceInfo(cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0
          #TODO(dynmem): do the appropriate check involving MINMEM
          # Memory the node would be short of if the instance grew to its
          # new maximum while running
          miss_mem = (self.be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if self.be_new[constants.BE_AUTO_BALANCE]:
        # With auto_balance, every secondary must be able to host the
        # instance at its new maximum memory
        secondary_nodes = \
          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)
          #TODO(dynmem): do the appropriate check involving MINMEM
          elif self.be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      # Changing the runtime memory requires the instance to be running
      remote_info = self.rpc.call_instance_info(
         self.instance.primary_node, self.instance.name,
         self.instance.hypervisor,
         cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node),
                        prereq=True)
      if not remote_info.payload:  # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
          (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
           self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        # Ballooning up: the primary node must have the extra memory free
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
  def CheckPrereq(self):
    """Check prerequisites.

    Verifies that the instance is locked and retrievable, then checks
    every requested parameter change: primary node move, hotplug, NIC
    and disk modifications, disk template conversion, hypervisor,
    backend, CPU, OS and memory parameters.  Unless ignored, the
    resulting instance specs are finally verified against the node
    group's instance policy.

    @raise errors.OpPrereqError: if any requested change is invalid

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    self.op.disks = self._LookupDiskMods()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # Collected non-fatal warnings, reported to the user at the end
    self.warn = []

    # Changing the primary node of a running instance (without --force)
    # is only allowed if the instance is not running on it any more
    if (self.op.pnode_uuid is not None and
        self.op.pnode_uuid != self.instance.primary_node and
        not self.op.force):
      instance_info = self._GetInstanceInfo(cluster_hvparams)

      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError(
            "Instance is still running on %s" %
            self.cfg.GetNodeName(self.instance.primary_node),
            errors.ECODE_STATE)
    pnode_uuid = self.instance.primary_node
    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)

    node_uuids = list(self.cfg.GetInstanceNodes(self.instance.uuid))
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    self._CheckHotplug()

    self._PrepareNicCommunication()

    # disks processing
    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    self._ProcessHVParams(node_uuids)
    be_old = self._ProcessBeParams()

    self._ValidateCpuParams()
    self._ProcessOsParams(node_uuids)
    self._ProcessMem(cluster_hvparams, be_old, pnode_uuid)

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicAttach(_, __, ___):
      raise errors.OpPrereqError("Attach operation is not supported for NICs",
                                 errors.ECODE_INVAL)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      # Give a pool-managed IP back to its network before removal
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    def _PrepareNicDetach(_, __, ___):
      raise errors.OpPrereqError("Detach operation is not supported for NICs",
                                 errors.ECODE_INVAL)

    # Verify NIC changes (operating on copy)
    nics = [nic.Copy() for nic in self.instance.nics]
    ApplyContainerMods("NIC", nics, None, self.nicmod, _PrepareNicCreate,
                       _PrepareNicAttach, _PrepareNicMod, _PrepareNicRemove,
                       _PrepareNicDetach)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                         self._CreateNewNic, None, self._ApplyNicMods,
                         self._RemoveNic, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        count = ispec[constants.ISPEC_DISK_COUNT]
        new_disk_types = [self.op.disk_template] * count
      else:
        old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
        add_disk_count = ispec[constants.ISPEC_DISK_COUNT] - len(old_disks)
        dev_type = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
        if dev_type == constants.DT_DISKLESS and add_disk_count != 0:
          raise errors.ProgrammerError(
              "Conversion from diskless instance not possible and should have"
              " been caught")

        new_disk_types = ([d.dev_type for d in old_disks] +
                          [dev_type] * add_disk_count)
      # Verify the policy once with MAXMEM and once with MINMEM
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                    new_disk_types)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                    new_disk_types)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
  def _ConvertInstanceDisks(self, feedback_fn):
    """Converts the disks of an instance to another type.

    This function converts the disks of an instance. It supports
    conversions among all the available disk types except conversions
    between the LVM-based disk types, that use their separate code path.
    Also, this method does not support conversions that include the 'diskless'
    template and those targeting the 'blockdev' template.

    @type feedback_fn: callable
    @param feedback_fn: function used to send feedback back to the caller

    @rtype: NoneType
    @return: None
    @raise errors.OpPrereqError: in case of failure

    """
    template_info = self.op.disk_template
    if self.op.disk_template == constants.DT_EXT:
      # For 'ext' disks, report the template together with its provider
      template_info = ":".join([self.op.disk_template,
                                self.op.ext_params["provider"]])

    old_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
    feedback_fn("Converting disk template from '%s' to '%s'" %
                (old_template, template_info))

    assert not (old_template in constants.DTS_NOT_CONVERTIBLE_FROM or
                self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO), \
      ("Unsupported disk template conversion from '%s' to '%s'" %
       (old_template, self.op.disk_template))

    pnode_uuid = self.instance.primary_node
    snode_uuid = []
    if self.op.remote_node_uuid:
      snode_uuid = [self.op.remote_node_uuid]

    old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)

    feedback_fn("Generating new '%s' disk template..." % template_info)
    file_storage_dir = CalculateFileStorageDir(
      self.op.disk_template, self.cfg, self.instance.name,
      file_storage_dir=self.op.file_storage_dir)
    new_disks = GenerateDiskTemplate(self,
                                     self.op.disk_template,
                                     self.instance.uuid,
                                     pnode_uuid,
                                     snode_uuid,
                                     self.disks_info,
                                     file_storage_dir,
                                     self.op.file_driver,
                                     0,
                                     feedback_fn,
                                     self.diskparams)

    # Create the new block devices for the instance.
    feedback_fn("Creating new empty disks of type '%s'..." % template_info)
    try:
      CreateDisks(self, self.instance, disk_template=self.op.disk_template,
                  disks=new_disks)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      for disk in new_disks:
        self.cfg.ReleaseDRBDMinors(disk.uuid)
      raise

    # Transfer the data from the old to the newly created disks of the
    # instance.
    feedback_fn("Populating the new empty disks of type '%s'..." %
                template_info)
    for idx, (old, new) in enumerate(zip(old_disks, new_disks)):
      feedback_fn(" - copying data from disk %s (%s), size %s" %
                  (idx, old.dev_type,
                   utils.FormatUnit(new.size, "h")))
      if old.dev_type == constants.DT_DRBD8:
        # Copy from the underlying data volume of the DRBD device
        old = old.children[0]
      result = self.rpc.call_blockdev_convert(pnode_uuid, (old, self.instance),
                                              (new, self.instance))
      msg = result.fail_msg
      if msg:
        # A disk failed to copy. Abort the conversion operation and rollback
        # the modifications to the previous state. The instance will remain
        # intact.
        if self.op.disk_template == constants.DT_DRBD8:
          new = new.children[0]
        self.Log(" - ERROR: Could not copy disk '%s' to '%s'" %
                 (old.logical_id[1], new.logical_id[1]))
        try:
          self.LogInfo("Some disks failed to copy")
          self.LogInfo("The instance will not be affected, aborting operation")
          self.LogInfo("Removing newly created disks of type '%s'..." %
                       template_info)
          RemoveDisks(self, self.instance, disks=new_disks)
          self.LogInfo("Newly created disks removed successfully")
        finally:
          # Release the reserved DRBD minors even if disk removal failed
          for disk in new_disks:
            self.cfg.ReleaseDRBDMinors(disk.uuid)
        result.Raise("Error while converting the instance's template")

    # In case of DRBD disk, return its port to the pool
    for disk in old_disks:
      if disk.dev_type == constants.DT_DRBD8:
        tcp_port = disk.logical_id[2]
        self.cfg.AddTcpUdpPort(tcp_port)

    # Remove old disks from the instance.
    feedback_fn("Detaching old disks (%s) from the instance and removing"
                " them from cluster config" % old_template)
    for old_disk in old_disks:
      self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)

    # Attach the new disks to the instance.
    feedback_fn("Adding new disks (%s) to cluster config and attaching"
                " them to the instance" % template_info)
    for (idx, new_disk) in enumerate(new_disks):
      self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)

    # Re-read the instance from the configuration.
    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    # Release node locks while waiting for sync and disks removal.
    ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    feedback_fn("Removing old block devices of type '%s'..." % old_template)
    RemoveDisks(self, self.instance, disks=old_disks)

    # Node resource locks will be released by the caller.
1411 - def _ConvertPlainToDrbd(self, feedback_fn):
1412 """Converts an instance from plain to drbd. 1413 1414 """ 1415 feedback_fn("Converting disk template from 'plain' to 'drbd'") 1416 1417 if not self.op.remote_node_uuid: 1418 feedback_fn("Using %s to choose new secondary" % self.op.iallocator) 1419 1420 req = iallocator.IAReqInstanceAllocateSecondary( 1421 name=self.op.instance_name) 1422 ial = iallocator.IAllocator(self.cfg, self.rpc, req) 1423 ial.Run(self.op.iallocator) 1424 1425 if not ial.success: 1426 raise errors.OpPrereqError("Can's find secondary node using" 1427 " iallocator %s: %s" % 1428 (self.op.iallocator, ial.info), 1429 errors.ECODE_NORES) 1430 feedback_fn("%s choose %s as new secondary" 1431 % (self.op.iallocator, ial.result)) 1432 self.op.remote_node = ial.result 1433 self.op.remote_node_uuid = self.cfg.GetNodeInfoByName(ial.result).uuid 1434 1435 pnode_uuid = self.instance.primary_node 1436 snode_uuid = self.op.remote_node_uuid 1437 old_disks = self.cfg.GetInstanceDisks(self.instance.uuid) 1438 1439 assert utils.AnyDiskOfType(old_disks, [constants.DT_PLAIN]) 1440 1441 new_disks = GenerateDiskTemplate(self, self.op.disk_template, 1442 self.instance.uuid, pnode_uuid, 1443 [snode_uuid], self.disks_info, 1444 None, None, 0, 1445 feedback_fn, self.diskparams) 1446 anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams) 1447 p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid) 1448 s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid) 1449 info = GetInstanceInfoText(self.instance) 1450 feedback_fn("Creating additional volumes...") 1451 # first, create the missing data and meta devices 1452 for disk in anno_disks: 1453 # unfortunately this is... 
not too nice 1454 CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1], 1455 info, True, p_excl_stor) 1456 for child in disk.children: 1457 CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True, 1458 s_excl_stor) 1459 # at this stage, all new LVs have been created, we can rename the 1460 # old ones 1461 feedback_fn("Renaming original volumes...") 1462 rename_list = [(o, n.children[0].logical_id) 1463 for (o, n) in zip(old_disks, new_disks)] 1464 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list) 1465 result.Raise("Failed to rename original LVs") 1466 1467 feedback_fn("Initializing DRBD devices...") 1468 # all child devices are in place, we can now create the DRBD devices 1469 try: 1470 for disk in anno_disks: 1471 for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor), 1472 (snode_uuid, s_excl_stor)]: 1473 f_create = node_uuid == pnode_uuid 1474 CreateSingleBlockDev(self, node_uuid, self.instance, disk, info, 1475 f_create, excl_stor) 1476 except errors.GenericError, e: 1477 feedback_fn("Initializing of DRBD devices failed;" 1478 " renaming back original volumes...") 1479 rename_back_list = [(n.children[0], o.logical_id) 1480 for (n, o) in zip(new_disks, old_disks)] 1481 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list) 1482 result.Raise("Failed to rename LVs back after error %s" % str(e)) 1483 raise 1484 1485 # Remove the old disks from the instance 1486 for old_disk in old_disks: 1487 self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid) 1488 1489 # Attach the new disks to the instance 1490 for (idx, new_disk) in enumerate(new_disks): 1491 self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx) 1492 1493 # re-read the instance from the configuration 1494 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid) 1495 1496 # Release node locks while waiting for sync 1497 ReleaseLocks(self, locking.LEVEL_NODE) 1498 1499 # disks are created, waiting for sync 1500 disk_abort = 
not WaitForSync(self, self.instance, 1501 oneshot=not self.op.wait_for_sync) 1502 if disk_abort: 1503 raise errors.OpExecError("There are some degraded disks for" 1504 " this instance, please cleanup manually")
1505 1506 # Node resource locks will be released by caller 1507
1508 - def _ConvertDrbdToPlain(self, feedback_fn):
1509 """Converts an instance from drbd to plain. 1510 1511 """ 1512 secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid) 1513 disks = self.cfg.GetInstanceDisks(self.instance.uuid) 1514 assert len(secondary_nodes) == 1 1515 assert utils.AnyDiskOfType(disks, [constants.DT_DRBD8]) 1516 1517 feedback_fn("Converting disk template from 'drbd' to 'plain'") 1518 1519 old_disks = AnnotateDiskParams(self.instance, disks, self.cfg) 1520 new_disks = [d.children[0] for d in disks] 1521 1522 # copy over size, mode and name and set the correct nodes 1523 for parent, child in zip(old_disks, new_disks): 1524 child.size = parent.size 1525 child.mode = parent.mode 1526 child.name = parent.name 1527 child.nodes = [self.instance.primary_node] 1528 1529 # this is a DRBD disk, return its port to the pool 1530 for disk in old_disks: 1531 tcp_port = disk.logical_id[2] 1532 self.cfg.AddTcpUdpPort(tcp_port) 1533 1534 # Remove the old disks from the instance 1535 for old_disk in old_disks: 1536 self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid) 1537 1538 # Attach the new disks to the instance 1539 for (idx, new_disk) in enumerate(new_disks): 1540 self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx) 1541 1542 # re-read the instance from the configuration 1543 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid) 1544 1545 # Release locks in case removing disks takes a while 1546 ReleaseLocks(self, locking.LEVEL_NODE) 1547 1548 feedback_fn("Removing volumes on the secondary node...") 1549 RemoveDisks(self, self.instance, disks=old_disks, 1550 target_node_uuid=secondary_nodes[0]) 1551 1552 feedback_fn("Removing unneeded volumes on the primary node...") 1553 meta_disks = [] 1554 for idx, disk in enumerate(old_disks): 1555 meta_disks.append(disk.children[1]) 1556 RemoveDisks(self, self.instance, disks=meta_disks)
1557
1558 - def _HotplugDevice(self, action, dev_type, device, extra, seq):
1559 self.LogInfo("Trying to hotplug device...") 1560 msg = "hotplug:" 1561 result = self.rpc.call_hotplug_device(self.instance.primary_node, 1562 self.instance, action, dev_type, 1563 (device, self.instance), 1564 extra, seq) 1565 if result.fail_msg: 1566 self.LogWarning("Could not hotplug device: %s" % result.fail_msg) 1567 self.LogInfo("Continuing execution..") 1568 msg += "failed" 1569 else: 1570 self.LogInfo("Hotplug done.") 1571 msg += "done" 1572 return msg
1573
1574 - def _FillFileDriver(self):
1575 if not self.op.file_driver: 1576 self.op.file_driver = constants.FD_DEFAULT 1577 elif self.op.file_driver not in constants.FILE_DRIVER: 1578 raise errors.OpPrereqError("Invalid file driver name '%s'" % 1579 self.op.file_driver, errors.ECODE_INVAL)
1580
1581 - def _GenerateDiskTemplateWrapper(self, idx, disk_type, params):
1582 file_path = CalculateFileStorageDir( 1583 disk_type, self.cfg, self.instance.name, 1584 file_storage_dir=self.op.file_storage_dir) 1585 1586 self._FillFileDriver() 1587 1588 secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid) 1589 return \ 1590 GenerateDiskTemplate(self, disk_type, self.instance.uuid, 1591 self.instance.primary_node, secondary_nodes, 1592 [params], file_path, self.op.file_driver, idx, 1593 self.Log, self.diskparams)[0]
1594
  def _CreateNewDisk(self, idx, params, _):
    """Creates a new disk.

    @param idx: index at which the new disk is added
    @param params: creation parameters of the new disk
    @return: tuple of the new disk object and a list of change
        descriptions for the user

    """
    # add a new disk
    disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
    disk = self._GenerateDiskTemplateWrapper(idx, disk_template,
                                             params)
    new_disks = CreateDisks(self, self.instance, disks=[disk])
    self.cfg.AddInstanceDisk(self.instance.uuid, disk, idx)

    # re-read the instance from the configuration
    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      # Assemble the new disk first; a failure here is logged but not fatal
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name, uri = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, (link_name, uri), idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)
1636 - def _PostAddDisk(self, _, disk):
1637 if not WaitForSync(self, self.instance, disks=[disk], 1638 oneshot=not self.op.wait_for_sync): 1639 raise errors.OpExecError("Failed to sync disks of %s" % 1640 self.instance.name) 1641 1642 # the disk is active at this point, so deactivate it if the instance disks 1643 # are supposed to be inactive 1644 if not self.instance.disks_active: 1645 ShutdownInstanceDisks(self, self.instance, disks=[disk])
1646
  def _AttachDisk(self, idx, params, _):
    """Attaches an existing disk to an instance.

    @type idx: int
    @param idx: index at which the disk is attached
    @type params: dict
    @param params: identification of the disk (uuid or name) plus any
      additional disk parameters
    @return: tuple of the attached disk object and a list of changes to report

    """
    uuid = params.get("uuid", None)
    name = params.get(constants.IDISK_NAME, None)

    disk = self.GenericGetDiskInfo(uuid, name)

    # Rename disk before attaching (if disk is filebased)
    # NOTE(review): the parentheses here are redundant — this tests
    # membership in DTS_INSTANCE_DEPENDENT_PATH itself, not in a tuple.
    if disk.dev_type in (constants.DTS_INSTANCE_DEPENDENT_PATH):
      # Add disk size/mode, else GenerateDiskTemplate will not work.
      params[constants.IDISK_SIZE] = disk.size
      params[constants.IDISK_MODE] = str(disk.mode)
      # Generate a throwaway disk only to obtain the instance-dependent
      # path (logical_id) the attached disk should be renamed to.
      dummy_disk = self._GenerateDiskTemplateWrapper(idx, disk.dev_type, params)
      new_logical_id = dummy_disk.logical_id
      result = self.rpc.call_blockdev_rename(self.instance.primary_node,
                                             [(disk, new_logical_id)])
      result.Raise("Failed before attach")
      self.cfg.SetDiskLogicalID(disk.uuid, new_logical_id)
      disk.logical_id = new_logical_id

    # Attach disk to instance
    self.cfg.AttachInstanceDisk(self.instance.uuid, disk.uuid, idx)

    # re-read the instance from the configuration
    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    changes = [
      ("disk/%d" % idx,
       "attach:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]

    disks_ok, _, payloads = AssembleInstanceDisks(self, self.instance,
                                                  disks=[disk])
    if not disks_ok:
      # Best-effort: report the assembly failure and return without
      # attempting a hotplug.
      changes.append(("disk/%d" % idx, "assemble:failed"))
      return disk, changes

    if self.op.hotplug:
      _, link_name, uri = payloads[0]
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_DISK,
                                disk, (link_name, uri), idx)
      changes.append(("disk/%d" % idx, msg))

    return (disk, changes)
1695 - def _ModifyDisk(self, idx, disk, params, _):
1696 """Modifies a disk. 1697 1698 """ 1699 changes = [] 1700 if constants.IDISK_MODE in params: 1701 disk.mode = params.get(constants.IDISK_MODE) 1702 changes.append(("disk.mode/%d" % idx, disk.mode)) 1703 1704 if constants.IDISK_NAME in params: 1705 disk.name = params.get(constants.IDISK_NAME) 1706 changes.append(("disk.name/%d" % idx, disk.name)) 1707 1708 # Modify arbitrary params in case instance template is ext 1709 1710 for key, value in params.iteritems(): 1711 if (key not in constants.MODIFIABLE_IDISK_PARAMS and 1712 disk.dev_type == constants.DT_EXT): 1713 # stolen from GetUpdatedParams: default means reset/delete 1714 if value.lower() == constants.VALUE_DEFAULT: 1715 try: 1716 del disk.params[key] 1717 except KeyError: 1718 pass 1719 else: 1720 disk.params[key] = value 1721 changes.append(("disk.params:%s/%d" % (key, idx), value)) 1722 1723 # Update disk object 1724 self.cfg.Update(disk, self.feedback_fn) 1725 1726 return changes
1727
  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    @type idx: int
    @param idx: index of the disk being removed
    @param root: the disk object to remove
    @return: the hotplug message (empty string if hotplug was not requested)

    """
    hotmsg = ""
    if self.op.hotplug:
      # Hot-unplug the device from the running instance before tearing it
      # down on disk.
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    RemoveDisks(self, self.instance, disks=[root])

    # if this is a DRBD disk, return its port to the pool
    if root.dev_type in constants.DTS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

    # Remove disk from config
    self.cfg.RemoveInstanceDisk(self.instance.uuid, root.uuid)

    # re-read the instance from the configuration
    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    return hotmsg
  def _DetachDisk(self, idx, root, _):
    """Detaches a disk from an instance.

    Unlike L{_RemoveDisk}, the disk itself is preserved and only its
    association with the instance is removed from the configuration.

    @type idx: int
    @param idx: index of the disk being detached
    @param root: the disk object to detach
    @return: the hotplug message (empty string if hotplug was not requested)

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)

    # Always shutdown the disk before detaching.
    ShutdownInstanceDisks(self, self.instance, [root])

    # Rename detached disk.
    #
    # Transform logical_id from:
    #   <file_storage_dir>/<instance_name>/<disk_name>
    # to
    #   <file_storage_dir>/<disk_name>
    if root.dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE):
      file_driver = root.logical_id[0]
      instance_path, disk_name = os.path.split(root.logical_id[1])
      new_path = os.path.join(os.path.dirname(instance_path), disk_name)
      new_logical_id = (file_driver, new_path)
      result = self.rpc.call_blockdev_rename(self.instance.primary_node,
                                             [(root, new_logical_id)])
      result.Raise("Failed before detach")
      # Update logical_id
      self.cfg.SetDiskLogicalID(root.uuid, new_logical_id)

    # Remove disk from config
    self.cfg.DetachInstanceDisk(self.instance.uuid, root.uuid)

    # re-read the instance from the configuration
    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    return hotmsg
  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    @type idx: int
    @param idx: index of the new NIC in the instance's NIC list
    @type params: dict
    @param params: NIC parameters (mac, ip, network, name, ...)
    @param private: per-modification private data holding the filled
      nicparams
    @return: tuple of the new NIC object and a list of changes to report

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    #TODO: not private.filled?? can a nic have no nicparams??
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
        private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)
  def _ApplyNicMods(self, idx, nic, params, private):
    """Modifies a network interface.

    @type idx: int
    @param idx: index of the NIC being modified
    @param nic: the NIC object to modify
    @type params: dict
    @param params: requested NIC parameter changes
    @param private: per-modification private data holding the filled
      nicparams
    @return: list of (field, value) change descriptions

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      # Replace the nicparams wholesale and report every resulting value.
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes
1852 - def _RemoveNic(self, idx, nic, _):
1853 if self.op.hotplug: 1854 return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE, 1855 constants.HOTPLUG_TARGET_NIC, 1856 nic, None, idx)
1857
  def Exec(self, feedback_fn):
    """Modifies an instance.

    All parameters take effect only at the next restart of the instance.

    @param feedback_fn: function used to report progress to the caller
    @return: list of (field, value) tuples describing all applied changes

    """
    self.feedback_fn = feedback_fn
    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there.
    # TODO: Replace with self.LogWarning
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    # Node resource locks are held exactly when a disk template conversion
    # was requested.
    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

    # New primary node
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

    # runtime memory
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

    # Apply disk changes
    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    ApplyContainerMods("disk", inst_disks, result, self.diskmod,
                       self._CreateNewDisk, self._AttachDisk, self._ModifyDisk,
                       self._RemoveDisk, self._DetachDisk,
                       post_add_fn=self._PostAddDisk)

    if self.op.disk_template:
      if __debug__:
        # Sanity check: the conversion needs locks on all instance nodes
        # (plus the remote node for DRBD conversions).
        check_nodes = set(self.cfg.GetInstanceNodes(self.instance.uuid))
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      #TODO make heterogeneous conversions work
      mode = (self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
              self.op.disk_template)
      try:
        # Use the dedicated conversion routine when one exists for this
        # (source, target) template pair, otherwise the generic one.
        if mode in self._DISK_CONVERSIONS:
          self._DISK_CONVERSIONS[mode](self, feedback_fn)
        else:
          self._ConvertInstanceDisks(feedback_fn)
      except:
        # On any failure, give reserved DRBD minors back before re-raising.
        for disk in inst_disks:
          self.cfg.ReleaseDRBDMinors(disk.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      disk_info = self.cfg.GetInstanceDisks(self.instance.uuid)
      assert utils.AllDiskOfType(disk_info, [self.op.disk_template]), \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template,
          self.cfg.GetInstanceDiskTemplate(self.instance.uuid)))

    # Release node and resource locks if there are any (they might already have
    # been released during disk conversion)
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    # Apply NIC changes
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

    # hvparams changes
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.iteritems():
        result.append(("hv/%s" % key, val))

    # beparams changes
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.iteritems():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.iteritems():
        result.append(("os/%s" % key, val))

    if self.op.osparams_private:
      self.instance.osparams_private = self.os_inst_private
      for key, val in self.op.osparams_private.iteritems():
        # Show the Private(...) blurb.
        result.append(("os_private/%s" % key, repr(val)))

    # Persist all in-memory instance changes made above.
    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.instance = self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but stopped
      self.instance = self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    UpdateMetadata(feedback_fn, self.rpc, self.instance)

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result
1990 1991 _DISK_CONVERSIONS = { 1992 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd, 1993 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain, 1994 } 1995