Package ganeti :: Package cmdlib :: Module instance_utils
[hide private]
[frames] | [no frames]

Source Code for Module ganeti.cmdlib.instance_utils

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30   
  31  """Utility function mainly, but not only used by instance LU's.""" 
  32   
  33  import logging 
  34  import os 
  35   
  36  from ganeti import constants 
  37  from ganeti import errors 
  38  from ganeti import ht 
  39  from ganeti import locking 
  40  from ganeti.masterd import iallocator 
  41  from ganeti import network 
  42  from ganeti import netutils 
  43  from ganeti import objects 
  44  from ganeti import pathutils 
  45  from ganeti import utils 
from ganeti.cmdlib.common import AnnotateDiskParams, \
  CheckDiskTemplateEnabled, ComputeIPolicyInstanceViolation, \
  ComputeIPolicySpecViolation
  49   
  50   
#: Type description for changes as returned by L{ApplyContainerMods}'s
#: callbacks: either None or a list of 2-tuples, each holding a non-empty
#: string (the changed field's label) and an arbitrary value.
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
  58   
  59   
def BuildInstanceHookEnv(name, primary_node_name, secondary_node_names, os_type,
                         status, minmem, maxmem, vcpus, nics, disk_template,
                         disks, bep, hvp, hypervisor_name, tags):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node_name: string
  @param primary_node_name: the name of the instance's primary node
  @type secondary_node_names: list
  @param secondary_node_names: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: string
  @param status: the desired status of the instance
  @type minmem: string
  @param minmem: the minimum memory size of the instance
  @type maxmem: string
  @param maxmem: the maximum memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (name, uuid, ip, mac, mode, link, vlan, net,
      netinfo) representing the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: list of disks (either objects.Disk or dict)
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @type tags: list
  @param tags: list of instance tags as strings
  @rtype: dict
  @return: the hook environment for this instance

  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node_name,
    "INSTANCE_SECONDARIES": " ".join(secondary_node_names),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MINMEM": minmem,
    "INSTANCE_MAXMEM": maxmem,
    # TODO(2.9) remove deprecated "memory" value
    "INSTANCE_MEMORY": maxmem,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
    }
  if nics:
    nic_count = len(nics)
    # Note: the loop variable is deliberately NOT called "name" so it does
    # not shadow the instance name parameter used above.
    for idx, (nic_name, uuid, ip, mac, mode, link, vlan, net, netinfo) \
        in enumerate(nics):
      if ip is None:
        ip = ""
      if nic_name:
        env["INSTANCE_NIC%d_NAME" % idx] = nic_name
      env["INSTANCE_NIC%d_UUID" % idx] = uuid
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      env["INSTANCE_NIC%d_VLAN" % idx] = vlan
      if netinfo:
        nobj = objects.Network.FromDict(netinfo)
        env.update(nobj.HooksDict("INSTANCE_NIC%d_" % idx))
      elif net:
        # BUGFIX: this previously tested "elif network:", i.e. the imported
        # ganeti.network *module*, which is always truthy, so the variable
        # was exported even for NICs without a network reference.
        # FIXME: broken network reference: the instance NIC specifies a
        # network, but the relevant network entry was not in the config. This
        # should be made impossible.
        env["INSTANCE_NIC%d_NETWORK_NAME" % idx] = net
      if mode == constants.NIC_MODE_BRIDGED or \
         mode == constants.NIC_MODE_OVS:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, disk in enumerate(disks):
      env.update(BuildDiskEnv(idx, disk))
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  if not tags:
    tags = []

  env["INSTANCE_TAGS"] = " ".join(tags)

  # Export every backend and hypervisor parameter verbatim.
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env
166 167
def BuildInstanceHookEnvByObject(lu, instance, secondary_nodes=None,
                                 disks=None, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type secondary_nodes: list or None
  @param secondary_nodes: secondary node UUIDs to use instead of the
      ones stored in the configuration
  @type disks: list or None
  @param disks: disks to use instead of the instance's configured disks
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()

  # Anything not passed in explicitly is taken from the configuration.
  if secondary_nodes is None:
    secondary_nodes = lu.cfg.GetInstanceSecondaryNodes(instance.uuid)
  if disks is None:
    disks = lu.cfg.GetInstanceDisks(instance.uuid)

  filled_be = cluster.FillBE(instance)
  filled_hv = cluster.FillHV(instance)

  args = {
    "name": instance.name,
    "primary_node_name": lu.cfg.GetNodeName(instance.primary_node),
    "secondary_node_names": lu.cfg.GetNodeNames(secondary_nodes),
    "os_type": instance.os,
    "status": instance.admin_state,
    "maxmem": filled_be[constants.BE_MAXMEM],
    "minmem": filled_be[constants.BE_MINMEM],
    "vcpus": filled_be[constants.BE_VCPUS],
    "nics": NICListToTuple(lu, instance.nics),
    "disk_template": utils.GetDiskTemplate(disks),
    "disks": disks,
    "bep": filled_be,
    "hvp": filled_hv,
    "hypervisor_name": instance.hypervisor,
    "tags": instance.tags,
    }
  if override:
    args.update(override)
  return BuildInstanceHookEnv(**args)  # pylint: disable=W0142
218 219
def GetClusterDomainSecret():
  """Return the contents of the cluster domain secret file.

  Reads the secret from L{pathutils.CLUSTER_DOMAIN_SECRET_FILE} in strict
  mode (presumably failing if the file does not contain exactly one line --
  see L{utils.ReadOneLineFile}).

  """
  secret_file = pathutils.CLUSTER_DOMAIN_SECRET_FILE
  return utils.ReadOneLineFile(secret_file, strict=True)
226 227
def CheckNodeNotDrained(lu, node_uuid):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  node_info = lu.cfg.GetNodeInfo(node_uuid)
  if not node_info.drained:
    return
  raise errors.OpPrereqError("Can't use drained node %s" % node_info.name,
                             errors.ECODE_STATE)
240 241
def CheckNodeVmCapable(lu, node_uuid):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  node_info = lu.cfg.GetNodeInfo(node_uuid)
  if node_info.vm_capable:
    return
  raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node_uuid,
                             errors.ECODE_STATE)
253 254
def RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Remove an instance's disks and drop it from the configuration.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @param feedback_fn: function used to send feedback back to the caller
  @type instance: L{objects.Instance}
  @param instance: the instance to remove
  @type ignore_failures: boolean
  @param ignore_failures: whether disk removal failures should be
      downgraded to a warning

  """
  logging.info("Removing block devices for instance %s", instance.name)

  disks_removed = RemoveDisks(lu, instance, ignore_failures=ignore_failures)
  if not disks_removed:
    # Either abort or warn, depending on the caller's wishes.
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance's disks")
  for disk_ref in instance.disks:
    lu.cfg.RemoveInstanceDisk(instance.uuid, disk_ref)

  logging.info("Removing instance %s out of cluster config", instance.name)
  lu.cfg.RemoveInstance(instance.uuid)
272 273
def _StoragePathsRemoved(removed, disks):
  """Returns an iterable of all storage paths to be removed.

  A storage path is removed if no disks are contained in it anymore.

  @type removed: list of L{objects.Disk}
  @param removed: The disks that are being removed
  @type disks: list of L{objects.Disk}
  @param disks: All disks attached to the instance

  @rtype: set of file paths
  @returns: the storage directories that need to be removed

  """
  file_types = (constants.DT_FILE, constants.DT_SHARED_FILE)

  def _DirOf(disk):
    # File-based disks store their path in logical_id[1].
    return os.path.dirname(disk.logical_id[1])

  # Directories still referenced by a disk that is NOT being removed.
  still_used = set(_DirOf(d) for d in disks
                   if d not in removed and d.dev_type in file_types)

  # Directories referenced by the disks being removed.
  affected = set(_DirOf(d) for d in removed if d.dev_type in file_types)

  return affected - still_used
300 301
def RemoveDisks(lu, instance, disks=None,
                target_node_uuid=None, ignore_failures=False):
  """Remove all or a subset of disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones.

  This function is also used by the disk template conversion mechanism to
  remove the old block devices of the instance, in which case the disks to
  drop are passed explicitly via C{disks} (each disk carries its own
  C{dev_type}).

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type disks: list of L{objects.Disk}
  @param disks: the disks to remove; if not specified, all the disks of the
    instance are removed
  @type target_node_uuid: string
  @param target_node_uuid: used to override the node on which to remove the
    disks
  @type ignore_failures: boolean
  @param ignore_failures: if True, DRBD ports are returned to the pool even
    when some removals failed
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  ports_to_release = set()

  # The full disk list is needed later to decide which file storage
  # directories become empty; "disks" may be only a subset of it.
  all_disks = lu.cfg.GetInstanceDisks(instance.uuid)
  if disks is None:
    disks = all_disks

  anno_disks = AnnotateDiskParams(instance, disks, lu.cfg)
  for (idx, device) in enumerate(anno_disks):
    if target_node_uuid:
      # Caller pinned the removal to one node.
      edata = [(target_node_uuid, device)]
    else:
      # Otherwise remove the device on every node it lives on.
      edata = device.ComputeNodeTree(instance.primary_node)
    for node_uuid, disk in edata:
      result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
      if result.fail_msg:
        lu.LogWarning("Could not remove disk %s on node %s,"
                      " continuing anyway: %s", idx,
                      lu.cfg.GetNodeName(node_uuid), result.fail_msg)
        # A failure on an offline secondary node is tolerated; anything
        # else marks the whole operation as failed.
        if not (result.offline and node_uuid != instance.primary_node):
          all_result = False

    # if this is a DRBD disk, return its port to the pool
    if device.dev_type in constants.DTS_DRBD:
      ports_to_release.add(device.logical_id[2])

  # Ports are only recycled when everything succeeded (or the caller
  # explicitly chose to ignore failures).
  if all_result or ignore_failures:
    for port in ports_to_release:
      lu.cfg.AddTcpUdpPort(port)

  for d in disks:
    CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), d.dev_type)

  if target_node_uuid:
    tgt = target_node_uuid
  else:
    tgt = instance.primary_node

  # Remove file storage directories that no longer contain any disk.
  obsolete_storage_paths = _StoragePathsRemoved(disks, all_disks)

  for file_storage_dir in obsolete_storage_paths:
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, lu.cfg.GetNodeName(tgt), result.fail_msg)
      all_result = False

  return all_result
379 380
def NICToTuple(lu, nic):
  """Build a tuple of NIC information for the hooks environment.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nic: L{objects.NIC}
  @param nic: nic to convert to hooks tuple
  @rtype: tuple
  @return: (name, uuid, ip, mac, mode, link, vlan, network, netinfo)

  """
  # Fill in cluster-level defaults for the NIC parameters.
  filled_params = lu.cfg.GetClusterInfo().SimpleFillNIC(nic.nicparams)

  netinfo = None
  if nic.network:
    net_obj = lu.cfg.GetNetwork(nic.network)
    netinfo = objects.Network.ToDict(net_obj)

  return (nic.name, nic.uuid, nic.ip, nic.mac,
          filled_params[constants.NIC_MODE],
          filled_params[constants.NIC_LINK],
          filled_params[constants.NIC_VLAN],
          nic.network, netinfo)
401 402
def NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  return [NICToTuple(lu, nic) for nic in nics]
419 420
def CopyLockList(names):
  """Makes a copy of a list of lock names.

  Handles L{locking.ALL_SET} correctly: the sentinel is returned as-is
  instead of being copied.

  """
  if names == locking.ALL_SET:
    return locking.ALL_SET
  return list(names)
431 432
def ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  Exactly one of C{names} and C{keep} may be given; with neither, all locks
  at the given level are released.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit whose locks are released
  @param level: Lock level
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  logging.debug("Lu %s ReleaseLocks %s names=%s, keep=%s",
                lu.wconfdcontext, level, names, keep)
  assert not (keep is not None and names is not None), \
    "Only one of the 'names' and the 'keep' parameters can be given"

  if names is not None:
    should_release = names.__contains__
  elif keep:
    # NOTE: a truthiness test, not "keep is not None" -- an empty "keep"
    # list falls through to the branch below and frees the whole level.
    should_release = lambda name: name not in keep
  else:
    should_release = None

  levelname = locking.LEVEL_NAMES[level]

  owned = lu.owned_locks(level)
  if not owned:
    # Not owning any lock at this level, do nothing
    pass

  elif should_release:
    retain = []
    release = []

    # Determine which locks to release
    for name in owned:
      if should_release(name):
        release.append(name)
      else:
        retain.append(name)

    assert len(lu.owned_locks(level)) == (len(retain) + len(release))

    # Release just some locks
    lu.WConfdClient().TryUpdateLocks(
      lu.release_request(level, release))
    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
  else:
    # No filter given: drop every lock at this level in one call.
    lu.WConfdClient().FreeLocksLevel(levelname)
482 483
def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
                                 target_group, cfg,
                                 _compute_fn=ComputeIPolicyInstanceViolation):
  """Compute if instance meets the specs of the new target group.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param current_group: The current group of the instance
  @param target_group: The new group of the instance
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  # Staying within the same group can never introduce a new violation.
  if current_group != target_group:
    return _compute_fn(ipolicy, instance, cfg)
  return []
503 504
def CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
                           _compute_fn=_ComputeIPolicyNodeViolation):
  """Checks that the target node is correct in terms of instance policy.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param node: The new node to relocate
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param ignore: Ignore violations of the ipolicy
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
  violations = _compute_fn(ipolicy, instance, primary_node.group, node.group,
                           cfg)
  if not violations:
    return

  msg = ("Instance does not meet target node group's (%s) instance"
         " policy: %s") % (node.group, utils.CommaJoin(violations))
  if ignore:
    # Caller asked to only warn about violations.
    lu.LogWarning(msg)
  else:
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
529 530
def GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  @type instance: L{objects.Instance}
  @param instance: the instance the disks belong to
  @rtype: string
  @return: "originstname+" followed by the instance name

  """
  info_text = "originstname+%s" % instance.name
  return info_text
536 537
def CheckNodeFreeMemory(lu, node_uuid, reason, requested, hvname, hvparams):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuid: C{str}
  @param node_uuid: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hvname: string
  @param hvname: the hypervisor's name
  @type hvparams: dict of strings
  @param hvparams: the hypervisor's parameters
  @rtype: integer
  @return: node current free memory
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  node_name = lu.cfg.GetNodeName(node_uuid)
  # Query only this node, only the given hypervisor.
  nodeinfo = lu.rpc.call_node_info([node_uuid], None, [(hvname, hvparams)])
  nodeinfo[node_uuid].Raise("Can't get data from node %s" % node_name,
                            prereq=True, ecode=errors.ECODE_ENVIRON)
  # Payload layout: (bootid?, storage info, [hypervisor info]) -- only the
  # single hypervisor entry requested above is unpacked here.
  (_, _, (hv_info, )) = nodeinfo[node_uuid].payload

  free_mem = hv_info.get("memory_free", None)
  # Reject missing or non-integer values (the RPC may return None or an
  # error string instead of a number).
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node_name, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node_name, reason, requested, free_mem),
                               errors.ECODE_NORES)
  return free_mem
581 582
def CheckInstanceBridgesExist(lu, instance, node_uuid=None):
  """Check that the bridges needed by an instance exist.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose NIC bridges are checked
  @type node_uuid: string or None
  @param node_uuid: node to check on; defaults to the instance's
      primary node

  """
  target_uuid = instance.primary_node if node_uuid is None else node_uuid
  CheckNicsBridgesExist(lu, instance.nics, target_uuid)
590 591
def CheckNicsBridgesExist(lu, nics, node_uuid):
  """Check that the bridges needed by a list of nics exist.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: the NICs to check
  @type node_uuid: string
  @param node_uuid: the node to check the bridges on

  """
  cluster = lu.cfg.GetClusterInfo()

  # Collect the link of every bridged NIC, after filling in defaults.
  bridges = []
  for nic in nics:
    params = cluster.SimpleFillNIC(nic.nicparams)
    if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
      bridges.append(params[constants.NIC_LINK])

  if not bridges:
    return

  result = lu.rpc.call_bridges_exist(node_uuid, bridges)
  result.Raise("Error checking bridges on destination node '%s'" %
               lu.cfg.GetNodeName(node_uuid), prereq=True,
               ecode=errors.ECODE_ENVIRON)
605 606
def UpdateMetadata(feedback_fn, rpc, instance,
                   osparams_public=None,
                   osparams_private=None,
                   osparams_secret=None):
  """Updates instance metadata on the metadata daemon on the
  instance's primary node.

  If the daemon isn't available (not compiled), do nothing.

  In case the RPC fails, this function simply issues a warning and
  proceeds normally.

  @type feedback_fn: callable
  @param feedback_fn: function used send feedback back to the caller

  @type rpc: L{rpc.node.RpcRunner}
  @param rpc: RPC runner

  @type instance: L{objects.Instance}
  @param instance: instance for which the metadata should be updated

  @type osparams_public: NoneType or dict
  @param osparams_public: public OS parameters used to override those
    defined in L{instance}

  @type osparams_private: NoneType or dict
  @param osparams_private: private OS parameters used to override those
    defined in L{instance}

  @type osparams_secret: NoneType or dict
  @param osparams_secret: secret OS parameters used to override those
    defined in L{instance}

  @rtype: NoneType
  @return: None

  """
  if not constants.ENABLE_METAD:
    # Daemon not compiled in; silently skip.
    return

  data = instance.ToDict()

  for key, value in (("osparams_public", osparams_public),
                     ("osparams_private", osparams_private)):
    if value is not None:
      data[key] = value

  # Secret parameters are never taken from the instance dict: either the
  # caller supplies them or they are sent as an empty dict.
  if osparams_secret is not None:
    data["osparams_secret"] = osparams_secret
  else:
    data["osparams_secret"] = {}

  result = rpc.call_instance_metadata_modify(instance.primary_node, data)
  result.Warn("Could not update metadata for instance '%s'" % instance.name,
              feedback_fn)
663 664
def CheckCompressionTool(lu, compression_tool):
  """ Checks if the provided compression tool is allowed to be used.

  @type compression_tool: string
  @param compression_tool: Compression tool to use for importing or exporting
    the instance

  @rtype: NoneType
  @return: None

  @raise errors.OpPrereqError: If the tool is not enabled by Ganeti or
    whitelisted

  """
  allowed_tools = lu.cfg.GetCompressionTools()
  # "none" is always acceptable, as is any whitelisted tool.
  if compression_tool == constants.IEC_NONE:
    return
  if compression_tool in allowed_tools:
    return
  raise errors.OpPrereqError(
    "Compression tool not allowed, tools allowed are [%s]"
    % ", ".join(allowed_tools), errors.ECODE_INVAL
  )
686 687
def BuildDiskLogicalIDEnv(idx, disk):
  """Helper method to create hooks env related to disk's logical_id

  The shape of C{disk.logical_id} depends on the disk's C{dev_type}; each
  branch below unpacks it accordingly and exports the pieces as
  C{INSTANCE_DISK<idx>_*} variables.

  @type idx: integer
  @param idx: The index of the disk
  @type disk: L{objects.Disk}
  @param disk: The disk object
  @rtype: dict
  @return: partial hooks environment for this disk

  """
  if disk.dev_type == constants.DT_PLAIN:
    # LVM volume: (volume group, LV name)
    vg, name = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_VG" % idx: vg,
      "INSTANCE_DISK%d_ID" % idx: name
      }
  elif disk.dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE):
    # File-based: (file driver, path)
    file_driver, name = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_DRIVER" % idx: file_driver,
      "INSTANCE_DISK%d_ID" % idx: name
      }
  elif disk.dev_type == constants.DT_BLOCK:
    # Block device: (driver, adopted device path)
    block_driver, adopt = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_DRIVER" % idx: block_driver,
      "INSTANCE_DISK%d_ID" % idx: adopt
      }
  elif disk.dev_type == constants.DT_RBD:
    # RBD: (driver, image name)
    rbd, name = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_DRIVER" % idx: rbd,
      "INSTANCE_DISK%d_ID" % idx: name
      }
  elif disk.dev_type == constants.DT_EXT:
    # External storage: (provider, name)
    provider, name = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_PROVIDER" % idx: provider,
      "INSTANCE_DISK%d_ID" % idx: name
      }
  elif disk.dev_type == constants.DT_DRBD8:
    # DRBD8: (primary node, secondary node, port, primary minor,
    # secondary minor, secret); the data/meta LVs live in disk.children.
    pnode, snode, port, pmin, smin, _ = disk.logical_id
    data, meta = disk.children
    data_vg, data_name = data.logical_id
    meta_vg, meta_name = meta.logical_id
    ret = {
      "INSTANCE_DISK%d_PNODE" % idx: pnode,
      "INSTANCE_DISK%d_SNODE" % idx: snode,
      "INSTANCE_DISK%d_PORT" % idx: port,
      "INSTANCE_DISK%d_PMINOR" % idx: pmin,
      "INSTANCE_DISK%d_SMINOR" % idx: smin,
      "INSTANCE_DISK%d_DATA_VG" % idx: data_vg,
      "INSTANCE_DISK%d_DATA_ID" % idx: data_name,
      "INSTANCE_DISK%d_META_VG" % idx: meta_vg,
      "INSTANCE_DISK%d_META_ID" % idx: meta_name,
      }
  elif disk.dev_type == constants.DT_GLUSTER:
    # Gluster: (file driver, path)
    file_driver, name = disk.logical_id
    ret = {
      "INSTANCE_DISK%d_DRIVER" % idx: file_driver,
      "INSTANCE_DISK%d_ID" % idx: name
      }
  elif disk.dev_type == constants.DT_DISKLESS:
    # Diskless instances export nothing besides the dev_type below.
    ret = {}
  else:
    # Unknown/future disk types: fall back to exporting only the dev_type.
    ret = {}

  ret.update({
    "INSTANCE_DISK%d_DEV_TYPE" % idx: disk.dev_type
    })

  return ret
759 760
def BuildDiskEnv(idx, disk):
  """Helper method to create disk's hooks env

  @type idx: integer
  @param idx: The index of the disk
  @type disk: L{objects.Disk} or dict
  @param disk: The disk object or a simple dict in case of LUInstanceCreate
  @rtype: dict
  @return: hooks environment variables for this disk

  """
  ret = {}
  # In case of LUInstanceCreate this runs in CheckPrereq where lu.disks
  # is a list of dicts i.e the result of ComputeDisks
  if isinstance(disk, dict):
    uuid = disk.get("uuid", "")
    name = disk.get(constants.IDISK_NAME, "")
    size = disk.get(constants.IDISK_SIZE, "")
    mode = disk.get(constants.IDISK_MODE, "")
  elif isinstance(disk, objects.Disk):
    uuid = disk.uuid
    name = disk.name
    size = disk.size
    mode = disk.mode
    # Real Disk objects also carry a typed logical_id worth exporting.
    ret.update(BuildDiskLogicalIDEnv(idx, disk))
  # NOTE(review): if "disk" is neither a dict nor an objects.Disk, the
  # locals below are unbound and this raises UnboundLocalError -- callers
  # presumably never pass anything else; confirm before relying on it.

  # only name is optional here
  if name:
    ret["INSTANCE_DISK%d_NAME" % idx] = name
  ret["INSTANCE_DISK%d_UUID" % idx] = uuid
  ret["INSTANCE_DISK%d_SIZE" % idx] = size
  ret["INSTANCE_DISK%d_MODE" % idx] = mode

  return ret
793 794
def CheckInstanceExistence(lu, instance_name):
  """Raises an error if an instance with the given name exists already.

  To be used in the locking phase.

  @type instance_name: string
  @param instance_name: The name of the instance.
  @raise errors.OpPrereqError: if the name is already in use

  """
  # Scan lazily instead of materialising the full list of names; any()
  # stops at the first match.
  existing = lu.cfg.GetAllInstancesInfo().values()
  if any(inst.name == instance_name for inst in existing):
    raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                               instance_name, errors.ECODE_EXISTS)
808 809
def CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID
  @raise errors.OpPrereqError: if the IP belongs to a network in the
      node's group

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is None:
    # No network in the node's group claims this IP; nothing to report.
    return (None, None)

  raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                              " network %s, but the target NIC does not." %
                              (ip, conf_net)),
                             errors.ECODE_STATE)
827 828
def ComputeIPolicyInstanceSpecViolation(
    ipolicy, instance_spec, disk_types,
    _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meets the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_types: list of strings
  @param disk_types: the disk templates of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  spec = instance_spec.get
  return _compute_fn(ipolicy,
                     spec(constants.ISPEC_MEM_SIZE, None),
                     spec(constants.ISPEC_CPU_COUNT, None),
                     spec(constants.ISPEC_DISK_COUNT, 0),
                     spec(constants.ISPEC_NIC_COUNT, 0),
                     spec(constants.ISPEC_DISK_SIZE, []),
                     spec(constants.ISPEC_SPINDLE_USE, None),
                     disk_types)
853 854
def ComputeInstanceCommunicationNIC(instance_name):
  """Compute the name of the instance NIC used by instance
  communication.

  With instance communication, a new NIC is added to the instance.
  This NIC has a special name that identifies it as being part of
  instance communication, and not just a normal NIC. The name is the
  well-known prefix followed by the instance name.

  @type instance_name: string
  @param instance_name: name of the instance the NIC belongs to

  @rtype: string
  @return: name of the NIC

  """
  return "%s%s" % (constants.INSTANCE_COMMUNICATION_NIC_PREFIX, instance_name)
873 874
def PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list
  @return: list of (operation, index, parameters, private) 4-tuples

  """
  # Without a factory every modification gets None as its private field.
  make_private = private_fn if private_fn is not None else (lambda: None)

  prepared = []
  for (op, idx, params) in mods:
    prepared.append((op, idx, params, make_private()))
  return prepared
892 893
def ApplyContainerMods(kind, container, chgdesc, mods,
                       create_fn, attach_fn, modify_fn, remove_fn,
                       detach_fn, post_add_fn=None):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type attach_fn: callable
  @param attach_fn: Callback for attaching an existing item to a container
    (L{constants.DDM_ATTACH}); receives absolute item index and item UUID or
    name, returns tuple containing new item and changes as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{PrepareContainerMods}
  @type detach_fn: callable
  @param detach_fn: Callback on detaching item; receives absolute item index,
    item and private data object as added by L{PrepareContainerMods}
  @type post_add_fn: callable
  @param post_add_fn: Callable for post-processing a newly created item after
    it has been put into the container. It receives the index of the new item
    and the new item as parameters.

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # ADD: build (or take verbatim, when no callback is given) a new item
      # and insert it at the position named by the identifier.
      addidx = GetIndexFromIdentifier(identifier, kind, container)
      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      InsertItemToIndex(identifier, item, container)

      if post_add_fn is not None:
        post_add_fn(addidx, item)

    elif op == constants.DDM_ATTACH:
      # ATTACH: like ADD, but the item already exists elsewhere and is
      # resolved by the attach callback from its UUID/name.
      addidx = GetIndexFromIdentifier(identifier, kind, container)
      if attach_fn is None:
        item = params
      else:
        (item, changes) = attach_fn(addidx, params, private)

      InsertItemToIndex(identifier, item, container)

      if post_add_fn is not None:
        post_add_fn(addidx, item)

    else:
      # All remaining operations act on an item already in the container.
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        changes = [("%s/%s" % (kind, absidx), "remove")]

        if remove_fn is not None:
          msg = remove_fn(absidx, item, private)
          if msg:
            changes.append(("%s/%s" % (kind, absidx), msg))

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_DETACH:
        assert not params

        changes = [("%s/%s" % (kind, absidx), "detach")]

        if detach_fn is not None:
          msg = detach_fn(absidx, item, private)
          if msg:
            changes.append(("%s/%s" % (kind, absidx), msg))

        # DETACH also drops the item from this container; the item itself
        # lives on elsewhere (unlike REMOVE).
        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    # Sanity-check the shape of the reported changes before recording them.
    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)
998 999
def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from
  @rtype: tuple
  @return: (absolute index, item)
  @raise IndexError: if a numeric identifier is out of range
  @raise errors.OpPrereqError: if no item matches a name/UUID identifier

  """
  # Try to interpret the identifier as a numeric index first
  try:
    idx = int(identifier)
    if idx == -1:
      # -1 refers to the last existing item
      if not container:
        raise IndexError("Got %s index %s, but the container is empty" %
                         (kind, idx))
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx >= len(container):
      # Valid indices for retrieval are 0..len-1; the previous
      # "idx > len(container)" check let idx == len(container) fall
      # through to a message-less IndexError from the subscript below
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[absidx])
  except ValueError:
    # Not a number: fall through to the name/UUID lookup
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)
1034 1035
def GetIndexFromIdentifier(identifier, kind, container):
  """Check if the identifier represents a valid container index and return it.

  Used in "add" and "attach" actions.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: Type of item, e.g. "disk", "nic"
  @type container: list
  @param container: Container to calculate the index from
  @rtype: int
  @return: the absolute index (C{-1} is translated to "append at the end")
  @raise errors.OpPrereqError: if the identifier is not an integer
  @raise IndexError: if the index is negative (other than -1) or too large

  """
  try:
    index = int(identifier)
  except ValueError:
    raise errors.OpPrereqError("Only positive integer or -1 is accepted",
                               errors.ECODE_INVAL)

  # -1 is shorthand for appending after the last element
  if index == -1:
    return len(container)
  if index < 0:
    raise IndexError("Not accepting negative indices other than -1")
  if index > len(container):
    raise IndexError("Got %s index %s, but there are only %s" %
                     (kind, index, len(container)))
  return index
1063 1064
def InsertItemToIndex(identifier, item, container):
  """Insert an item to the provided index of a container.

  Used in "add" and "attach" actions.

  @type identifier: string
  @param identifier: Item index
  @type item: object
  @param item: The item to be inserted
  @type container: list
  @param container: Container to insert the item to
  @raise errors.OpPrereqError: if the identifier is not an integer

  """
  try:
    index = int(identifier)
  except ValueError:
    raise errors.OpPrereqError("Only positive integer or -1 is accepted",
                               errors.ECODE_INVAL)

  if index == -1:
    # -1 means "append at the end"
    container.append(item)
    return

  # Callers are expected to have validated the index already
  # (e.g. via GetIndexFromIdentifier)
  assert 0 <= index <= len(container)
  # list.insert places the item before the given index
  container.insert(index, item)
1090 1091
def CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has less CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
      pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  # Query all nodes in one RPC round-trip, then validate each answer
  node_infos = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for uuid in node_uuids:
    result = node_infos[uuid]
    name = lu.cfg.GetNodeName(uuid)
    result.Raise("Cannot get current information from node %s" % name,
                 prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = result.payload
    cpu_count = hv_info.get("cpu_total", None)
    # A missing or non-integer value means the hypervisor could not
    # report the CPU count at all
    if not isinstance(cpu_count, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (name, cpu_count), errors.ECODE_ENVIRON)
    if cpu_count < requested:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (name, cpu_count, requested),
                                 errors.ECODE_NORES)
1129 1130
def CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object
  @raise errors.OpPrereqError: if the resolved name does not match the
      given name

  """
  hostname = netutils.GetHostname(name=name)
  resolved = hostname.name
  if resolved != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, resolved)
  # Require the given name to be a component prefix of the resolved one
  if not utils.MatchNameComponent(name, [resolved]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (resolved, name), errors.ECODE_INVAL)
  return hostname
1150 1151
def CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  Opportunistic locking only makes sense when node selection is
  delegated to an instance allocator.

  @param op: the opcode to check
  @raise errors.OpPrereqError: if opportunistic locking is requested
      without an iallocator

  """
  wants_opportunistic = op.opportunistic_locking
  has_allocator = bool(op.iallocator)
  if wants_opportunistic and not has_allocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)
1160 1161
def CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The full filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  # Serialize NICs up front; everything else maps straight from the
  # opcode and the backend parameters
  nic_dicts = [nic.ToDict() for nic in nics]
  return iallocator.IAReqInstanceAlloc(
    name=op.instance_name,
    disk_template=op.disk_template,
    group_name=op.group_name,
    tags=op.tags,
    os=op.os_type,
    vcpus=beparams[constants.BE_VCPUS],
    memory=beparams[constants.BE_MAXMEM],
    spindle_use=beparams[constants.BE_SPINDLE_USE],
    disks=disks,
    nics=nic_dicts,
    hypervisor=op.hypervisor,
    node_whitelist=node_name_whitelist)
1188 1189
def ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  defaults = cluster.beparams[constants.PP_DEFAULT]
  # Replace "auto" placeholders with the cluster-level default values
  for param in op.beparams.keys():
    if op.beparams[param] == constants.VALUE_AUTO:
      op.beparams[param] = defaults[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)
1206 1207
def ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The list of built-up L{objects.NIC} objects
  @raise errors.OpPrereqError: on invalid or conflicting NIC parameters

  """
  nics = []
  for nic in op.nics:
    # NIC mode: the explicit request, falling back to the cluster default
    # when unset or set to "auto"
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      # A network supplies mode/link itself, so passing them explicitly
      # alongside a network is a conflict
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      # "auto" uses the instance's resolved default IP, which only exists
      # when name checking was performed
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip and not net:
      raise errors.OpPrereqError("Routed nic mode requires an ip address"
                                 " if not attached to a network",
                                 errors.ECODE_INVAL)

    # MAC address verification; "auto"/"generate" placeholders are left
    # as-is for later generation, anything else must be a valid MAC and
    # is reserved immediately
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters; only explicitly requested values are kept,
    # the rest is filled from the cluster defaults below
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    # Validate the merged (cluster defaults + overrides) parameter set
    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
1304