Package ganeti :: Package cmdlib :: Module instance_utils
[hide private]
[frames] | no frames]

Source Code for Module ganeti.cmdlib.instance_utils

  1  # 
  2  # 
  3   
  4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc. 
  5  # All rights reserved. 
  6  # 
  7  # Redistribution and use in source and binary forms, with or without 
  8  # modification, are permitted provided that the following conditions are 
  9  # met: 
 10  # 
 11  # 1. Redistributions of source code must retain the above copyright notice, 
 12  # this list of conditions and the following disclaimer. 
 13  # 
 14  # 2. Redistributions in binary form must reproduce the above copyright 
 15  # notice, this list of conditions and the following disclaimer in the 
 16  # documentation and/or other materials provided with the distribution. 
 17  # 
 18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
 19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
 20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
 21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
 22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
 23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
 24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
 25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
 26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
 27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
 28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 29   
 30   
 31  """Utility function mainly, but not only used by instance LU's.""" 
 32   
 33  import logging 
 34  import os 
 35   
 36  from ganeti import constants 
 37  from ganeti import errors 
 38  from ganeti import locking 
 39  from ganeti import network 
 40  from ganeti import objects 
 41  from ganeti import pathutils 
 42  from ganeti import utils 
 43  from ganeti.cmdlib.common import AnnotateDiskParams, \ 
 44    ComputeIPolicyInstanceViolation, CheckDiskTemplateEnabled 
 45   
 46   
def BuildInstanceHookEnv(name, primary_node_name, secondary_node_names, os_type,
                         status, minmem, maxmem, vcpus, nics, disk_template,
                         disks, bep, hvp, hypervisor_name, tags):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node_name: string
  @param primary_node_name: the name of the instance's primary node
  @type secondary_node_names: list
  @param secondary_node_names: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: string
  @param status: the desired status of the instance
  @type minmem: string
  @param minmem: the minimum memory size of the instance
  @type maxmem: string
  @param maxmem: the maximum memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (name, uuid, ip, mac, mode, link, vlan, net,
      netinfo) representing the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: list of disks (either objects.Disk or dict)
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @type tags: list
  @param tags: list of instance tags as strings
  @rtype: dict
  @return: the hook environment for this instance

  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node_name,
    "INSTANCE_SECONDARIES": " ".join(secondary_node_names),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MINMEM": minmem,
    "INSTANCE_MAXMEM": maxmem,
    # TODO(2.9) remove deprecated "memory" value
    "INSTANCE_MEMORY": maxmem,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
    }
  if nics:
    nic_count = len(nics)
    # NB: loop variable renamed from "name" to avoid shadowing the instance
    # name parameter
    for idx, (nic_name, uuid, ip, mac, mode, link, vlan, net, netinfo) \
        in enumerate(nics):
      if ip is None:
        ip = ""
      if nic_name:
        env["INSTANCE_NIC%d_NAME" % idx] = nic_name
      env["INSTANCE_NIC%d_UUID" % idx] = uuid
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      env["INSTANCE_NIC%d_VLAN" % idx] = vlan
      if netinfo:
        nobj = objects.Network.FromDict(netinfo)
        env.update(nobj.HooksDict("INSTANCE_NIC%d_" % idx))
      elif net:
        # FIXME: broken network reference: the instance NIC specifies a
        # network, but the relevant network entry was not in the config. This
        # should be made impossible.
        # (Bug fix: this used to test "elif network:", i.e. the imported
        # ganeti.network *module*, which is always true; the intended test is
        # the NIC's network reference "net".)
        env["INSTANCE_NIC%d_NETWORK_NAME" % idx] = net
      if mode == constants.NIC_MODE_BRIDGED or \
         mode == constants.NIC_MODE_OVS:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, disk in enumerate(disks):
      env.update(BuildDiskEnv(idx, disk))
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  if not tags:
    tags = []

  env["INSTANCE_TAGS"] = " ".join(tags)

  # flatten the backend and hypervisor parameter dicts into the environment
  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env
153 154
def BuildInstanceHookEnvByObject(lu, instance, secondary_nodes=None,
                                 disks=None, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type secondary_nodes: list or None
  @param secondary_nodes: secondary node UUIDs to use instead of the ones
      stored in the configuration
  @type disks: list or None
  @param disks: disks to use instead of the instance's configured disks
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster_info = lu.cfg.GetClusterInfo()
  be_full = cluster_info.FillBE(instance)
  hv_full = cluster_info.FillHV(instance)

  # Fall back to configuration data when no explicit overrides were given
  if secondary_nodes is None:
    secondary_nodes = lu.cfg.GetInstanceSecondaryNodes(instance.uuid)
  if disks is None:
    disks = lu.cfg.GetInstanceDisks(instance.uuid)

  args = {
    "name": instance.name,
    "primary_node_name": lu.cfg.GetNodeName(instance.primary_node),
    "secondary_node_names": lu.cfg.GetNodeNames(secondary_nodes),
    "os_type": instance.os,
    "status": instance.admin_state,
    "maxmem": be_full[constants.BE_MAXMEM],
    "minmem": be_full[constants.BE_MINMEM],
    "vcpus": be_full[constants.BE_VCPUS],
    "nics": NICListToTuple(lu, instance.nics),
    "disk_template": instance.disk_template,
    "disks": disks,
    "bep": be_full,
    "hvp": hv_full,
    "hypervisor_name": instance.hypervisor,
    "tags": instance.tags,
    }
  if override:
    args.update(override)

  return BuildInstanceHookEnv(**args) # pylint: disable=W0142
203 204
def GetClusterDomainSecret():
  """Reads the cluster domain secret.

  @return: the single-line contents of the cluster domain secret file

  """
  secret_path = pathutils.CLUSTER_DOMAIN_SECRET_FILE
  return utils.ReadOneLineFile(secret_path, strict=True)
211 212
def CheckNodeNotDrained(lu, node_uuid):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  node_info = lu.cfg.GetNodeInfo(node_uuid)
  if not node_info.drained:
    return
  raise errors.OpPrereqError("Can't use drained node %s" % node_info.name,
                             errors.ECODE_STATE)
225 226
def CheckNodeVmCapable(lu, node_uuid):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  node_info = lu.cfg.GetNodeInfo(node_uuid)
  if node_info.vm_capable:
    return
  raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node_uuid,
                             errors.ECODE_STATE)
238 239
def RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Utility function to remove an instance.

  Removes the instance's block devices, then its disk objects and finally
  the instance itself from the configuration.

  """
  logging.info("Removing block devices for instance %s", instance.name)

  disks_removed = RemoveDisks(lu, instance, ignore_failures=ignore_failures)
  if not disks_removed:
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    # best-effort mode: warn and carry on with the config cleanup
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance's disks")
  for disk in instance.disks:
    lu.cfg.RemoveInstanceDisk(instance.uuid, disk)

  logging.info("Removing instance %s out of cluster config", instance.name)
  lu.cfg.RemoveInstance(instance.uuid)
257 258
def RemoveDisks(lu, instance, disk_template=None, disks=None,
                target_node_uuid=None, ignore_failures=False):
  """Remove all or a subset of disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones.

  This function is also used by the disk template conversion mechanism to
  remove the old block devices of the instance. Since the instance has
  changed its template at the time we remove the original disks, we must
  specify the template of the disks we are about to remove as an argument.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type disk_template: string
  @param disk_template: if passed, overrides the instance's disk_template
  @type disks: list of L{objects.Disk}
  @param disks: the disks to remove; if not specified, all the disks of the
      instance are removed
  @type target_node_uuid: string
  @param target_node_uuid: used to override the node on which to remove the
      disks
  @type ignore_failures: boolean
  @param ignore_failures: if True, DRBD ports are returned to the pool even
      when some removals failed
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  ports_to_release = set()

  # Count the instance's full disk set *before* "disks" is defaulted, so we
  # can later tell whether we were asked to remove all disks or a subset
  disk_count = len(instance.disks)
  if disks is None:
    disks = lu.cfg.GetInstanceDisks(instance.uuid)

  if disk_template is None:
    disk_template = instance.disk_template

  anno_disks = AnnotateDiskParams(instance, disks, lu.cfg)
  for (idx, device) in enumerate(anno_disks):
    # Each disk may live on several nodes (e.g. DRBD); remove it everywhere
    # unless a single target node was explicitly requested
    if target_node_uuid:
      edata = [(target_node_uuid, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node_uuid, disk in edata:
      result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
      if result.fail_msg:
        lu.LogWarning("Could not remove disk %s on node %s,"
                      " continuing anyway: %s", idx,
                      lu.cfg.GetNodeName(node_uuid), result.fail_msg)
        # A failure on an offline secondary is tolerated; anything else
        # marks the whole removal as unsuccessful
        if not (result.offline and node_uuid != instance.primary_node):
          all_result = False

    # if this is a DRBD disk, return its port to the pool
    if device.dev_type in constants.DTS_DRBD:
      ports_to_release.add(device.logical_id[2])

  # Only recycle the DRBD ports once we know the devices are gone (or the
  # caller asked us to ignore failures)
  if all_result or ignore_failures:
    for port in ports_to_release:
      lu.cfg.AddTcpUdpPort(port)

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), disk_template)

  # For file-based templates, also remove the per-instance storage directory,
  # but only when *all* disks of the instance were removed
  if (len(disks) == disk_count and
      disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]):
    if len(disks) > 0:
      # derive the directory from the first disk's file path
      file_storage_dir = os.path.dirname(disks[0].logical_id[1])
    else:
      # no disks left to inspect: reconstruct the directory from the
      # cluster-wide storage dir and the instance name
      if disk_template == constants.DT_SHARED_FILE:
        file_storage_dir = utils.PathJoin(lu.cfg.GetSharedFileStorageDir(),
                                          instance.name)
      else:
        file_storage_dir = utils.PathJoin(lu.cfg.GetFileStorageDir(),
                                          instance.name)
    if target_node_uuid:
      tgt = target_node_uuid
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, lu.cfg.GetNodeName(tgt), result.fail_msg)
      all_result = False

  return all_result
347 348
def NICToTuple(lu, nic):
  """Build a tuple of nic information.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nic: L{objects.NIC}
  @param nic: nic to convert to hooks tuple

  @return: tuple of (name, uuid, ip, mac, mode, link, vlan, network, netinfo)

  """
  # Fill in cluster-level NIC parameter defaults before exporting
  filled_params = lu.cfg.GetClusterInfo().SimpleFillNIC(nic.nicparams)

  netinfo = None
  if nic.network:
    netinfo = objects.Network.ToDict(lu.cfg.GetNetwork(nic.network))

  return (nic.name, nic.uuid, nic.ip, nic.mac,
          filled_params[constants.NIC_MODE],
          filled_params[constants.NIC_LINK],
          filled_params[constants.NIC_VLAN],
          nic.network, netinfo)
369 370
def NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  return [NICToTuple(lu, nic) for nic in nics]
387 388
def CopyLockList(names):
  """Makes a copy of a list of lock names.

  Handles L{locking.ALL_SET} correctly.

  """
  # ALL_SET is a sentinel, not a list: pass it through unchanged
  if names == locking.ALL_SET:
    return locking.ALL_SET
  return list(names)
399 400
def ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit whose locks are released
  @param level: Lock level (an index into L{locking.LEVEL_NAMES})
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  logging.debug("Lu %s ReleaseLocks %s names=%s, keep=%s",
                lu.wconfdcontext, level, names, keep)
  # "names" and "keep" are two mutually exclusive ways of selecting the
  # locks to release
  assert not (keep is not None and names is not None), \
    "Only one of the 'names' and the 'keep' parameters can be given"

  # Build a predicate that decides, per lock name, whether to release it;
  # None means "release everything at this level"
  if names is not None:
    should_release = names.__contains__
  elif keep:
    should_release = lambda name: name not in keep
  else:
    # NOTE: an empty (but non-None) "keep" list also ends up here, which is
    # equivalent to keeping nothing, i.e. releasing all locks at this level
    should_release = None

  levelname = locking.LEVEL_NAMES[level]

  owned = lu.owned_locks(level)
  if not owned:
    # Not owning any lock at this level, do nothing
    pass

  elif should_release:
    retain = []
    release = []

    # Determine which locks to release
    for name in owned:
      if should_release(name):
        release.append(name)
      else:
        retain.append(name)

    assert len(lu.owned_locks(level)) == (len(retain) + len(release))

    # Release just some locks
    lu.WConfdClient().TryUpdateLocks(
      lu.release_request(level, release))
    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
  else:
    # No selection predicate: drop every lock held at this level
    lu.WConfdClient().FreeLocksLevel(levelname)
450 451
def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
                                 target_group, cfg,
                                 _compute_fn=ComputeIPolicyInstanceViolation):
  """Compute if instance meets the specs of the new target group.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param current_group: The current group of the instance
  @param target_group: The new group of the instance
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  # Staying in the same group can never introduce a policy violation
  if current_group != target_group:
    return _compute_fn(ipolicy, instance, cfg)
  return []
471 472
def CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
                           _compute_fn=_ComputeIPolicyNodeViolation):
  """Checks that the target node is correct in terms of instance policy.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param node: The new node to relocate
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param ignore: Ignore violations of the ipolicy
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  primary = lu.cfg.GetNodeInfo(instance.primary_node)
  violations = _compute_fn(ipolicy, instance, primary.group, node.group, cfg)

  if not violations:
    return

  msg = ("Instance does not meet target node group's (%s) instance"
         " policy: %s") % (node.group, utils.CommaJoin(violations))
  if ignore:
    # caller asked to tolerate violations: just log them
    lu.LogWarning(msg)
  else:
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
497 498
def GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  @type instance: L{objects.Instance}
  @param instance: the instance the disks belong to
  @rtype: string

  """
  return "originstname+" + instance.name
504 505
def CheckNodeFreeMemory(lu, node_uuid, reason, requested, hvname, hvparams):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuid: C{str}
  @param node_uuid: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hvname: string
  @param hvname: the hypervisor's name
  @type hvparams: dict of strings
  @param hvparams: the hypervisor's parameters
  @rtype: integer
  @return: node current free memory
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  node_name = lu.cfg.GetNodeName(node_uuid)
  # Query only this node and only the relevant hypervisor
  nodeinfo = lu.rpc.call_node_info([node_uuid], None, [(hvname, hvparams)])
  nodeinfo[node_uuid].Raise("Can't get data from node %s" % node_name,
                            prereq=True, ecode=errors.ECODE_ENVIRON)
  # payload is a 3-tuple; the third element holds one entry per requested
  # hypervisor, and we requested exactly one above
  (_, _, (hv_info, )) = nodeinfo[node_uuid].payload

  # "memory_free" may be missing or malformed if the hypervisor could not
  # report it; treat anything that is not an int as an environment error
  free_mem = hv_info.get("memory_free", None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node_name, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node_name, reason, requested, free_mem),
                               errors.ECODE_NORES)
  return free_mem
549 550
def CheckInstanceBridgesExist(lu, instance, node_uuid=None):
  """Check that the bridges needed by an instance exist.

  @param node_uuid: node on which to check the bridges; defaults to the
      instance's primary node

  """
  check_node = instance.primary_node if node_uuid is None else node_uuid
  CheckNicsBridgesExist(lu, instance.nics, check_node)
558 559
def CheckNicsBridgesExist(lu, nics, node_uuid):
  """Check that the bridges needed by a list of nics exist.

  @raise errors.OpPrereqError: via result.Raise, if a bridge is missing

  """
  cluster = lu.cfg.GetClusterInfo()
  # collect the link of every bridged NIC, with cluster defaults filled in
  brlist = []
  for nic in nics:
    params = cluster.SimpleFillNIC(nic.nicparams)
    if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
      brlist.append(params[constants.NIC_LINK])

  if not brlist:
    return

  result = lu.rpc.call_bridges_exist(node_uuid, brlist)
  result.Raise("Error checking bridges on destination node '%s'" %
               lu.cfg.GetNodeName(node_uuid), prereq=True,
               ecode=errors.ECODE_ENVIRON)
573 574
def UpdateMetadata(feedback_fn, rpc, instance,
                   osparams_public=None,
                   osparams_private=None,
                   osparams_secret=None):
  """Updates instance metadata on the metadata daemon on the
  instance's primary node.

  If the daemon isn't available (not compiled), do nothing.

  In case the RPC fails, this function simply issues a warning and
  proceeds normally.

  @type feedback_fn: callable
  @param feedback_fn: function used send feedback back to the caller

  @type rpc: L{rpc.node.RpcRunner}
  @param rpc: RPC runner

  @type instance: L{objects.Instance}
  @param instance: instance for which the metadata should be updated

  @type osparams_public: NoneType or dict
  @param osparams_public: public OS parameters used to override those
      defined in L{instance}

  @type osparams_private: NoneType or dict
  @param osparams_private: private OS parameters used to override those
      defined in L{instance}

  @type osparams_secret: NoneType or dict
  @param osparams_secret: secret OS parameters used to override those
      defined in L{instance}

  @rtype: NoneType
  @return: None

  """
  if not constants.ENABLE_METAD:
    return

  data = instance.ToDict()

  # public/private params are only overridden when explicitly given
  for key, value in (("osparams_public", osparams_public),
                     ("osparams_private", osparams_private)):
    if value is not None:
      data[key] = value

  # secret params are always set, defaulting to an empty dict
  if osparams_secret is None:
    data["osparams_secret"] = {}
  else:
    data["osparams_secret"] = osparams_secret

  result = rpc.call_instance_metadata_modify(instance.primary_node, data)
  result.Warn("Could not update metadata for instance '%s'" % instance.name,
              feedback_fn)
631 632
def CheckCompressionTool(lu, compression_tool):
  """ Checks if the provided compression tool is allowed to be used.

  @type compression_tool: string
  @param compression_tool: Compression tool to use for importing or exporting
      the instance

  @rtype: NoneType
  @return: None

  @raise errors.OpPrereqError: If the tool is not enabled by Ganeti or
      whitelisted

  """
  allowed_tools = lu.cfg.GetCompressionTools()
  # "none" is always acceptable, as is anything on the whitelist
  if compression_tool == constants.IEC_NONE:
    return
  if compression_tool in allowed_tools:
    return
  raise errors.OpPrereqError(
    "Compression tool not allowed, tools allowed are [%s]"
    % ", ".join(allowed_tools), errors.ECODE_INVAL
  )
654 655
def BuildDiskLogicalIDEnv(idx, disk):
  """Helper method to create hooks env related to disk's logical_id

  @type idx: integer
  @param idx: The index of the disk
  @type disk: L{objects.Disk}
  @param disk: The disk object

  """
  prefix = "INSTANCE_DISK%d_" % idx
  dev_type = disk.dev_type

  if dev_type == constants.DT_DRBD8:
    # DRBD is the only template with a composite logical_id and children
    pnode, snode, port, pmin, smin, _ = disk.logical_id
    data_child, meta_child = disk.children
    data_vg, data_name = data_child.logical_id
    meta_vg, meta_name = meta_child.logical_id
    ret = {
      prefix + "PNODE": pnode,
      prefix + "SNODE": snode,
      prefix + "PORT": port,
      prefix + "PMINOR": pmin,
      prefix + "SMINOR": smin,
      prefix + "DATA_VG": data_vg,
      prefix + "DATA_ID": data_name,
      prefix + "META_VG": meta_vg,
      prefix + "META_ID": meta_name,
      }
  else:
    # For the remaining templates logical_id is a two-element tuple; only
    # the name of the hook variable for the first element differs
    first_key_by_type = {
      constants.DT_PLAIN: "VG",
      constants.DT_FILE: "DRIVER",
      constants.DT_SHARED_FILE: "DRIVER",
      constants.DT_BLOCK: "DRIVER",
      constants.DT_RBD: "DRIVER",
      constants.DT_EXT: "PROVIDER",
      constants.DT_GLUSTER: "DRIVER",
      }
    if dev_type in first_key_by_type:
      first, second = disk.logical_id
      ret = {
        prefix + first_key_by_type[dev_type]: first,
        prefix + "ID": second,
        }
    else:
      # DT_DISKLESS (and any unrecognized type) exports nothing besides
      # DEV_TYPE below
      ret = {}

  ret[prefix + "DEV_TYPE"] = dev_type

  return ret
727 728
def BuildDiskEnv(idx, disk):
  """Helper method to create disk's hooks env

  @type idx: integer
  @param idx: The index of the disk
  @type disk: L{objects.Disk} or dict
  @param disk: The disk object or a simple dict in case of LUInstanceCreate

  """
  env = {}
  # In case of LUInstanceCreate this runs in CheckPrereq where lu.disks
  # is a list of dicts i.e the result of ComputeDisks
  if isinstance(disk, dict):
    uuid = disk.get("uuid", "")
    name = disk.get(constants.IDISK_NAME, "")
    size = disk.get(constants.IDISK_SIZE, "")
    mode = disk.get(constants.IDISK_MODE, "")
  elif isinstance(disk, objects.Disk):
    uuid = disk.uuid
    name = disk.name
    size = disk.size
    mode = disk.mode
    # real Disk objects also carry a logical_id worth exporting
    env.update(BuildDiskLogicalIDEnv(idx, disk))

  # only name is optional here
  if name:
    env["INSTANCE_DISK%d_NAME" % idx] = name
  env["INSTANCE_DISK%d_UUID" % idx] = uuid
  env["INSTANCE_DISK%d_SIZE" % idx] = size
  env["INSTANCE_DISK%d_MODE" % idx] = mode

  return env
761 762
def CheckInstanceExistence(lu, instance_name):
  """Raises an error if an instance with the given name exists already.

  @type instance_name: string
  @param instance_name: The name of the instance.

  To be used in the locking phase.

  @raise errors.OpPrereqError: if an instance with the given name already
      exists in the cluster

  """
  # Use any() over a generator so the scan stops at the first match instead
  # of materializing the full list of instance names first
  if any(inst.name == instance_name
         for inst in lu.cfg.GetAllInstancesInfo().values()):
    raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                               instance_name, errors.ECODE_EXISTS)