
Source Code for Module ganeti.cmdlib.instance_utils

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Utility functions mainly, but not only, used by instance LUs."""

import logging
import os

from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import network
from ganeti import objects
from ganeti import pathutils
from ganeti import utils
from ganeti.cmdlib.common import AnnotateDiskParams, \
  ComputeIPolicyInstanceViolation, CheckDiskTemplateEnabled

def BuildInstanceHookEnv(name, primary_node_name, secondary_node_names, os_type,
                         status, minmem, maxmem, vcpus, nics, disk_template,
                         disks, bep, hvp, hypervisor_name, tags):
  """Builds instance-related env variables for hooks.

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node_name: string
  @param primary_node_name: the name of the instance's primary node
  @type secondary_node_names: list
  @param secondary_node_names: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: string
  @param status: the desired status of the instance
  @type minmem: string
  @param minmem: the minimum memory size of the instance
  @type maxmem: string
  @param maxmem: the maximum memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (name, uuid, ip, mac, mode, link, vlan, net,
      netinfo) representing the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: list of tuples (name, uuid, size, mode)
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @type tags: list
  @param tags: list of instance tags as strings
  @rtype: dict
  @return: the hook environment for this instance

  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node_name,
    "INSTANCE_SECONDARIES": " ".join(secondary_node_names),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MINMEM": minmem,
    "INSTANCE_MAXMEM": maxmem,
    # TODO(2.9) remove deprecated "memory" value
    "INSTANCE_MEMORY": maxmem,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }
  if nics:
    nic_count = len(nics)
    for idx, (name, uuid, ip, mac, mode, link, vlan, net, netinfo) \
        in enumerate(nics):
      if ip is None:
        ip = ""
      if name:
        env["INSTANCE_NIC%d_NAME" % idx] = name
      env["INSTANCE_NIC%d_UUID" % idx] = uuid
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      env["INSTANCE_NIC%d_VLAN" % idx] = vlan
      if netinfo:
        nobj = objects.Network.FromDict(netinfo)
        env.update(nobj.HooksDict("INSTANCE_NIC%d_" % idx))
      elif net:
        # FIXME: broken network reference: the instance NIC specifies a
        # network, but the relevant network entry was not in the config. This
        # should be made impossible.
        env["INSTANCE_NIC%d_NETWORK_NAME" % idx] = net
      if mode == constants.NIC_MODE_BRIDGED or \
         mode == constants.NIC_MODE_OVS:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (name, uuid, size, mode) in enumerate(disks):
      if name:
        env["INSTANCE_DISK%d_NAME" % idx] = name
      env["INSTANCE_DISK%d_UUID" % idx] = uuid
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  if not tags:
    tags = []

  env["INSTANCE_TAGS"] = " ".join(tags)

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env

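# Illustrative usage (not part of the module; values are made up): called
# directly, the helper returns the flat dictionary that is exported to hook
# scripts as environment variables.
#
#   env = BuildInstanceHookEnv(
#     name="inst1.example.com", primary_node_name="node1.example.com",
#     secondary_node_names=[], os_type="debootstrap+default", status="up",
#     minmem=512, maxmem=1024, vcpus=1, nics=[], disk_template="plain",
#     disks=[], bep={}, hvp={}, hypervisor_name="kvm", tags=["web"])
#   assert env["INSTANCE_NIC_COUNT"] == 0
#   assert env["INSTANCE_TAGS"] == "web"
#   assert env["INSTANCE_MEMORY"] == 1024  # deprecated alias of MAXMEM
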
def BuildInstanceHookEnvByObject(lu, instance, secondary_nodes=None,
                                 disks=None, override=None):
  """Builds instance-related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type secondary_nodes: list or None
  @param secondary_nodes: secondary node UUIDs to use instead of the ones
      stored in the configuration
  @type disks: list or None
  @param disks: disk objects to use instead of the ones stored in the
      configuration
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)

  # Override secondary_nodes
  if secondary_nodes is None:
    secondary_nodes = lu.cfg.GetInstanceSecondaryNodes(instance.uuid)

  # Override disks
  if disks is None:
    disks = lu.cfg.GetInstanceDisks(instance.uuid)

  args = {
    "name": instance.name,
    "primary_node_name": lu.cfg.GetNodeName(instance.primary_node),
    "secondary_node_names": lu.cfg.GetNodeNames(secondary_nodes),
    "os_type": instance.os,
    "status": instance.admin_state,
    "maxmem": bep[constants.BE_MAXMEM],
    "minmem": bep[constants.BE_MINMEM],
    "vcpus": bep[constants.BE_VCPUS],
    "nics": NICListToTuple(lu, instance.nics),
    "disk_template": instance.disk_template,
    "disks": [(disk.name, disk.uuid, disk.size, disk.mode)
              for disk in disks],
    "bep": bep,
    "hvp": hvp,
    "hypervisor_name": instance.hypervisor,
    "tags": instance.tags,
  }
  if override:
    args.update(override)
  return BuildInstanceHookEnv(**args) # pylint: disable=W0142

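# Sketch (assumes a LogicalUnit "lu" and an objects.Instance "instance" from
# a running opcode): entries in "override" are applied last, so a caller can
# report a value that differs from the stored configuration, e.g. the status
# the instance will have once the operation finishes:
#
#   env = BuildInstanceHookEnvByObject(lu, instance,
#                                      override={"status": "down"})
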
def GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(pathutils.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)

def CheckNodeNotDrained(lu, node_uuid):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  node = lu.cfg.GetNodeInfo(node_uuid)
  if node.drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node.name,
                               errors.ECODE_STATE)

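# Usage sketch (the LU attributes are hypothetical): guard helpers like this
# one and CheckNodeVmCapable below are typically called from CheckPrereq,
# where an OpPrereqError aborts the opcode before any state has been touched:
#
#   for node_uuid in [self.op.pnode_uuid] + list(self.secondaries):
#     CheckNodeNotDrained(self, node_uuid)
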
def CheckNodeVmCapable(lu, node_uuid):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  if not lu.cfg.GetNodeInfo(node_uuid).vm_capable:
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node_uuid,
                               errors.ECODE_STATE)

def RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Utility function to remove an instance.

  """
  logging.info("Removing block devices for instance %s", instance.name)

  if not RemoveDisks(lu, instance, ignore_failures=ignore_failures):
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance's disks")
  for disk in instance.disks:
    lu.cfg.RemoveInstanceDisk(instance.uuid, disk)

  logging.info("Removing instance %s out of cluster config", instance.name)
  lu.cfg.RemoveInstance(instance.uuid)

def RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node_uuid: string
  @param target_node_uuid: used to override the node on which to remove the
      disks
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  ports_to_release = set()
  inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
  anno_disks = AnnotateDiskParams(instance, inst_disks, lu.cfg)
  for (idx, device) in enumerate(anno_disks):
    if target_node_uuid:
      edata = [(target_node_uuid, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node_uuid, disk in edata:
      result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
      if result.fail_msg:
        lu.LogWarning("Could not remove disk %s on node %s,"
                      " continuing anyway: %s", idx,
                      lu.cfg.GetNodeName(node_uuid), result.fail_msg)
        if not (result.offline and node_uuid != instance.primary_node):
          all_result = False

    # if this is a DRBD disk, return its port to the pool
    if device.dev_type in constants.DTS_DRBD:
      ports_to_release.add(device.logical_id[2])

  if all_result or ignore_failures:
    for port in ports_to_release:
      lu.cfg.AddTcpUdpPort(port)

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)

  if instance.disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]:
    if len(inst_disks) > 0:
      file_storage_dir = os.path.dirname(inst_disks[0].logical_id[1])
    else:
      if instance.disk_template == constants.DT_SHARED_FILE:
        file_storage_dir = utils.PathJoin(lu.cfg.GetSharedFileStorageDir(),
                                          instance.name)
      else:
        file_storage_dir = utils.PathJoin(lu.cfg.GetFileStorageDir(),
                                          instance.name)
    if target_node_uuid:
      tgt = target_node_uuid
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, lu.cfg.GetNodeName(tgt), result.fail_msg)
      all_result = False

  return all_result

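# Sketch (assumes an LU and instance from a running opcode; the node variable
# is hypothetical): cleanup after a failed move could remove the disk
# remnants on the old primary only, tolerating per-device RPC failures:
#
#   if not RemoveDisks(lu, instance, target_node_uuid=old_primary_uuid,
#                      ignore_failures=True):
#     lu.LogWarning("Some disks of %s could not be removed", instance.name)
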
def NICToTuple(lu, nic):
  """Build a tuple of nic information.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nic: L{objects.NIC}
  @param nic: nic to convert to hooks tuple

  """
  cluster = lu.cfg.GetClusterInfo()
  filled_params = cluster.SimpleFillNIC(nic.nicparams)
  mode = filled_params[constants.NIC_MODE]
  link = filled_params[constants.NIC_LINK]
  vlan = filled_params[constants.NIC_VLAN]
  netinfo = None
  if nic.network:
    nobj = lu.cfg.GetNetwork(nic.network)
    netinfo = objects.Network.ToDict(nobj)
  return (nic.name, nic.uuid, nic.ip, nic.mac, mode, link, vlan,
          nic.network, netinfo)

def NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  for nic in nics:
    hooks_nics.append(NICToTuple(lu, nic))
  return hooks_nics

def CopyLockList(names):
  """Makes a copy of a list of lock names.

  Handles L{locking.ALL_SET} correctly.

  """
  if names == locking.ALL_SET:
    return locking.ALL_SET
  else:
    return names[:]

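# Example: the ALL_SET sentinel is passed through unchanged, while ordinary
# lists are copied so the caller can mutate the result safely:
#
#   names = ["inst1.example.com", "inst2.example.com"]
#   copied = CopyLockList(names)
#   copied.append("inst3.example.com")
#   assert len(names) == 2
#   assert CopyLockList(locking.ALL_SET) == locking.ALL_SET
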
def ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we release the locks
  @param level: Lock level
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  logging.debug("Lu %s ReleaseLocks %s names=%s, keep=%s",
                lu.wconfdcontext, level, names, keep)
  assert not (keep is not None and names is not None), \
    "Only one of the 'names' and the 'keep' parameters can be given"

  if names is not None:
    should_release = names.__contains__
  elif keep:
    should_release = lambda name: name not in keep
  else:
    should_release = None

  levelname = locking.LEVEL_NAMES[level]

  owned = lu.owned_locks(level)
  if not owned:
    # Not owning any lock at this level, do nothing
    pass

  elif should_release:
    retain = []
    release = []

    # Determine which locks to release
    for name in owned:
      if should_release(name):
        release.append(name)
      else:
        retain.append(name)

    assert len(lu.owned_locks(level)) == (len(retain) + len(release))

    # Release just some locks
    lu.WConfdClient().TryUpdateLocks(
      lu.release_request(level, release))
    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
  else:
    lu.WConfdClient().FreeLocksLevel(levelname)

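# Usage sketch (hypothetical lock names): "names" and "keep" are mutually
# exclusive ways to select the same subset. Retaining only the locks still
# needed is the common pattern:
#
#   ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.pnode_uuid])
#
# Calling with neither argument frees every lock owned at that level.
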
def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
                                 target_group, cfg,
                                 _compute_fn=ComputeIPolicyInstanceViolation):
  """Compute if instance meets the specs of the new target group.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param current_group: The current group of the instance
  @param target_group: The new group of the instance
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  if current_group == target_group:
    return []
  else:
    return _compute_fn(ipolicy, instance, cfg)

def CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
                           _compute_fn=_ComputeIPolicyNodeViolation):
  """Checks that the target node is correct in terms of instance policy.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we perform the check
  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param node: The new node the instance is being relocated to
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param ignore: Ignore violations of the ipolicy
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
  res = _compute_fn(ipolicy, instance, primary_node.group, node.group, cfg)

  if res:
    msg = ("Instance does not meet target node group's (%s) instance"
           " policy: %s") % (node.group, utils.CommaJoin(res))
    if ignore:
      lu.LogWarning(msg)
    else:
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

def GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name

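# Example: for an instance named "inst1.example.com" this returns
# "originstname+inst1.example.com", which is attached to the block devices
# so a stray disk found on a node can be traced back to its instance.
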
def CheckNodeFreeMemory(lu, node_uuid, reason, requested, hvname, hvparams):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuid: C{str}
  @param node_uuid: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hvname: string
  @param hvname: the hypervisor's name
  @type hvparams: dict of strings
  @param hvparams: the hypervisor's parameters
  @rtype: integer
  @return: node current free memory
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  node_name = lu.cfg.GetNodeName(node_uuid)
  nodeinfo = lu.rpc.call_node_info([node_uuid], None, [(hvname, hvparams)])
  nodeinfo[node_uuid].Raise("Can't get data from node %s" % node_name,
                            prereq=True, ecode=errors.ECODE_ENVIRON)
  (_, _, (hv_info, )) = nodeinfo[node_uuid].payload

  free_mem = hv_info.get("memory_free", None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node_name, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node_name, reason, requested, free_mem),
                               errors.ECODE_NORES)
  return free_mem

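# Sketch (assumes an LU, an instance and its filled backend parameters "bep"
# inside CheckPrereq): verify the primary node can hold the instance's
# maximum memory before starting it:
#
#   CheckNodeFreeMemory(
#     self, instance.primary_node, "starting instance %s" % instance.name,
#     bep[constants.BE_MAXMEM], instance.hypervisor,
#     self.cfg.GetClusterInfo().hvparams[instance.hypervisor])
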
def CheckInstanceBridgesExist(lu, instance, node_uuid=None):
  """Check that the bridges needed by an instance exist.

  """
  if node_uuid is None:
    node_uuid = instance.primary_node
  CheckNicsBridgesExist(lu, instance.nics, node_uuid)

def CheckNicsBridgesExist(lu, nics, node_uuid):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster = lu.cfg.GetClusterInfo()
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(node_uuid, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 lu.cfg.GetNodeName(node_uuid), prereq=True,
                 ecode=errors.ECODE_ENVIRON)

def UpdateMetadata(feedback_fn, rpc, instance,
                   osparams_public=None,
                   osparams_private=None,
                   osparams_secret=None):
  """Updates instance metadata on the metadata daemon on the
  instance's primary node.

  If the daemon isn't available (not compiled), do nothing.

  In case the RPC fails, this function simply issues a warning and
  proceeds normally.

  @type feedback_fn: callable
  @param feedback_fn: function used to send feedback back to the caller

  @type rpc: L{rpc.node.RpcRunner}
  @param rpc: RPC runner

  @type instance: L{objects.Instance}
  @param instance: instance for which the metadata should be updated

  @type osparams_public: NoneType or dict
  @param osparams_public: public OS parameters used to override those
      defined in L{instance}

  @type osparams_private: NoneType or dict
  @param osparams_private: private OS parameters used to override those
      defined in L{instance}

  @type osparams_secret: NoneType or dict
  @param osparams_secret: secret OS parameters used to override those
      defined in L{instance}

  @rtype: NoneType
  @return: None

  """
  if not constants.ENABLE_METAD:
    return

  data = instance.ToDict()

  if osparams_public is not None:
    data["osparams_public"] = osparams_public

  if osparams_private is not None:
    data["osparams_private"] = osparams_private

  if osparams_secret is not None:
    data["osparams_secret"] = osparams_secret
  else:
    data["osparams_secret"] = {}

  result = rpc.call_instance_metadata_modify(instance.primary_node, data)
  result.Warn("Could not update metadata for instance '%s'" % instance.name,
              feedback_fn)

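# Sketch (assumes an LU with its RPC runner; the parameter value is made up):
# after changing OS parameters, push the updated instance data to the
# metadata daemon; an RPC failure only produces a warning, so this is safe
# to call late in Exec:
#
#   UpdateMetadata(feedback_fn, self.rpc, self.instance,
#                  osparams_public={"mirror": "http://deb.example.com"})
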
def CheckCompressionTool(lu, compression_tool):
  """Checks if the provided compression tool is allowed to be used.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we perform the check
  @type compression_tool: string
  @param compression_tool: Compression tool to use for importing or exporting
      the instance

  @rtype: NoneType
  @return: None

  @raise errors.OpPrereqError: If the tool is not enabled by Ganeti or
      whitelisted

  """
  allowed_tools = lu.cfg.GetCompressionTools()
  if (compression_tool != constants.IEC_NONE and
      compression_tool not in allowed_tools):
    raise errors.OpPrereqError(
      "Compression tool not allowed, tools allowed are [%s]"
      % ", ".join(allowed_tools)
    )

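# Usage sketch: "none" is always accepted; anything else must appear in the
# cluster's configured compression tools (gzip is typically in the default
# set):
#
#   CheckCompressionTool(self, constants.IEC_GZIP)  # usually passes
#   CheckCompressionTool(self, "lzop")              # raises unless enabled
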
def CheckInstanceExistence(lu, instance_name):
  """Raises an error if an instance with the given name exists already.

  To be used in the locking phase.

  @type instance_name: string
  @param instance_name: The name of the instance.

  """
  if instance_name in \
      [inst.name for inst in lu.cfg.GetAllInstancesInfo().values()]:
    raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                               instance_name, errors.ECODE_EXISTS)

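# Usage sketch: instance-creating LUs call this while computing locks, so a
# clashing name fails fast, before the expensive part of the opcode runs:
#
#   CheckInstanceExistence(self, self.op.instance_name)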