
Source Code for Module ganeti.cmdlib.instance_utils

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

"""Utility functions, mainly (but not only) used by instance LUs."""

import logging
import os

from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import network
from ganeti import objects
from ganeti import pathutils
from ganeti import utils
from ganeti.cmdlib.common import AnnotateDiskParams, \
  ComputeIPolicyInstanceViolation

def BuildInstanceHookEnv(name, primary_node, secondary_nodes, os_type, status,
                         minmem, maxmem, vcpus, nics, disk_template, disks,
                         bep, hvp, hypervisor_name, tags):
  """Builds instance related env variables for hooks

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node: string
  @param primary_node: the name of the instance's primary node
  @type secondary_nodes: list
  @param secondary_nodes: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: string
  @param status: the desired status of the instance
  @type minmem: string
  @param minmem: the minimum memory size of the instance
  @type maxmem: string
  @param maxmem: the maximum memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (name, uuid, ip, mac, mode, link, net, netinfo)
      representing the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: list of tuples (name, uuid, size, mode)
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @type tags: list
  @param tags: list of instance tags as strings
  @rtype: dict
  @return: the hook environment for this instance

  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node,
    "INSTANCE_SECONDARIES": " ".join(secondary_nodes),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MINMEM": minmem,
    "INSTANCE_MAXMEM": maxmem,
    # TODO(2.9) remove deprecated "memory" value
    "INSTANCE_MEMORY": maxmem,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }
  if nics:
    nic_count = len(nics)
    for idx, (name, uuid, ip, mac, mode, link, net, netinfo) in enumerate(nics):
      if ip is None:
        ip = ""
      if name:
        env["INSTANCE_NIC%d_NAME" % idx] = name
      env["INSTANCE_NIC%d_UUID" % idx] = uuid
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      if netinfo:
        nobj = objects.Network.FromDict(netinfo)
        env.update(nobj.HooksDict("INSTANCE_NIC%d_" % idx))
      elif network:
        # FIXME: broken network reference: the instance NIC specifies a
        # network, but the relevant network entry was not in the config. This
        # should be made impossible.
        env["INSTANCE_NIC%d_NETWORK_NAME" % idx] = net
      if mode == constants.NIC_MODE_BRIDGED:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (name, uuid, size, mode) in enumerate(disks):
      if name:
        env["INSTANCE_DISK%d_NAME" % idx] = name
      env["INSTANCE_DISK%d_UUID" % idx] = uuid
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  if not tags:
    tags = []

  env["INSTANCE_TAGS"] = " ".join(tags)

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env

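# Illustrative sketch, not part of the upstream source: assuming `nics`,
# `disks`, `bep` and `hvp` have already been prepared (e.g. via
# NICListToTuple and the cluster's FillBE/FillHV), a call along the lines of
#
#   env = BuildInstanceHookEnv("inst1.example.com", "node1", ["node2"],
#                              "debootstrap+default", "up", 512, 1024, 2,
#                              nics, "drbd", disks, bep, hvp, "xen-pvm",
#                              ["web"])
#
# produces, among others, INSTANCE_NAME, INSTANCE_PRIMARY,
# INSTANCE_SECONDARIES="node2", INSTANCE_NIC_COUNT/INSTANCE_DISK_COUNT and
# one INSTANCE_BE_*/INSTANCE_HV_* entry per backend/hypervisor parameter.
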
def BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    "name": instance.name,
    "primary_node": instance.primary_node,
    "secondary_nodes": instance.secondary_nodes,
    "os_type": instance.os,
    "status": instance.admin_state,
    "maxmem": bep[constants.BE_MAXMEM],
    "minmem": bep[constants.BE_MINMEM],
    "vcpus": bep[constants.BE_VCPUS],
    "nics": NICListToTuple(lu, instance.nics),
    "disk_template": instance.disk_template,
    "disks": [(disk.name, disk.uuid, disk.size, disk.mode)
              for disk in instance.disks],
    "bep": bep,
    "hvp": hvp,
    "hypervisor_name": instance.hypervisor,
    "tags": instance.tags,
  }
  if override:
    args.update(override)
  return BuildInstanceHookEnv(**args)  # pylint: disable=W0142

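# Illustrative sketch, not part of the upstream source: within an LU the
# environment is normally derived from the instance object, optionally
# overriding individual BuildInstanceHookEnv arguments by name, e.g.
#
#   env = BuildInstanceHookEnvByObject(self, instance,
#                                      override={"status": "down"})
#
# Keys in `override` replace the values computed from the instance/cluster.
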
def GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(pathutils.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)

def CheckNodeNotDrained(lu, node):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  if lu.cfg.GetNodeInfo(node).drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node,
                               errors.ECODE_STATE)

def CheckNodeVmCapable(lu, node):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  if not lu.cfg.GetNodeInfo(node).vm_capable:
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node,
                               errors.ECODE_STATE)

def RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Utility function to remove an instance.

  """
  logging.info("Removing block devices for instance %s", instance.name)

  if not RemoveDisks(lu, instance, ignore_failures=ignore_failures):
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance %s out of cluster config", instance.name)

  lu.cfg.RemoveInstance(instance.name)

  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
    "Instance lock removal conflict"

  # Remove lock for the instance
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name

def RemoveDisks(lu, instance, target_node=None, ignore_failures=False):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node: string
  @param target_node: used to override the node on which to remove the disks
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  ports_to_release = set()
  anno_disks = AnnotateDiskParams(instance, instance.disks, lu.cfg)
  for (idx, device) in enumerate(anno_disks):
    if target_node:
      edata = [(target_node, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node, disk in edata:
      lu.cfg.SetDiskID(disk, node)
      result = lu.rpc.call_blockdev_remove(node, disk)
      if result.fail_msg:
        lu.LogWarning("Could not remove disk %s on node %s,"
                      " continuing anyway: %s", idx, node, result.fail_msg)
        if not (result.offline and node != instance.primary_node):
          all_result = False

    # if this is a DRBD disk, return its port to the pool
    if device.dev_type in constants.LDS_DRBD:
      ports_to_release.add(device.logical_id[2])

  if all_result or ignore_failures:
    for port in ports_to_release:
      lu.cfg.AddTcpUdpPort(port)

  if instance.disk_template in constants.DTS_FILEBASED:
    file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    if target_node:
      tgt = target_node
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, instance.primary_node, result.fail_msg)
      all_result = False

  return all_result

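# Illustrative sketch, not part of the upstream source: a typical cleanup
# path after a failed operation would be
#
#   if not RemoveDisks(self, instance, ignore_failures=True):
#     self.LogWarning("Some instance disks could not be removed")
#
# Passing target_node restricts removal to that single node instead of
# walking the disks' full node tree.
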
def NICToTuple(lu, nic):
  """Build a tuple of nic information.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nic: L{objects.NIC}
  @param nic: nic to convert to hooks tuple

  """
  cluster = lu.cfg.GetClusterInfo()
  filled_params = cluster.SimpleFillNIC(nic.nicparams)
  mode = filled_params[constants.NIC_MODE]
  link = filled_params[constants.NIC_LINK]
  netinfo = None
  if nic.network:
    nobj = lu.cfg.GetNetwork(nic.network)
    netinfo = objects.Network.ToDict(nobj)
  return (nic.name, nic.uuid, nic.ip, nic.mac, mode, link, nic.network, netinfo)

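# Illustrative sketch, not part of the upstream source: for a bridged NIC
# that does not reference a network object, NICToTuple returns a tuple of
# the shape
#
#   ("nic0", "some-uuid", "198.51.100.10", "aa:00:00:12:34:56",
#    "bridged", "xen-br0", None, None)
#
# i.e. (name, uuid, ip, mac, mode, link, network, netinfo).
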
def NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to _BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  for nic in nics:
    hooks_nics.append(NICToTuple(lu, nic))
  return hooks_nics

def CopyLockList(names):
  """Makes a copy of a list of lock names.

  Handles L{locking.ALL_SET} correctly.

  """
  if names == locking.ALL_SET:
    return locking.ALL_SET
  else:
    return names[:]

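# Illustrative sketch, not part of the upstream source: the ALL_SET marker
# is passed through unchanged, while concrete lists are shallow-copied so
# the caller may modify the result freely:
#
#   CopyLockList(locking.ALL_SET) is locking.ALL_SET   # True
#   CopyLockList(["node1", "node2"])                    # fresh list object
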
def ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf the locks are released
  @param level: Lock level
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  assert not (keep is not None and names is not None), \
    "Only one of the 'names' and the 'keep' parameters can be given"

  if names is not None:
    should_release = names.__contains__
  elif keep:
    should_release = lambda name: name not in keep
  else:
    should_release = None

  owned = lu.owned_locks(level)
  if not owned:
    # Not owning any lock at this level, do nothing
    pass

  elif should_release:
    retain = []
    release = []

    # Determine which locks to release
    for name in owned:
      if should_release(name):
        release.append(name)
      else:
        retain.append(name)

    assert len(lu.owned_locks(level)) == (len(retain) + len(release))

    # Release just some locks
    lu.glm.release(level, names=release)

    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
  else:
    # Release everything
    lu.glm.release(level)

    assert not lu.glm.is_owned(level), "No locks should be owned"

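# Illustrative sketch, not part of the upstream source: `names` and `keep`
# are mutually exclusive; either release an explicit set of locks or keep
# only the listed ones and release the rest:
#
#   ReleaseLocks(self, locking.LEVEL_NODE, names=["node3"])
#   ReleaseLocks(self, locking.LEVEL_NODE, keep=[instance.primary_node])
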
def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
                                 target_group, cfg,
                                 _compute_fn=ComputeIPolicyInstanceViolation):
  """Compute if instance meets the specs of the new target group.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param current_group: The current group of the instance
  @param target_group: The new group of the instance
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  if current_group == target_group:
    return []
  else:
    return _compute_fn(ipolicy, instance, cfg)

def CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
                           _compute_fn=_ComputeIPolicyNodeViolation):
  """Checks that the target node is correct in terms of instance policy.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param node: The new node to relocate
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param ignore: Ignore violations of the ipolicy
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
  res = _compute_fn(ipolicy, instance, primary_node.group, node.group, cfg)

  if res:
    msg = ("Instance does not meet target node group's (%s) instance"
           " policy: %s") % (node.group, utils.CommaJoin(res))
    if ignore:
      lu.LogWarning(msg)
    else:
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

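# Illustrative sketch, not part of the upstream source: before moving an
# instance to another node, an LU could check the target group's policy with
#
#   CheckTargetNodeIPolicy(self, cluster.ipolicy, instance, target_node,
#                          self.cfg, ignore=self.op.ignore_ipolicy)
#
# assuming the opcode carries an ignore_ipolicy flag; with ignore=True
# violations are only logged instead of raising OpPrereqError.
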
def GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name

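# Illustrative note, not part of the upstream source: for an instance named
# "inst1.example.com" this yields the string "originstname+inst1.example.com".
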
def CheckNodeFreeMemory(lu, node, reason, requested, hypervisor_name):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node: C{str}
  @param node: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hypervisor_name: C{str}
  @param hypervisor_name: the hypervisor to ask for memory stats
  @rtype: integer
  @return: node current free memory
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info([node], None, [hypervisor_name], False)
  nodeinfo[node].Raise("Can't get data from node %s" % node,
                       prereq=True, ecode=errors.ECODE_ENVIRON)
  (_, _, (hv_info, )) = nodeinfo[node].payload

  free_mem = hv_info.get("memory_free", None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node, reason, requested, free_mem),
                               errors.ECODE_NORES)
  return free_mem

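# Illustrative sketch, not part of the upstream source: before starting an
# instance, an LU could verify that the primary node can host its maximum
# memory:
#
#   free = CheckNodeFreeMemory(self, instance.primary_node,
#                              "starting instance %s" % instance.name,
#                              bep[constants.BE_MAXMEM],
#                              instance.hypervisor)
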
def CheckInstanceBridgesExist(lu, instance, node=None):
  """Check that the bridges needed by an instance exist.

  """
  if node is None:
    node = instance.primary_node
  CheckNicsBridgesExist(lu, instance.nics, node)

def CheckNicsBridgesExist(lu, target_nics, target_node):
  """Check that the bridges needed by a list of nics exist.

  """
  cluster = lu.cfg.GetClusterInfo()
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in target_nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(target_node, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 target_node, prereq=True, ecode=errors.ECODE_ENVIRON)

def CheckNodeHasOS(lu, node, os_name, force_variant):
  """Ensure that a node supports a given OS.

  @param lu: the LU on behalf of which we make the check
  @param node: the node to check
  @param os_name: the OS to query about
  @param force_variant: whether to ignore variant errors
  @raise errors.OpPrereqError: if the node is not supporting the OS

  """
  result = lu.rpc.call_os_get(node, os_name)
  result.Raise("OS '%s' not in supported OS list for node %s" %
               (os_name, node),
               prereq=True, ecode=errors.ECODE_INVAL)
  if not force_variant:
    _CheckOSVariant(result.payload, os_name)

def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
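# Illustrative note, not part of the upstream source: variants are encoded in
# the OS name itself as "<os>+<variant>", e.g. "debootstrap+default"; for an
# OS that declares supported_variants, a bare "debootstrap" is rejected with
# "OS name must include a variant".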