Package ganeti :: Package cmdlib :: Module common

Source Code for Module ganeti.cmdlib.common

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30   
  31  """Common functions used by multiple logical units.""" 
  32   
  33  import copy 
  34  import math 
  35  import os 
  36  import urllib2 
  37   
  38  from ganeti import compat 
  39  from ganeti import constants 
  40  from ganeti import errors 
  41  from ganeti import hypervisor 
  42  from ganeti import locking 
  43  from ganeti import objects 
  44  from ganeti import opcodes 
  45  from ganeti import pathutils 
  46  import ganeti.rpc.node as rpc 
  47  from ganeti.serializer import Private 
  48  from ganeti import ssconf 
  49  from ganeti import utils 
  50   
  51   
  52  # States of instance 
  53  INSTANCE_DOWN = [constants.ADMINST_DOWN] 
  54  INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP] 
  55  INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE] 
  56   
  57   
58 -def _ExpandItemName(expand_fn, name, kind):
59 """Expand an item name. 60 61 @param expand_fn: the function to use for expansion 62 @param name: requested item name 63 @param kind: text description ('Node' or 'Instance') 64 @return: the result of the expand_fn, if successful 65 @raise errors.OpPrereqError: if the item is not found 66 67 """ 68 (uuid, full_name) = expand_fn(name) 69 if uuid is None or full_name is None: 70 raise errors.OpPrereqError("%s '%s' not known" % (kind, name), 71 errors.ECODE_NOENT) 72 return (uuid, full_name)
73 74
75 -def ExpandInstanceUuidAndName(cfg, expected_uuid, name):
76 """Wrapper over L{_ExpandItemName} for instance.""" 77 (uuid, full_name) = _ExpandItemName(cfg.ExpandInstanceName, name, "Instance") 78 if expected_uuid is not None and uuid != expected_uuid: 79 raise errors.OpPrereqError( 80 "The instances UUID '%s' does not match the expected UUID '%s' for" 81 " instance '%s'. Maybe the instance changed since you submitted this" 82 " job." % (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE) 83 return (uuid, full_name)
84 85
86 -def ExpandNodeUuidAndName(cfg, expected_uuid, name):
87 """Expand a short node name into the node UUID and full name. 88 89 @type cfg: L{config.ConfigWriter} 90 @param cfg: The cluster configuration 91 @type expected_uuid: string 92 @param expected_uuid: expected UUID for the node (or None if there is no 93 expectation). If it does not match, a L{errors.OpPrereqError} is 94 raised. 95 @type name: string 96 @param name: the short node name 97 98 """ 99 (uuid, full_name) = _ExpandItemName(cfg.ExpandNodeName, name, "Node") 100 if expected_uuid is not None and uuid != expected_uuid: 101 raise errors.OpPrereqError( 102 "The nodes UUID '%s' does not match the expected UUID '%s' for node" 103 " '%s'. Maybe the node changed since you submitted this job." % 104 (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE) 105 return (uuid, full_name)
106 107
108 -def ShareAll():
109 """Returns a dict declaring all lock levels shared. 110 111 """ 112 return dict.fromkeys(locking.LEVELS, 1)
113 114
115 -def CheckNodeGroupInstances(cfg, group_uuid, owned_instance_names):
116 """Checks if the instances in a node group are still correct. 117 118 @type cfg: L{config.ConfigWriter} 119 @param cfg: The cluster configuration 120 @type group_uuid: string 121 @param group_uuid: Node group UUID 122 @type owned_instance_names: set or frozenset 123 @param owned_instance_names: List of currently owned instances 124 125 """ 126 wanted_instances = frozenset(cfg.GetInstanceNames( 127 cfg.GetNodeGroupInstances(group_uuid))) 128 if owned_instance_names != wanted_instances: 129 raise errors.OpPrereqError("Instances in node group '%s' changed since" 130 " locks were acquired, wanted '%s', have '%s';" 131 " retry the operation" % 132 (group_uuid, 133 utils.CommaJoin(wanted_instances), 134 utils.CommaJoin(owned_instance_names)), 135 errors.ECODE_STATE) 136 137 return wanted_instances
138 139
140 -def GetWantedNodes(lu, short_node_names):
 141    """Returns list of checked and expanded node names.
 142  
 143    @type lu: L{LogicalUnit}
 144    @param lu: the logical unit on whose behalf we execute
 145    @type short_node_names: list
 146    @param short_node_names: list of node names or None for all nodes
 147    @rtype: tuple of lists
 148    @return: tuple with (list of node UUIDs, list of node names)
 149    @raise errors.ProgrammerError: if the nodes parameter is wrong type
 150  
 151    """
 152    if short_node_names:
 153      node_uuids = [ExpandNodeUuidAndName(lu.cfg, None, name)[0]
 154                    for name in short_node_names]
 155    else:
 156      node_uuids = lu.cfg.GetNodeList()
 157  
 158    return (node_uuids, [lu.cfg.GetNodeName(uuid) for uuid in node_uuids])
159 160
161 -def GetWantedInstances(lu, short_inst_names):
162 """Returns list of checked and expanded instance names. 163 164 @type lu: L{LogicalUnit} 165 @param lu: the logical unit on whose behalf we execute 166 @type short_inst_names: list 167 @param short_inst_names: list of instance names or None for all instances 168 @rtype: tuple of lists 169 @return: tuple of (instance UUIDs, instance names) 170 @raise errors.OpPrereqError: if the instances parameter is wrong type 171 @raise errors.OpPrereqError: if any of the passed instances is not found 172 173 """ 174 if short_inst_names: 175 inst_uuids = [ExpandInstanceUuidAndName(lu.cfg, None, name)[0] 176 for name in short_inst_names] 177 else: 178 inst_uuids = lu.cfg.GetInstanceList() 179 return (inst_uuids, [lu.cfg.GetInstanceName(uuid) for uuid in inst_uuids])
180 181
182 -def RunPostHook(lu, node_name):
183 """Runs the post-hook for an opcode on a single node. 184 185 """ 186 hm = lu.proc.BuildHooksManager(lu) 187 try: 188 hm.RunPhase(constants.HOOKS_PHASE_POST, node_names=[node_name]) 189 except Exception, err: # pylint: disable=W0703 190 lu.LogWarning("Errors occurred running hooks on %s: %s", 191 node_name, err)
192 193
194 -def RedistributeAncillaryFiles(lu):
195 """Distribute additional files which are part of the cluster configuration. 196 197 ConfigWriter takes care of distributing the config and ssconf files, but 198 there are more files which should be distributed to all nodes. This function 199 makes sure those are copied. 200 201 """ 202 # Gather target nodes 203 cluster = lu.cfg.GetClusterInfo() 204 master_info = lu.cfg.GetMasterNodeInfo() 205 206 online_node_uuids = lu.cfg.GetOnlineNodeList() 207 online_node_uuid_set = frozenset(online_node_uuids) 208 vm_node_uuids = list(online_node_uuid_set.intersection( 209 lu.cfg.GetVmCapableNodeList())) 210 211 # Never distribute to master node 212 for node_uuids in [online_node_uuids, vm_node_uuids]: 213 if master_info.uuid in node_uuids: 214 node_uuids.remove(master_info.uuid) 215 216 # Gather file lists 217 (files_all, _, files_mc, files_vm) = \ 218 ComputeAncillaryFiles(cluster, True) 219 220 # Never re-distribute configuration file from here 221 assert not (pathutils.CLUSTER_CONF_FILE in files_all or 222 pathutils.CLUSTER_CONF_FILE in files_vm) 223 assert not files_mc, "Master candidates not handled in this function" 224 225 filemap = [ 226 (online_node_uuids, files_all), 227 (vm_node_uuids, files_vm), 228 ] 229 230 # Upload the files 231 for (node_uuids, files) in filemap: 232 for fname in files: 233 UploadHelper(lu, node_uuids, fname)
234 235
236 -def ComputeAncillaryFiles(cluster, redist):
237 """Compute files external to Ganeti which need to be consistent. 238 239 @type redist: boolean 240 @param redist: Whether to include files which need to be redistributed 241 242 """ 243 # Compute files for all nodes 244 files_all = set([ 245 pathutils.SSH_KNOWN_HOSTS_FILE, 246 pathutils.CONFD_HMAC_KEY, 247 pathutils.CLUSTER_DOMAIN_SECRET_FILE, 248 pathutils.SPICE_CERT_FILE, 249 pathutils.SPICE_CACERT_FILE, 250 pathutils.RAPI_USERS_FILE, 251 ]) 252 253 if redist: 254 # we need to ship at least the RAPI certificate 255 files_all.add(pathutils.RAPI_CERT_FILE) 256 else: 257 files_all.update(pathutils.ALL_CERT_FILES) 258 files_all.update(ssconf.SimpleStore().GetFileList()) 259 260 if cluster.modify_etc_hosts: 261 files_all.add(pathutils.ETC_HOSTS) 262 263 if cluster.use_external_mip_script: 264 files_all.add(pathutils.EXTERNAL_MASTER_SETUP_SCRIPT) 265 266 # Files which are optional, these must: 267 # - be present in one other category as well 268 # - either exist or not exist on all nodes of that category (mc, vm all) 269 files_opt = set([ 270 pathutils.RAPI_USERS_FILE, 271 ]) 272 273 # Files which should only be on master candidates 274 files_mc = set() 275 276 if not redist: 277 files_mc.add(pathutils.CLUSTER_CONF_FILE) 278 279 # File storage 280 if (not redist and (cluster.IsFileStorageEnabled() or 281 cluster.IsSharedFileStorageEnabled())): 282 files_all.add(pathutils.FILE_STORAGE_PATHS_FILE) 283 files_opt.add(pathutils.FILE_STORAGE_PATHS_FILE) 284 285 # Files which should only be on VM-capable nodes 286 files_vm = set( 287 filename 288 for hv_name in cluster.enabled_hypervisors 289 for filename in 290 hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[0]) 291 292 files_opt |= set( 293 filename 294 for hv_name in cluster.enabled_hypervisors 295 for filename in 296 hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[1]) 297 298 # Filenames in each category must be unique 299 all_files_set = files_all | files_mc | files_vm 300 assert (len(all_files_set) == 301 sum(map(len, [files_all, files_mc, files_vm]))), \ 302 "Found file listed in more than one file list" 303 304 # Optional files must be present in one other category 305 assert all_files_set.issuperset(files_opt), \ 306 "Optional file not in a different required list" 307 308 # This one file should never ever be re-distributed via RPC 309 assert not (redist and 310 pathutils.FILE_STORAGE_PATHS_FILE in all_files_set) 311 312 return (files_all, files_opt, files_mc, files_vm)
313 314
315 -def UploadHelper(lu, node_uuids, fname):
316 """Helper for uploading a file and showing warnings. 317 318 """ 319 if os.path.exists(fname): 320 result = lu.rpc.call_upload_file(node_uuids, fname) 321 for to_node_uuids, to_result in result.items(): 322 msg = to_result.fail_msg 323 if msg: 324 msg = ("Copy of file %s to node %s failed: %s" % 325 (fname, lu.cfg.GetNodeName(to_node_uuids), msg)) 326 lu.LogWarning(msg)
327 328
329 -def MergeAndVerifyHvState(op_input, obj_input):
330 """Combines the hv state from an opcode with the one of the object 331 332 @param op_input: The input dict from the opcode 333 @param obj_input: The input dict from the objects 334 @return: The verified and updated dict 335 336 """ 337 if op_input: 338 invalid_hvs = set(op_input) - constants.HYPER_TYPES 339 if invalid_hvs: 340 raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:" 341 " %s" % utils.CommaJoin(invalid_hvs), 342 errors.ECODE_INVAL) 343 if obj_input is None: 344 obj_input = {} 345 type_check = constants.HVSTS_PARAMETER_TYPES 346 return _UpdateAndVerifySubDict(obj_input, op_input, type_check) 347 348 return None
349 350
351 -def MergeAndVerifyDiskState(op_input, obj_input):
352 """Combines the disk state from an opcode with the one of the object 353 354 @param op_input: The input dict from the opcode 355 @param obj_input: The input dict from the objects 356 @return: The verified and updated dict 357 """ 358 if op_input: 359 invalid_dst = set(op_input) - constants.DS_VALID_TYPES 360 if invalid_dst: 361 raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" % 362 utils.CommaJoin(invalid_dst), 363 errors.ECODE_INVAL) 364 type_check = constants.DSS_PARAMETER_TYPES 365 if obj_input is None: 366 obj_input = {} 367 return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value, 368 type_check)) 369 for key, value in op_input.items()) 370 371 return None
372 373
374 -def CheckOSParams(lu, required, node_uuids, osname, osparams, force_variant):
375 """OS parameters validation. 376 377 @type lu: L{LogicalUnit} 378 @param lu: the logical unit for which we check 379 @type required: boolean 380 @param required: whether the validation should fail if the OS is not 381 found 382 @type node_uuids: list 383 @param node_uuids: the list of nodes on which we should check 384 @type osname: string 385 @param osname: the name of the OS we should use 386 @type osparams: dict 387 @param osparams: the parameters which we need to check 388 @raise errors.OpPrereqError: if the parameters are not valid 389 390 """ 391 node_uuids = _FilterVmNodes(lu, node_uuids) 392 393 # Last chance to unwrap private elements. 394 for key in osparams: 395 if isinstance(osparams[key], Private): 396 osparams[key] = osparams[key].Get() 397 398 if osname: 399 result = lu.rpc.call_os_validate(node_uuids, required, osname, 400 [constants.OS_VALIDATE_PARAMETERS], 401 osparams, force_variant) 402 for node_uuid, nres in result.items(): 403 # we don't check for offline cases since this should be run only 404 # against the master node and/or an instance's nodes 405 nres.Raise("OS Parameters validation failed on node %s" % 406 lu.cfg.GetNodeName(node_uuid)) 407 if not nres.payload: 408 lu.LogInfo("OS %s not found on node %s, validation skipped", 409 osname, lu.cfg.GetNodeName(node_uuid))
410 411
412 -def CheckImageValidity(image, error_message):
413 """Checks if a given image description is either a valid file path or a URL. 414 415 @type image: string 416 @param image: An absolute path or URL, the assumed location of a disk image. 417 @type error_message: string 418 @param error_message: The error message to show if the image is not valid. 419 420 @raise errors.OpPrereqError: If the validation fails. 421 422 """ 423 if image is not None and not (utils.IsUrl(image) or os.path.isabs(image)): 424 raise errors.OpPrereqError(error_message)
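A minimal usage sketch of the check above (not part of the module); it assumes a working Ganeti installation, and the image paths and error text are illustrative only:

  from ganeti import errors
  from ganeti.cmdlib.common import CheckImageValidity

  # An absolute path and a URL are both accepted without raising
  CheckImageValidity("/srv/images/debian.img", "image must be a path or URL")
  CheckImageValidity("http://example.com/debian.img", "image must be a path or URL")
  try:
    # A relative path is neither a URL nor an absolute path
    CheckImageValidity("images/debian.img", "image must be a path or URL")
  except errors.OpPrereqError:
    pass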
425 426
427 -def CheckOSImage(op):
428 """Checks if the OS image in the OS parameters of an opcode is 429 valid. 430 431 This function can also be used in LUs as they carry an opcode. 432 433 @type op: L{opcodes.OpCode} 434 @param op: opcode containing the OS params 435 436 @rtype: string or NoneType 437 @return: 438 None if the OS parameters in the opcode do not contain the OS 439 image, otherwise the OS image value contained in the OS parameters 440 @raise errors.OpPrereqError: if OS image is not a URL or an absolute path 441 442 """ 443 os_image = objects.GetOSImage(op.osparams) 444 CheckImageValidity(os_image, "OS image must be an absolute path or a URL") 445 return os_image
446 447
448 -def CheckHVParams(lu, node_uuids, hvname, hvparams):
449 """Hypervisor parameter validation. 450 451 This function abstracts the hypervisor parameter validation to be 452 used in both instance create and instance modify. 453 454 @type lu: L{LogicalUnit} 455 @param lu: the logical unit for which we check 456 @type node_uuids: list 457 @param node_uuids: the list of nodes on which we should check 458 @type hvname: string 459 @param hvname: the name of the hypervisor we should use 460 @type hvparams: dict 461 @param hvparams: the parameters which we need to check 462 @raise errors.OpPrereqError: if the parameters are not valid 463 464 """ 465 node_uuids = _FilterVmNodes(lu, node_uuids) 466 467 cluster = lu.cfg.GetClusterInfo() 468 hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams) 469 470 hvinfo = lu.rpc.call_hypervisor_validate_params(node_uuids, hvname, hvfull) 471 for node_uuid in node_uuids: 472 info = hvinfo[node_uuid] 473 if info.offline: 474 continue 475 info.Raise("Hypervisor parameter validation failed on node %s" % 476 lu.cfg.GetNodeName(node_uuid))
477 478
479 -def AdjustCandidatePool(lu, exceptions):
480 """Adjust the candidate pool after node operations. 481 482 """ 483 mod_list = lu.cfg.MaintainCandidatePool(exceptions) 484 if mod_list: 485 lu.LogInfo("Promoted nodes to master candidate role: %s", 486 utils.CommaJoin(node.name for node in mod_list)) 487 for node in mod_list: 488 lu.context.ReaddNode(node) 489 AddNodeCertToCandidateCerts(lu, lu.cfg, node.uuid) 490 mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions) 491 if mc_now > mc_max: 492 lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" % 493 (mc_now, mc_max))
494 495
496 -def CheckNodePVs(nresult, exclusive_storage):
497 """Check node PVs. 498 499 """ 500 pvlist_dict = nresult.get(constants.NV_PVLIST, None) 501 if pvlist_dict is None: 502 return (["Can't get PV list from node"], None) 503 pvlist = map(objects.LvmPvInfo.FromDict, pvlist_dict) 504 errlist = [] 505 # check that ':' is not present in PV names, since it's a 506 # special character for lvcreate (denotes the range of PEs to 507 # use on the PV) 508 for pv in pvlist: 509 if ":" in pv.name: 510 errlist.append("Invalid character ':' in PV '%s' of VG '%s'" % 511 (pv.name, pv.vg_name)) 512 es_pvinfo = None 513 if exclusive_storage: 514 (errmsgs, es_pvinfo) = utils.LvmExclusiveCheckNodePvs(pvlist) 515 errlist.extend(errmsgs) 516 shared_pvs = nresult.get(constants.NV_EXCLUSIVEPVS, None) 517 if shared_pvs: 518 for (pvname, lvlist) in shared_pvs: 519 # TODO: Check that LVs are really unrelated (snapshots, DRBD meta...) 520 errlist.append("PV %s is shared among unrelated LVs (%s)" % 521 (pvname, utils.CommaJoin(lvlist))) 522 return (errlist, es_pvinfo)
523 524
525 -def _ComputeMinMaxSpec(name, qualifier, ispecs, value):
526 """Computes if value is in the desired range. 527 528 @param name: name of the parameter for which we perform the check 529 @param qualifier: a qualifier used in the error message (e.g. 'disk/1', 530 not just 'disk') 531 @param ispecs: dictionary containing min and max values 532 @param value: actual value that we want to use 533 @return: None or an error string 534 535 """ 536 if value in [None, constants.VALUE_AUTO]: 537 return None 538 max_v = ispecs[constants.ISPECS_MAX].get(name, value) 539 min_v = ispecs[constants.ISPECS_MIN].get(name, value) 540 if value > max_v or min_v > value: 541 if qualifier: 542 fqn = "%s/%s" % (name, qualifier) 543 else: 544 fqn = name 545 return ("%s value %s is not in range [%s, %s]" % 546 (fqn, value, min_v, max_v)) 547 return None
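The range check above can be exercised directly; a minimal sketch, assuming a Ganeti installation and an ipolicy fragment built from the standard constants (the memory values are made up):

  from ganeti import constants
  from ganeti.cmdlib import common

  ispecs = {
    constants.ISPECS_MIN: {constants.ISPEC_MEM_SIZE: 128},
    constants.ISPECS_MAX: {constants.ISPEC_MEM_SIZE: 4096},
  }
  # Within [min, max] (or None/constants.VALUE_AUTO): no violation is reported
  assert common._ComputeMinMaxSpec(constants.ISPEC_MEM_SIZE, "", ispecs, 512) is None
  # Outside the range: an error string describing the violation is returned
  assert common._ComputeMinMaxSpec(constants.ISPEC_MEM_SIZE, "", ispecs, 8192) is not None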
548 549
 550  -def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
 551                                   nic_count, disk_sizes, spindle_use,
 552                                   disk_template,
 553                                   _compute_fn=_ComputeMinMaxSpec):
 554    """Verifies ipolicy against provided specs.
 555  
 556    @type ipolicy: dict
 557    @param ipolicy: The ipolicy
 558    @type mem_size: int
 559    @param mem_size: The memory size
 560    @type cpu_count: int
 561    @param cpu_count: Used cpu cores
 562    @type disk_count: int
 563    @param disk_count: Number of disks used
 564    @type nic_count: int
 565    @param nic_count: Number of nics used
 566    @type disk_sizes: list of ints
 567    @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
 568    @type spindle_use: int
 569    @param spindle_use: The number of spindles this instance uses
 570    @type disk_template: string
 571    @param disk_template: The disk template of the instance
 572    @param _compute_fn: The compute function (unittest only)
 573    @return: A list of violations, or an empty list if no violations are found
 574  
 575    """
 576    assert disk_count == len(disk_sizes)
 577  
 578    test_settings = [
 579      (constants.ISPEC_MEM_SIZE, "", mem_size),
 580      (constants.ISPEC_CPU_COUNT, "", cpu_count),
 581      (constants.ISPEC_NIC_COUNT, "", nic_count),
 582      (constants.ISPEC_SPINDLE_USE, "", spindle_use),
 583      ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
 584           for idx, d in enumerate(disk_sizes)]
 585    if disk_template != constants.DT_DISKLESS:
 586      # This check doesn't make sense for diskless instances
 587      test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
 588    ret = []
 589    allowed_dts = ipolicy[constants.IPOLICY_DTS]
 590    if disk_template not in allowed_dts:
 591      ret.append("Disk template %s is not allowed (allowed templates: %s)" %
 592                 (disk_template, utils.CommaJoin(allowed_dts)))
 593  
 594    min_errs = None
 595    for minmax in ipolicy[constants.ISPECS_MINMAX]:
 596      errs = filter(None,
 597                    (_compute_fn(name, qualifier, minmax, value)
 598                     for (name, qualifier, value) in test_settings))
 599      if min_errs is None or len(errs) < len(min_errs):
 600        min_errs = errs
 601    assert min_errs is not None
 602    return ret + min_errs
603 604
 605  -def ComputeIPolicyDiskSizesViolation(ipolicy, disk_sizes,
 606                                        disk_template,
 607                                        _compute_fn=_ComputeMinMaxSpec):
 608    """Verifies ipolicy against provided disk sizes.
 609  
 610    No other specs except the disk sizes, the number of disks and the disk
 611    template are checked.
 612  
 613    @type ipolicy: dict
 614    @param ipolicy: The ipolicy
 615    @type disk_sizes: list of ints
 616    @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
 617    @type disk_template: string
 618    @param disk_template: The disk template of the instance
 619    @param _compute_fn: The compute function (unittest only)
 620    @return: A list of violations, or an empty list if no violations are found
 621  
 622    """
 623    return ComputeIPolicySpecViolation(ipolicy,
 624                                       # mem_size, cpu_count, disk_count
 625                                       None, None, len(disk_sizes),
 626                                       None, disk_sizes,  # nic_count, disk_sizes
 627                                       None,  # spindle_use
 628                                       disk_template,
 629                                       _compute_fn=_compute_fn)
630 631
632 -def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg, 633 _compute_fn=ComputeIPolicySpecViolation):
634 """Compute if instance meets the specs of ipolicy. 635 636 @type ipolicy: dict 637 @param ipolicy: The ipolicy to verify against 638 @type instance: L{objects.Instance} 639 @param instance: The instance to verify 640 @type cfg: L{config.ConfigWriter} 641 @param cfg: Cluster configuration 642 @param _compute_fn: The function to verify ipolicy (unittest only) 643 @see: L{ComputeIPolicySpecViolation} 644 645 """ 646 ret = [] 647 be_full = cfg.GetClusterInfo().FillBE(instance) 648 mem_size = be_full[constants.BE_MAXMEM] 649 cpu_count = be_full[constants.BE_VCPUS] 650 inst_nodes = cfg.GetInstanceNodes(instance.uuid) 651 es_flags = rpc.GetExclusiveStorageForNodes(cfg, inst_nodes) 652 disks = cfg.GetInstanceDisks(instance.uuid) 653 if any(es_flags.values()): 654 # With exclusive storage use the actual spindles 655 try: 656 spindle_use = sum([disk.spindles for disk in disks]) 657 except TypeError: 658 ret.append("Number of spindles not configured for disks of instance %s" 659 " while exclusive storage is enabled, try running gnt-cluster" 660 " repair-disk-sizes" % instance.name) 661 # _ComputeMinMaxSpec ignores 'None's 662 spindle_use = None 663 else: 664 spindle_use = be_full[constants.BE_SPINDLE_USE] 665 disk_count = len(disks) 666 disk_sizes = [disk.size for disk in disks] 667 nic_count = len(instance.nics) 668 disk_template = instance.disk_template 669 670 return ret + _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count, 671 disk_sizes, spindle_use, disk_template)
672 673
674 -def _ComputeViolatingInstances(ipolicy, instances, cfg):
 675    """Computes the set of instances that violate the given ipolicy.
 676  
 677    @param ipolicy: The ipolicy to verify
 678    @type instances: list of L{objects.Instance}
 679    @param instances: List of instances to verify
 680    @type cfg: L{config.ConfigWriter}
 681    @param cfg: Cluster configuration
 682    @return: A frozenset of instance names violating the ipolicy
 683  
 684    """
 685    return frozenset([inst.name for inst in instances
 686                      if ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
687 688
689 -def ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
690 """Computes a set of any instances that would violate the new ipolicy. 691 692 @param old_ipolicy: The current (still in-place) ipolicy 693 @param new_ipolicy: The new (to become) ipolicy 694 @param instances: List of instances to verify 695 @type cfg: L{config.ConfigWriter} 696 @param cfg: Cluster configuration 697 @return: A list of instances which violates the new ipolicy but 698 did not before 699 700 """ 701 return (_ComputeViolatingInstances(new_ipolicy, instances, cfg) - 702 _ComputeViolatingInstances(old_ipolicy, instances, cfg))
703 704
 705  -def GetUpdatedParams(old_params, update_dict,
 706                        use_default=True, use_none=False):
 707    """Return the new version of a parameter dictionary.
 708  
 709    @type old_params: dict
 710    @param old_params: old parameters
 711    @type update_dict: dict
 712    @param update_dict: dict containing new parameter values, or
 713        constants.VALUE_DEFAULT to reset the parameter to its default
 714        value
 715    @type use_default: boolean
 716    @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
 717        values as 'to be deleted' values
 718    @type use_none: boolean
 719    @param use_none: whether to recognise C{None} values as 'to be
 720        deleted' values
 721    @rtype: dict
 722    @return: the new parameter dictionary
 723  
 724    """
 725    params_copy = copy.deepcopy(old_params)
 726    for key, val in update_dict.iteritems():
 727      if ((use_default and val == constants.VALUE_DEFAULT) or
 728          (use_none and val is None)):
 729        try:
 730          del params_copy[key]
 731        except KeyError:
 732          pass
 733      else:
 734        params_copy[key] = val
 735    return params_copy
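The deletion semantics of C{constants.VALUE_DEFAULT} are easiest to see with a small example; a sketch, assuming a Ganeti installation, with hypothetical hypervisor parameter names:

  from ganeti import constants
  from ganeti.cmdlib.common import GetUpdatedParams

  old = {"kernel_path": "/boot/vmlinuz", "root_path": "/dev/vda1"}
  update = {"kernel_path": constants.VALUE_DEFAULT,  # key is removed, default applies again
            "root_path": "/dev/vda2"}                # key is overridden
  assert GetUpdatedParams(old, update) == {"root_path": "/dev/vda2"}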
736 737
738 -def GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
739 """Return the new version of an instance policy. 740 741 @param group_policy: whether this policy applies to a group and thus 742 we should support removal of policy entries 743 744 """ 745 ipolicy = copy.deepcopy(old_ipolicy) 746 for key, value in new_ipolicy.items(): 747 if key not in constants.IPOLICY_ALL_KEYS: 748 raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key, 749 errors.ECODE_INVAL) 750 if (not value or value == [constants.VALUE_DEFAULT] or 751 value == constants.VALUE_DEFAULT): 752 if group_policy: 753 if key in ipolicy: 754 del ipolicy[key] 755 else: 756 raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'" 757 " on the cluster'" % key, 758 errors.ECODE_INVAL) 759 else: 760 if key in constants.IPOLICY_PARAMETERS: 761 # FIXME: we assume all such values are float 762 try: 763 ipolicy[key] = float(value) 764 except (TypeError, ValueError), err: 765 raise errors.OpPrereqError("Invalid value for attribute" 766 " '%s': '%s', error: %s" % 767 (key, value, err), errors.ECODE_INVAL) 768 elif key == constants.ISPECS_MINMAX: 769 for minmax in value: 770 for k in minmax.keys(): 771 utils.ForceDictType(minmax[k], constants.ISPECS_PARAMETER_TYPES) 772 ipolicy[key] = value 773 elif key == constants.ISPECS_STD: 774 if group_policy: 775 msg = "%s cannot appear in group instance specs" % key 776 raise errors.OpPrereqError(msg, errors.ECODE_INVAL) 777 ipolicy[key] = GetUpdatedParams(old_ipolicy.get(key, {}), value, 778 use_none=False, use_default=False) 779 utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES) 780 else: 781 # FIXME: we assume all others are lists; this should be redone 782 # in a nicer way 783 ipolicy[key] = list(value) 784 try: 785 objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy) 786 except errors.ConfigurationError, err: 787 raise errors.OpPrereqError("Invalid instance policy: %s" % err, 788 errors.ECODE_INVAL) 789 return ipolicy
790 791
792 -def AnnotateDiskParams(instance, devs, cfg):
 793    """Little helper wrapper to the rpc annotation method.
 794  
 795    @param instance: The instance object
 796    @type devs: List of L{objects.Disk}
 797    @param devs: The root devices (not any of their children!)
 798    @param cfg: The config object
 799    @return: The annotated disk copies
 800    @see: L{ganeti.rpc.node.AnnotateDiskParams}
 801  
 802    """
 803    return rpc.AnnotateDiskParams(devs, cfg.GetInstanceDiskParams(instance))
804 805
806 -def SupportsOob(cfg, node):
807 """Tells if node supports OOB. 808 809 @type cfg: L{config.ConfigWriter} 810 @param cfg: The cluster configuration 811 @type node: L{objects.Node} 812 @param node: The node 813 @return: The OOB script if supported or an empty string otherwise 814 815 """ 816 return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
817 818
819 -def _UpdateAndVerifySubDict(base, updates, type_check):
820 """Updates and verifies a dict with sub dicts of the same type. 821 822 @param base: The dict with the old data 823 @param updates: The dict with the new data 824 @param type_check: Dict suitable to ForceDictType to verify correct types 825 @returns: A new dict with updated and verified values 826 827 """ 828 def fn(old, value): 829 new = GetUpdatedParams(old, value) 830 utils.ForceDictType(new, type_check) 831 return new
832 833 ret = copy.deepcopy(base) 834 ret.update(dict((key, fn(base.get(key, {}), value)) 835 for key, value in updates.items())) 836 return ret 837 838
839 -def _FilterVmNodes(lu, node_uuids):
840 """Filters out non-vm_capable nodes from a list. 841 842 @type lu: L{LogicalUnit} 843 @param lu: the logical unit for which we check 844 @type node_uuids: list 845 @param node_uuids: the list of nodes on which we should check 846 @rtype: list 847 @return: the list of vm-capable nodes 848 849 """ 850 vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList()) 851 return [uuid for uuid in node_uuids if uuid not in vm_nodes]
852 853
854 -def GetDefaultIAllocator(cfg, ialloc):
855 """Decides on which iallocator to use. 856 857 @type cfg: L{config.ConfigWriter} 858 @param cfg: Cluster configuration object 859 @type ialloc: string or None 860 @param ialloc: Iallocator specified in opcode 861 @rtype: string 862 @return: Iallocator name 863 864 """ 865 if not ialloc: 866 # Use default iallocator 867 ialloc = cfg.GetDefaultIAllocator() 868 869 if not ialloc: 870 raise errors.OpPrereqError("No iallocator was specified, neither in the" 871 " opcode nor as a cluster-wide default", 872 errors.ECODE_INVAL) 873 874 return ialloc
875 876
877 -def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_node_uuids, 878 cur_group_uuid):
879 """Checks if node groups for locked instances are still correct. 880 881 @type cfg: L{config.ConfigWriter} 882 @param cfg: Cluster configuration 883 @type instances: dict; string as key, L{objects.Instance} as value 884 @param instances: Dictionary, instance UUID as key, instance object as value 885 @type owned_groups: iterable of string 886 @param owned_groups: List of owned groups 887 @type owned_node_uuids: iterable of string 888 @param owned_node_uuids: List of owned nodes 889 @type cur_group_uuid: string or None 890 @param cur_group_uuid: Optional group UUID to check against instance's groups 891 892 """ 893 for (uuid, inst) in instances.items(): 894 inst_nodes = cfg.GetInstanceNodes(inst.uuid) 895 assert owned_node_uuids.issuperset(inst_nodes), \ 896 "Instance %s's nodes changed while we kept the lock" % inst.name 897 898 inst_groups = CheckInstanceNodeGroups(cfg, uuid, owned_groups) 899 900 assert cur_group_uuid is None or cur_group_uuid in inst_groups, \ 901 "Instance %s has no node in group %s" % (inst.name, cur_group_uuid)
902 903
904 -def CheckInstanceNodeGroups(cfg, inst_uuid, owned_groups, primary_only=False):
 905    """Checks if the owned node groups are still correct for an instance.
 906  
 907    @type cfg: L{config.ConfigWriter}
 908    @param cfg: The cluster configuration
 909    @type inst_uuid: string
 910    @param inst_uuid: Instance UUID
 911    @type owned_groups: set or frozenset
 912    @param owned_groups: List of currently owned node groups
 913    @type primary_only: boolean
 914    @param primary_only: Whether to check node groups for only the primary node
 915  
 916    """
 917    inst_groups = cfg.GetInstanceNodeGroups(inst_uuid, primary_only)
 918  
 919    if not owned_groups.issuperset(inst_groups):
 920      raise errors.OpPrereqError("Instance %s's node groups changed since"
 921                                 " locks were acquired, current groups are"
 922                                 " '%s', owning groups '%s'; retry the"
 923                                 " operation" %
 924                                 (cfg.GetInstanceName(inst_uuid),
 925                                  utils.CommaJoin(inst_groups),
 926                                  utils.CommaJoin(owned_groups)),
 927                                 errors.ECODE_STATE)
 928  
 929    return inst_groups
930 931
932 -def LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
933 """Unpacks the result of change-group and node-evacuate iallocator requests. 934 935 Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and 936 L{constants.IALLOCATOR_MODE_CHG_GROUP}. 937 938 @type lu: L{LogicalUnit} 939 @param lu: Logical unit instance 940 @type alloc_result: tuple/list 941 @param alloc_result: Result from iallocator 942 @type early_release: bool 943 @param early_release: Whether to release locks early if possible 944 @type use_nodes: bool 945 @param use_nodes: Whether to display node names instead of groups 946 947 """ 948 (moved, failed, jobs) = alloc_result 949 950 if failed: 951 failreason = utils.CommaJoin("%s (%s)" % (name, reason) 952 for (name, reason) in failed) 953 lu.LogWarning("Unable to evacuate instances %s", failreason) 954 raise errors.OpExecError("Unable to evacuate instances %s" % failreason) 955 956 if moved: 957 lu.LogInfo("Instances to be moved: %s", 958 utils.CommaJoin( 959 "%s (to %s)" % 960 (name, _NodeEvacDest(use_nodes, group, node_names)) 961 for (name, group, node_names) in moved)) 962 963 return [map(compat.partial(_SetOpEarlyRelease, early_release), 964 map(opcodes.OpCode.LoadOpCode, ops)) 965 for ops in jobs]
966 967
968 -def _NodeEvacDest(use_nodes, group, node_names):
969 """Returns group or nodes depending on caller's choice. 970 971 """ 972 if use_nodes: 973 return utils.CommaJoin(node_names) 974 else: 975 return group
976 977
978 -def _SetOpEarlyRelease(early_release, op):
979 """Sets C{early_release} flag on opcodes if available. 980 981 """ 982 try: 983 op.early_release = early_release 984 except AttributeError: 985 assert not isinstance(op, opcodes.OpInstanceReplaceDisks) 986 987 return op
988 989
990 -def MapInstanceLvsToNodes(cfg, instances):
991 """Creates a map from (node, volume) to instance name. 992 993 @type cfg: L{config.ConfigWriter} 994 @param cfg: The cluster configuration 995 @type instances: list of L{objects.Instance} 996 @rtype: dict; tuple of (node uuid, volume name) as key, L{objects.Instance} 997 object as value 998 999 """ 1000 return dict( 1001 ((node_uuid, vol), inst) 1002 for inst in instances 1003 for (node_uuid, vols) in cfg.GetInstanceLVsByNode(inst.uuid).items() 1004 for vol in vols)
1005 1006
1007 -def CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
1008    """Make sure that none of the given parameters is global.
1009  
1010    If a global parameter is found, an L{errors.OpPrereqError} exception is
1011    raised. This is used to avoid setting global parameters for individual nodes.
1012  
1013    @type params: dictionary
1014    @param params: Parameters to check
1015    @type glob_pars: dictionary
1016    @param glob_pars: Forbidden parameters
1017    @type kind: string
1018    @param kind: Kind of parameters (e.g. "node")
1019    @type bad_levels: string
1020    @param bad_levels: Level(s) at which the parameters are forbidden (e.g.
1021        "instance")
1022    @type good_levels: string
1023    @param good_levels: Level(s) at which the parameters are allowed (e.g.
1024        "cluster or group")
1025  
1026    """
1027    used_globals = glob_pars.intersection(params)
1028    if used_globals:
1029      msg = ("The following %s parameters are global and cannot"
1030             " be customized at %s level, please modify them at"
1031             " %s level: %s" %
1032             (kind, bad_levels, good_levels, utils.CommaJoin(used_globals)))
1033      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1034 1035
1036 -def IsExclusiveStorageEnabledNode(cfg, node):
1037 """Whether exclusive_storage is in effect for the given node. 1038 1039 @type cfg: L{config.ConfigWriter} 1040 @param cfg: The cluster configuration 1041 @type node: L{objects.Node} 1042 @param node: The node 1043 @rtype: bool 1044 @return: The effective value of exclusive_storage 1045 1046 """ 1047 return cfg.GetNdParams(node)[constants.ND_EXCLUSIVE_STORAGE]
1048 1049
1050 -def IsInstanceRunning(lu, instance, prereq=True):
1051    """Given an instance object, checks if the instance is running.
1052  
1053    This function asks the backend whether the instance is running; instances
1054    shut down by the user are considered not to be running.
1055  
1056    @type lu: L{LogicalUnit}
1057    @param lu: LU on behalf of which we make the check
1058  
1059    @type instance: L{objects.Instance}
1060    @param instance: instance to check whether it is running
1061  
1062    @rtype: bool
1063    @return: 'True' if the instance is running, 'False' otherwise
1064  
1065    """
1066    hvparams = lu.cfg.GetClusterInfo().FillHV(instance)
1067    result = lu.rpc.call_instance_info(instance.primary_node, instance.name,
1068                                       instance.hypervisor, hvparams)
1069    # TODO: This 'prereq=True' is a problem if this function is called
1070    # within the 'Exec' method of a LU.
1071    result.Raise("Can't retrieve instance information for instance '%s'" %
1072                 instance.name, prereq=prereq, ecode=errors.ECODE_ENVIRON)
1073  
1074    return result.payload and \
1075           "state" in result.payload and \
1076           (result.payload["state"] != hypervisor.hv_base.HvInstanceState.SHUTDOWN)
1077 1078
1079 -def CheckInstanceState(lu, instance, req_states, msg=None):
1080 """Ensure that an instance is in one of the required states. 1081 1082 @param lu: the LU on behalf of which we make the check 1083 @param instance: the instance to check 1084 @param msg: if passed, should be a message to replace the default one 1085 @raise errors.OpPrereqError: if the instance is not in the required state 1086 1087 """ 1088 if msg is None: 1089 msg = ("can't use instance from outside %s states" % 1090 utils.CommaJoin(req_states)) 1091 if instance.admin_state not in req_states: 1092 raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" % 1093 (instance.name, instance.admin_state, msg), 1094 errors.ECODE_STATE) 1095 1096 if constants.ADMINST_UP not in req_states: 1097 pnode_uuid = instance.primary_node 1098 # Replicating the offline check 1099 if not lu.cfg.GetNodeInfo(pnode_uuid).offline: 1100 if IsInstanceRunning(lu, instance): 1101 raise errors.OpPrereqError("Instance %s is running, %s" % 1102 (instance.name, msg), errors.ECODE_STATE) 1103 else: 1104 lu.LogWarning("Primary node offline, ignoring check that instance" 1105 " is down")
1106 1107
1108 -def CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
1109 """Check the sanity of iallocator and node arguments and use the 1110 cluster-wide iallocator if appropriate. 1111 1112 Check that at most one of (iallocator, node) is specified. If none is 1113 specified, or the iallocator is L{constants.DEFAULT_IALLOCATOR_SHORTCUT}, 1114 then the LU's opcode's iallocator slot is filled with the cluster-wide 1115 default iallocator. 1116 1117 @type iallocator_slot: string 1118 @param iallocator_slot: the name of the opcode iallocator slot 1119 @type node_slot: string 1120 @param node_slot: the name of the opcode target node slot 1121 1122 """ 1123 node = getattr(lu.op, node_slot, None) 1124 ialloc = getattr(lu.op, iallocator_slot, None) 1125 if node == []: 1126 node = None 1127 1128 if node is not None and ialloc is not None: 1129 raise errors.OpPrereqError("Do not specify both, iallocator and node", 1130 errors.ECODE_INVAL) 1131 elif ((node is None and ialloc is None) or 1132 ialloc == constants.DEFAULT_IALLOCATOR_SHORTCUT): 1133 default_iallocator = lu.cfg.GetDefaultIAllocator() 1134 if default_iallocator: 1135 setattr(lu.op, iallocator_slot, default_iallocator) 1136 else: 1137 raise errors.OpPrereqError("No iallocator or node given and no" 1138 " cluster-wide default iallocator found;" 1139 " please specify either an iallocator or a" 1140 " node, or set a cluster-wide default" 1141 " iallocator", errors.ECODE_INVAL)
1142 1143
1144 -def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_uuid, prereq):
1145 faulty = [] 1146 1147 disks = cfg.GetInstanceDisks(instance.uuid) 1148 result = rpc_runner.call_blockdev_getmirrorstatus( 1149 node_uuid, (disks, instance)) 1150 result.Raise("Failed to get disk status from node %s" % 1151 cfg.GetNodeName(node_uuid), 1152 prereq=prereq, ecode=errors.ECODE_ENVIRON) 1153 1154 for idx, bdev_status in enumerate(result.payload): 1155 if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY: 1156 faulty.append(idx) 1157 1158 return faulty
1159 1160
1161 -def CheckNodeOnline(lu, node_uuid, msg=None):
1162 """Ensure that a given node is online. 1163 1164 @param lu: the LU on behalf of which we make the check 1165 @param node_uuid: the node to check 1166 @param msg: if passed, should be a message to replace the default one 1167 @raise errors.OpPrereqError: if the node is offline 1168 1169 """ 1170 if msg is None: 1171 msg = "Can't use offline node" 1172 if lu.cfg.GetNodeInfo(node_uuid).offline: 1173 raise errors.OpPrereqError("%s: %s" % (msg, lu.cfg.GetNodeName(node_uuid)), 1174 errors.ECODE_STATE)
1175 1176
1177 -def CheckDiskTemplateEnabled(cluster, disk_template):
1178 """Helper function to check if a disk template is enabled. 1179 1180 @type cluster: C{objects.Cluster} 1181 @param cluster: the cluster's configuration 1182 @type disk_template: str 1183 @param disk_template: the disk template to be checked 1184 1185 """ 1186 assert disk_template is not None 1187 if disk_template not in constants.DISK_TEMPLATES: 1188 raise errors.OpPrereqError("'%s' is not a valid disk template." 1189 " Valid disk templates are: %s" % 1190 (disk_template, 1191 ",".join(constants.DISK_TEMPLATES))) 1192 if not disk_template in cluster.enabled_disk_templates: 1193 raise errors.OpPrereqError("Disk template '%s' is not enabled in cluster." 1194 " Enabled disk templates are: %s" % 1195 (disk_template, 1196 ",".join(cluster.enabled_disk_templates)))
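A sketch of the check above. The bare L{objects.Cluster} built here is for illustration only (a real LU obtains the cluster object from the configuration), and it assumes a Ganeti installation:

  from ganeti import constants, errors, objects
  from ganeti.cmdlib.common import CheckDiskTemplateEnabled

  # Hand-built cluster object with only 'plain' enabled, for illustration
  cluster = objects.Cluster(enabled_disk_templates=[constants.DT_PLAIN])
  CheckDiskTemplateEnabled(cluster, constants.DT_PLAIN)     # passes silently
  try:
    CheckDiskTemplateEnabled(cluster, constants.DT_DRBD8)   # valid template, but not enabled
  except errors.OpPrereqError:
    pass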
1197 1198
1199 -def CheckStorageTypeEnabled(cluster, storage_type):
1200 """Helper function to check if a storage type is enabled. 1201 1202 @type cluster: C{objects.Cluster} 1203 @param cluster: the cluster's configuration 1204 @type storage_type: str 1205 @param storage_type: the storage type to be checked 1206 1207 """ 1208 assert storage_type is not None 1209 assert storage_type in constants.STORAGE_TYPES 1210 # special case for lvm-pv, because it cannot be enabled 1211 # via disk templates 1212 if storage_type == constants.ST_LVM_PV: 1213 CheckStorageTypeEnabled(cluster, constants.ST_LVM_VG) 1214 else: 1215 possible_disk_templates = \ 1216 utils.storage.GetDiskTemplatesOfStorageTypes(storage_type) 1217 for disk_template in possible_disk_templates: 1218 if disk_template in cluster.enabled_disk_templates: 1219 return 1220 raise errors.OpPrereqError("No disk template of storage type '%s' is" 1221 " enabled in this cluster. Enabled disk" 1222 " templates are: %s" % (storage_type, 1223 ",".join(cluster.enabled_disk_templates)))
1224 1225
1226 -def CheckIpolicyVsDiskTemplates(ipolicy, enabled_disk_templates):
1227    """Checks ipolicy disk templates against enabled disk templates.
1228  
1229    @type ipolicy: dict
1230    @param ipolicy: the new ipolicy
1231    @type enabled_disk_templates: list of string
1232    @param enabled_disk_templates: list of enabled disk templates on the
1233        cluster
1234    @raise errors.OpPrereqError: if there is at least one allowed disk
1235        template that is not also enabled.
1236  
1237    """
1238    assert constants.IPOLICY_DTS in ipolicy
1239    allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
1240    not_enabled = set(allowed_disk_templates) - set(enabled_disk_templates)
1241    if not_enabled:
1242      raise errors.OpPrereqError("The following disk templates are allowed"
1243                                 " by the ipolicy, but not enabled on the"
1244                                 " cluster: %s" % utils.CommaJoin(not_enabled),
1245                                 errors.ECODE_INVAL)
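A minimal sketch of the ipolicy-versus-enabled-templates check, assuming a Ganeti installation; the ipolicy fragment contains only the disk-templates key, which is all this function looks at:

  from ganeti import constants, errors
  from ganeti.cmdlib.common import CheckIpolicyVsDiskTemplates

  ipolicy = {constants.IPOLICY_DTS: [constants.DT_PLAIN, constants.DT_DRBD8]}
  # Every allowed template is enabled: no error
  CheckIpolicyVsDiskTemplates(ipolicy, [constants.DT_PLAIN, constants.DT_DRBD8])
  try:
    # DT_DRBD8 is allowed by the ipolicy but not enabled on the cluster
    CheckIpolicyVsDiskTemplates(ipolicy, [constants.DT_PLAIN])
  except errors.OpPrereqError:
    pass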
1246 1247
1248 -def CheckDiskAccessModeValidity(parameters):
1249 """Checks if the access parameter is legal. 1250 1251 @see: L{CheckDiskAccessModeConsistency} for cluster consistency checks. 1252 @raise errors.OpPrereqError: if the check fails. 1253 1254 """ 1255 for disk_template in parameters: 1256 access = parameters[disk_template].get(constants.LDP_ACCESS, 1257 constants.DISK_KERNELSPACE) 1258 if access not in constants.DISK_VALID_ACCESS_MODES: 1259 valid_vals_str = utils.CommaJoin(constants.DISK_VALID_ACCESS_MODES) 1260 raise errors.OpPrereqError("Invalid value of '{d}:{a}': '{v}' (expected" 1261 " one of {o})".format(d=disk_template, 1262 a=constants.LDP_ACCESS, 1263 v=access, 1264 o=valid_vals_str))
1265 1266
1267 -def CheckDiskAccessModeConsistency(parameters, cfg, group=None):
1268 """Checks if the access param is consistent with the cluster configuration. 1269 1270 @note: requires a configuration lock to run. 1271 @param parameters: the parameters to validate 1272 @param cfg: the cfg object of the cluster 1273 @param group: if set, only check for consistency within this group. 1274 @raise errors.OpPrereqError: if the LU attempts to change the access parameter 1275 to an invalid value, such as "pink bunny". 1276 @raise errors.OpPrereqError: if the LU attempts to change the access parameter 1277 to an inconsistent value, such as asking for RBD 1278 userspace access to the chroot hypervisor. 1279 1280 """ 1281 CheckDiskAccessModeValidity(parameters) 1282 1283 for disk_template in parameters: 1284 access = parameters[disk_template].get(constants.LDP_ACCESS, 1285 constants.DISK_KERNELSPACE) 1286 1287 if disk_template not in constants.DTS_HAVE_ACCESS: 1288 continue 1289 1290 #Check the combination of instance hypervisor, disk template and access 1291 #protocol is sane. 1292 inst_uuids = cfg.GetNodeGroupInstances(group) if group else \ 1293 cfg.GetInstanceList() 1294 1295 for entry in inst_uuids: 1296 inst = cfg.GetInstanceInfo(entry) 1297 inst_template = inst.disk_template 1298 1299 if inst_template != disk_template: 1300 continue 1301 1302 hv = inst.hypervisor 1303 1304 if not IsValidDiskAccessModeCombination(hv, inst_template, access): 1305 raise errors.OpPrereqError("Instance {i}: cannot use '{a}' access" 1306 " setting with {h} hypervisor and {d} disk" 1307 " type.".format(i=inst.name, 1308 a=access, 1309 h=hv, 1310 d=inst_template))
1311 1312
1313 -def IsValidDiskAccessModeCombination(hv, disk_template, mode):
1314    """Checks if a hypervisor can read a disk template with given mode.
1315  
1316    @param hv: the hypervisor that will access the data
1317    @param disk_template: the disk template the data is stored as
1318    @param mode: how the hypervisor should access the data
1319    @return: True if the hypervisor can access the given disk_template
1320        in the specified mode.
1321  
1322    """
1323    if mode == constants.DISK_KERNELSPACE:
1324      return True
1325  
1326    if (hv == constants.HT_KVM and
1327        disk_template in constants.DTS_HAVE_ACCESS and
1328        mode == constants.DISK_USERSPACE):
1329      return True
1330  
1331    # Everything else:
1332    return False
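A small truth-table sketch of the combinations accepted above, assuming a Ganeti installation and the standard constant names from L{ganeti.constants}:

  from ganeti import constants
  from ganeti.cmdlib.common import IsValidDiskAccessModeCombination

  # Kernelspace access is accepted for any hypervisor/template combination
  assert IsValidDiskAccessModeCombination(constants.HT_XEN_PVM,
                                          constants.DT_DRBD8,
                                          constants.DISK_KERNELSPACE)
  # Userspace access requires KVM and a template listed in DTS_HAVE_ACCESS
  assert IsValidDiskAccessModeCombination(constants.HT_KVM,
                                          constants.DT_RBD,
                                          constants.DISK_USERSPACE)
  assert not IsValidDiskAccessModeCombination(constants.HT_XEN_PVM,
                                              constants.DT_RBD,
                                              constants.DISK_USERSPACE)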
1333 1334
1335 -def AddNodeCertToCandidateCerts(lu, cfg, node_uuid):
1336 """Add the node's client SSL certificate digest to the candidate certs. 1337 1338 @type lu: L{LogicalUnit} 1339 @param lu: the logical unit 1340 @type cfg: L{ConfigWriter} 1341 @param cfg: the configuration client to use 1342 @type node_uuid: string 1343 @param node_uuid: the node's UUID 1344 1345 """ 1346 result = lu.rpc.call_node_crypto_tokens( 1347 node_uuid, 1348 [(constants.CRYPTO_TYPE_SSL_DIGEST, constants.CRYPTO_ACTION_GET, 1349 None)]) 1350 result.Raise("Could not retrieve the node's (uuid %s) SSL digest." 1351 % node_uuid) 1352 ((crypto_type, digest), ) = result.payload 1353 assert crypto_type == constants.CRYPTO_TYPE_SSL_DIGEST 1354 1355 cfg.AddNodeToCandidateCerts(node_uuid, digest)
1356 1357
1358 -def RemoveNodeCertFromCandidateCerts(cfg, node_uuid):
1359 """Removes the node's certificate from the candidate certificates list. 1360 1361 @type cfg: C{config.ConfigWriter} 1362 @param cfg: the cluster's configuration 1363 @type node_uuid: string 1364 @param node_uuid: the node's UUID 1365 1366 """ 1367 cfg.RemoveNodeFromCandidateCerts(node_uuid)
1368 1369
1370 -def GetClientCertDigest(lu, node_uuid, filename=None):
1371 """Get the client SSL certificate digest for the node. 1372 1373 @type node_uuid: string 1374 @param node_uuid: the node's UUID 1375 @type filename: string 1376 @param filename: the certificate's filename 1377 @rtype: string 1378 @return: the digest of the newly created certificate 1379 1380 """ 1381 options = {} 1382 if filename: 1383 options[constants.CRYPTO_OPTION_CERT_FILE] = filename 1384 result = lu.rpc.call_node_crypto_tokens( 1385 node_uuid, 1386 [(constants.CRYPTO_TYPE_SSL_DIGEST, 1387 constants.CRYPTO_ACTION_GET, 1388 options)]) 1389 result.Raise("Could not fetch the node's (uuid %s) SSL client" 1390 " certificate." % node_uuid) 1391 ((crypto_type, new_digest), ) = result.payload 1392 assert crypto_type == constants.CRYPTO_TYPE_SSL_DIGEST 1393 return new_digest
1394 1395
1396 -def AddInstanceCommunicationNetworkOp(network):
1397 """Create an OpCode that adds the instance communication network. 1398 1399 This OpCode contains the configuration necessary for the instance 1400 communication network. 1401 1402 @type network: string 1403 @param network: name or UUID of the instance communication network 1404 1405 @rtype: L{ganeti.opcodes.OpCode} 1406 @return: OpCode that creates the instance communication network 1407 1408 """ 1409 return opcodes.OpNetworkAdd( 1410 network_name=network, 1411 gateway=None, 1412 network=constants.INSTANCE_COMMUNICATION_NETWORK4, 1413 gateway6=None, 1414 network6=constants.INSTANCE_COMMUNICATION_NETWORK6, 1415 mac_prefix=constants.INSTANCE_COMMUNICATION_MAC_PREFIX, 1416 add_reserved_ips=None, 1417 conflicts_check=True, 1418 tags=[])
1419 1420
1421 -def ConnectInstanceCommunicationNetworkOp(group_uuid, network):
1422 """Create an OpCode that connects a group to the instance 1423 communication network. 1424 1425 This OpCode contains the configuration necessary for the instance 1426 communication network. 1427 1428 @type group_uuid: string 1429 @param group_uuid: UUID of the group to connect 1430 1431 @type network: string 1432 @param network: name or UUID of the network to connect to, i.e., the 1433 instance communication network 1434 1435 @rtype: L{ganeti.opcodes.OpCode} 1436 @return: OpCode that connects the group to the instance 1437 communication network 1438 1439 """ 1440 return opcodes.OpNetworkConnect( 1441 group_name=group_uuid, 1442 network_name=network, 1443 network_mode=constants.INSTANCE_COMMUNICATION_NETWORK_MODE, 1444 network_link=constants.INSTANCE_COMMUNICATION_NETWORK_LINK, 1445 conflicts_check=True)
1446 1447
1448 -def DetermineImageSize(lu, image, node_uuid):
1449 """Determines the size of the specified image. 1450 1451 @type image: string 1452 @param image: absolute filepath or URL of the image 1453 1454 @type node_uuid: string 1455 @param node_uuid: if L{image} is a filepath, this is the UUID of the 1456 node where the image is located 1457 1458 @rtype: int 1459 @return: size of the image in MB, rounded up 1460 @raise OpExecError: if the image does not exist 1461 1462 """ 1463 # Check if we are dealing with a URL first 1464 class _HeadRequest(urllib2.Request): 1465 def get_method(self): 1466 return "HEAD"
1467 1468 if utils.IsUrl(image): 1469 try: 1470 response = urllib2.urlopen(_HeadRequest(image)) 1471 except urllib2.URLError: 1472 raise errors.OpExecError("Could not retrieve image from given url '%s'" % 1473 image) 1474 1475 content_length_str = response.info().getheader('content-length') 1476 1477 if not content_length_str: 1478 raise errors.OpExecError("Could not determine image size from given url" 1479 " '%s'" % image) 1480 1481 byte_size = int(content_length_str) 1482 else: 1483 # We end up here if a file path is used 1484 result = lu.rpc.call_get_file_info(node_uuid, image) 1485 result.Raise("Could not determine size of file '%s'" % image) 1486 1487 success, attributes = result.payload 1488 if not success: 1489 raise errors.OpExecError("Could not open file '%s'" % image) 1490 byte_size = attributes[constants.STAT_SIZE] 1491 1492 # Finally, the conversion 1493 return math.ceil(byte_size / 1024. / 1024.) 1494 1495
1496 -def EnsureKvmdOnNodes(lu, feedback_fn, nodes=None):
1497 """Ensure KVM daemon is running on nodes with KVM instances. 1498 1499 If user shutdown is enabled in the cluster: 1500 - The KVM daemon will be started on VM capable nodes containing 1501 KVM instances. 1502 - The KVM daemon will be stopped on non VM capable nodes. 1503 1504 If user shutdown is disabled in the cluster: 1505 - The KVM daemon will be stopped on all nodes 1506 1507 Issues a warning for each failed RPC call. 1508 1509 @type lu: L{LogicalUnit} 1510 @param lu: logical unit on whose behalf we execute 1511 1512 @type feedback_fn: callable 1513 @param feedback_fn: feedback function 1514 1515 @type nodes: list of string 1516 @param nodes: if supplied, it overrides the node uuids to start/stop; 1517 this is used mainly for optimization 1518 1519 """ 1520 cluster = lu.cfg.GetClusterInfo() 1521 1522 # Either use the passed nodes or consider all cluster nodes 1523 if nodes is not None: 1524 node_uuids = set(nodes) 1525 else: 1526 node_uuids = lu.cfg.GetNodeList() 1527 1528 # Determine in which nodes should the KVM daemon be started/stopped 1529 if constants.HT_KVM in cluster.enabled_hypervisors and \ 1530 cluster.enabled_user_shutdown: 1531 start_nodes = [] 1532 stop_nodes = [] 1533 1534 for node_uuid in node_uuids: 1535 if lu.cfg.GetNodeInfo(node_uuid).vm_capable: 1536 start_nodes.append(node_uuid) 1537 else: 1538 stop_nodes.append(node_uuid) 1539 else: 1540 start_nodes = [] 1541 stop_nodes = node_uuids 1542 1543 # Start KVM where necessary 1544 if start_nodes: 1545 results = lu.rpc.call_node_ensure_daemon(start_nodes, constants.KVMD, True) 1546 for node_uuid in start_nodes: 1547 results[node_uuid].Warn("Failed to start KVM daemon in node '%s'" % 1548 node_uuid, feedback_fn) 1549 1550 # Stop KVM where necessary 1551 if stop_nodes: 1552 results = lu.rpc.call_node_ensure_daemon(stop_nodes, constants.KVMD, False) 1553 for node_uuid in stop_nodes: 1554 results[node_uuid].Warn("Failed to stop KVM daemon in node '%s'" % 1555 node_uuid, feedback_fn)
1556 1557
1558 -def WarnAboutFailedSshUpdates(result, master_uuid, feedback_fn):
1559 node_errors = result[master_uuid].payload 1560 if node_errors: 1561 feedback_fn("Some nodes' SSH key files could not be updated:") 1562 for node_name, error_msg in node_errors: 1563 feedback_fn("%s: %s" % (node_name, error_msg))
1564