Package ganeti :: Module objects

Source Code for Module ganeti.objects

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30   
  31  """Transportable objects for Ganeti. 
  32   
  33  This module provides small, mostly data-only objects which are safe to 
  34  pass to and from external parties. 
  35   
  36  """ 
  37   
  38  # pylint: disable=E0203,W0201,R0902 
  39   
  40  # E0203: Access to member %r before its definition, since we use 
  41  # objects.py which doesn't explicitly initialise its members 
  42   
  43  # W0201: Attribute '%s' defined outside __init__ 
  44   
  45  # R0902: Allow instances of these objects to have more than 20 attributes 
  46   
  47  import ConfigParser 
  48  import re 
  49  import copy 
  50  import logging 
  51  import time 
  52  from cStringIO import StringIO 
  53   
  54  from ganeti import errors 
  55  from ganeti import constants 
  56  from ganeti import netutils 
  57  from ganeti import outils 
  58  from ganeti import utils 
  59  from ganeti import serializer 
  60   
  61  from socket import AF_INET 
  62   
  63   
  64  __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance", 
  65             "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network", 
  66             "Filter"] 
  67   
  68  _TIMESTAMPS = ["ctime", "mtime"] 
  69  _UUID = ["uuid"] 
70 71 72 -def FillDict(defaults_dict, custom_dict, skip_keys=None):
73 """Basic function to apply settings on top a default dict. 74 75 @type defaults_dict: dict 76 @param defaults_dict: dictionary holding the default values 77 @type custom_dict: dict 78 @param custom_dict: dictionary holding customized value 79 @type skip_keys: list 80 @param skip_keys: which keys not to fill 81 @rtype: dict 82 @return: dict with the 'full' values 83 84 """ 85 ret_dict = copy.deepcopy(defaults_dict) 86 ret_dict.update(custom_dict) 87 if skip_keys: 88 for k in skip_keys: 89 if k in ret_dict: 90 del ret_dict[k] 91 return ret_dict
92
93 94 -def FillIPolicy(default_ipolicy, custom_ipolicy):
95 """Fills an instance policy with defaults. 96 97 """ 98 assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS 99 ret_dict = copy.deepcopy(custom_ipolicy) 100 for key in default_ipolicy: 101 if key not in ret_dict: 102 ret_dict[key] = copy.deepcopy(default_ipolicy[key]) 103 elif key == constants.ISPECS_STD: 104 ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key]) 105 return ret_dict
106
107 108 -def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
109 """Fills the disk parameter defaults. 110 111 @see: L{FillDict} for parameters and return value 112 113 """ 114 return dict((dt, FillDict(default_dparams.get(dt, {}), 115 custom_dparams.get(dt, {}), 116 skip_keys=skip_keys)) 117 for dt in constants.DISK_TEMPLATES)
118
119 120 -def UpgradeGroupedParams(target, defaults):
121 """Update all groups for the target parameter. 122 123 @type target: dict of dicts 124 @param target: {group: {parameter: value}} 125 @type defaults: dict 126 @param defaults: default parameter values 127 128 """ 129 if target is None: 130 target = {constants.PP_DEFAULT: defaults} 131 else: 132 for group in target: 133 target[group] = FillDict(defaults, target[group]) 134 return target
135
136 137 -def UpgradeBeParams(target):
138 """Update the be parameters dict to the new format. 139 140 @type target: dict 141 @param target: "be" parameters dict 142 143 """ 144 if constants.BE_MEMORY in target: 145 memory = target[constants.BE_MEMORY] 146 target[constants.BE_MAXMEM] = memory 147 target[constants.BE_MINMEM] = memory 148 del target[constants.BE_MEMORY]
149
150 151 -def UpgradeDiskParams(diskparams):
152 """Upgrade the disk parameters. 153 154 @type diskparams: dict 155 @param diskparams: disk parameters to upgrade 156 @rtype: dict 157 @return: the upgraded disk parameters dict 158 159 """ 160 if not diskparams: 161 result = {} 162 else: 163 result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams) 164 165 return result
166
167 168 -def UpgradeNDParams(ndparams):
169 """Upgrade ndparams structure. 170 171 @type ndparams: dict 172 @param ndparams: disk parameters to upgrade 173 @rtype: dict 174 @return: the upgraded node parameters dict 175 176 """ 177 if ndparams is None: 178 ndparams = {} 179 180 if (constants.ND_OOB_PROGRAM in ndparams and 181 ndparams[constants.ND_OOB_PROGRAM] is None): 182 # will be reset by the line below 183 del ndparams[constants.ND_OOB_PROGRAM] 184 return FillDict(constants.NDC_DEFAULTS, ndparams)
185
186 187 -def MakeEmptyIPolicy():
188 """Create empty IPolicy dictionary. 189 190 """ 191 return {}
192
193 194 -class ConfigObject(outils.ValidatedSlots):
195 """A generic config object. 196 197 It has the following properties: 198 199 - provides somewhat safe recursive unpickling and pickling for its classes 200 - unset attributes which are defined in slots are always returned 201 as None instead of raising an error 202 203 Classes derived from this must always declare __slots__ (we use many 204 config objects and the memory reduction is useful) 205 206 """ 207 __slots__ = [] 208
209 - def __getattr__(self, name):
210 if name not in self.GetAllSlots(): 211 raise AttributeError("Invalid object attribute %s.%s" % 212 (type(self).__name__, name)) 213 return None
214
215 - def __setstate__(self, state):
216 slots = self.GetAllSlots() 217 for name in state: 218 if name in slots: 219 setattr(self, name, state[name])
220
221 - def Validate(self):
222 """Validates the slots. 223 224 This method returns L{None} if the validation succeeds, or raises 225 an exception otherwise. 226 227 This method must be implemented by the child classes. 228 229 @rtype: NoneType 230 @return: L{None}, if the validation succeeds 231 232 @raise Exception: validation fails 233 234 """
235
236 - def ToDict(self, _with_private=False):
237 """Convert to a dict holding only standard python types. 238 239 The generic routine just dumps all of this object's attributes in 240 a dict. It does not work if the class has children who are 241 ConfigObjects themselves (e.g. the nics list in an Instance), in 242 which case the object should subclass the function in order to 243 make sure all objects returned are only standard python types. 244 245 Private fields can be included or not with the _with_private switch. 246 The actual implementation of this switch is left for those subclassses 247 with private fields to implement. 248 249 @type _with_private: bool 250 @param _with_private: if True, the object will leak its private fields in 251 the dictionary representation. If False, the values 252 will be replaced with None. 253 254 """ 255 result = {} 256 for name in self.GetAllSlots(): 257 value = getattr(self, name, None) 258 if value is not None: 259 result[name] = value 260 return result
261 262 __getstate__ = ToDict 263 264 @classmethod
265 - def FromDict(cls, val):
266 """Create an object from a dictionary. 267 268 This generic routine takes a dict, instantiates a new instance of 269 the given class, and sets attributes based on the dict content. 270 271 As for `ToDict`, this does not work if the class has children 272 who are ConfigObjects themselves (e.g. the nics list in an 273 Instance), in which case the object should subclass the function 274 and alter the objects. 275 276 """ 277 if not isinstance(val, dict): 278 raise errors.ConfigurationError("Invalid object passed to FromDict:" 279 " expected dict, got %s" % type(val)) 280 val_str = dict([(str(k), v) for k, v in val.iteritems()]) 281 obj = cls(**val_str) # pylint: disable=W0142 282 return obj
283
284 - def Copy(self):
285 """Makes a deep copy of the current object and its children. 286 287 """ 288 dict_form = self.ToDict() 289 clone_obj = self.__class__.FromDict(dict_form) 290 return clone_obj
291
292 - def __repr__(self):
293 """Implement __repr__ for ConfigObjects.""" 294 return repr(self.ToDict())
295
296 - def __eq__(self, other):
297 """Implement __eq__ for ConfigObjects.""" 298 return isinstance(other, self.__class__) and self.ToDict() == other.ToDict()
299
300 - def UpgradeConfig(self):
301 """Fill defaults for missing configuration values. 302 303 This method will be called at configuration load time, and its 304 implementation will be object dependent. 305 306 """ 307 pass
308
309 310 -class TaggableObject(ConfigObject):
311 """An generic class supporting tags. 312 313 """ 314 __slots__ = ["tags"] 315 VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$") 316 317 @classmethod
318 - def ValidateTag(cls, tag):
319 """Check if a tag is valid. 320 321 If the tag is invalid, an errors.TagError will be raised. The 322 function has no return value. 323 324 """ 325 if not isinstance(tag, basestring): 326 raise errors.TagError("Invalid tag type (not a string)") 327 if len(tag) > constants.MAX_TAG_LEN: 328 raise errors.TagError("Tag too long (>%d characters)" % 329 constants.MAX_TAG_LEN) 330 if not tag: 331 raise errors.TagError("Tags cannot be empty") 332 if not cls.VALID_TAG_RE.match(tag): 333 raise errors.TagError("Tag contains invalid characters")
334
335 - def GetTags(self):
336 """Return the tags list. 337 338 """ 339 tags = getattr(self, "tags", None) 340 if tags is None: 341 tags = self.tags = set() 342 return tags
343
344 - def AddTag(self, tag):
345 """Add a new tag. 346 347 """ 348 self.ValidateTag(tag) 349 tags = self.GetTags() 350 if len(tags) >= constants.MAX_TAGS_PER_OBJ: 351 raise errors.TagError("Too many tags") 352 self.GetTags().add(tag)
353
354 - def RemoveTag(self, tag):
355 """Remove a tag. 356 357 """ 358 self.ValidateTag(tag) 359 tags = self.GetTags() 360 try: 361 tags.remove(tag) 362 except KeyError: 363 raise errors.TagError("Tag not found")
364
365 - def ToDict(self, _with_private=False):
366 """Taggable-object-specific conversion to standard python types. 367 368 This replaces the tags set with a list. 369 370 """ 371 bo = super(TaggableObject, self).ToDict(_with_private=_with_private) 372 373 tags = bo.get("tags", None) 374 if isinstance(tags, set): 375 bo["tags"] = list(tags) 376 return bo
377 378 @classmethod
379 - def FromDict(cls, val):
380 """Custom function for instances. 381 382 """ 383 obj = super(TaggableObject, cls).FromDict(val) 384 if hasattr(obj, "tags") and isinstance(obj.tags, list): 385 obj.tags = set(obj.tags) 386 return obj
387
388 389 -class MasterNetworkParameters(ConfigObject):
390 """Network configuration parameters for the master 391 392 @ivar uuid: master nodes UUID 393 @ivar ip: master IP 394 @ivar netmask: master netmask 395 @ivar netdev: master network device 396 @ivar ip_family: master IP family 397 398 """ 399 __slots__ = [ 400 "uuid", 401 "ip", 402 "netmask", 403 "netdev", 404 "ip_family", 405 ]
406
407 408 -class ConfigData(ConfigObject):
409 """Top-level config object.""" 410 __slots__ = [ 411 "version", 412 "cluster", 413 "nodes", 414 "nodegroups", 415 "instances", 416 "networks", 417 "disks", 418 "filters", 419 "serial_no", 420 ] + _TIMESTAMPS 421
422 - def ToDict(self, _with_private=False):
423 """Custom function for top-level config data. 424 425 This just replaces the list of nodes, instances, nodegroups, 426 networks, disks and the cluster with standard python types. 427 428 """ 429 mydict = super(ConfigData, self).ToDict(_with_private=_with_private) 430 mydict["cluster"] = mydict["cluster"].ToDict() 431 for key in ("nodes", "instances", "nodegroups", "networks", "disks", 432 "filters"): 433 mydict[key] = outils.ContainerToDicts(mydict[key]) 434 435 return mydict
436 437 @classmethod
438 - def FromDict(cls, val):
439 """Custom function for top-level config data 440 441 """ 442 obj = super(ConfigData, cls).FromDict(val) 443 obj.cluster = Cluster.FromDict(obj.cluster) 444 obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node) 445 obj.instances = \ 446 outils.ContainerFromDicts(obj.instances, dict, Instance) 447 obj.nodegroups = \ 448 outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup) 449 obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network) 450 obj.disks = outils.ContainerFromDicts(obj.disks, dict, Disk) 451 obj.filters = outils.ContainerFromDicts(obj.filters, dict, Filter) 452 return obj
453
454 - def DisksOfType(self, dev_type):
455 """Check if in there is at disk of the given type in the configuration. 456 457 @type dev_type: L{constants.DTS_BLOCK} 458 @param dev_type: the type to look for 459 @rtype: list of disks 460 @return: all disks of the dev_type 461 462 """ 463 464 return [disk for disk in self.disks.values() 465 if disk.IsBasedOnDiskType(dev_type)]
466
467 - def UpgradeConfig(self):
468 """Fill defaults for missing configuration values. 469 470 """ 471 self.cluster.UpgradeConfig() 472 for node in self.nodes.values(): 473 node.UpgradeConfig() 474 for instance in self.instances.values(): 475 instance.UpgradeConfig() 476 self._UpgradeEnabledDiskTemplates() 477 if self.nodegroups is None: 478 self.nodegroups = {} 479 for nodegroup in self.nodegroups.values(): 480 nodegroup.UpgradeConfig() 481 InstancePolicy.UpgradeDiskTemplates( 482 nodegroup.ipolicy, self.cluster.enabled_disk_templates) 483 if self.cluster.drbd_usermode_helper is None: 484 if self.cluster.IsDiskTemplateEnabled(constants.DT_DRBD8): 485 self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER 486 if self.networks is None: 487 self.networks = {} 488 for network in self.networks.values(): 489 network.UpgradeConfig() 490 for disk in self.disks.values(): 491 disk.UpgradeConfig() 492 if self.filters is None: 493 self.filters = {}
494
496 """Upgrade the cluster's enabled disk templates by inspecting the currently 497 enabled and/or used disk templates. 498 499 """ 500 if not self.cluster.enabled_disk_templates: 501 template_set = \ 502 set([d.dev_type for d in self.disks.values()]) 503 if any(not inst.disks for inst in self.instances.values()): 504 template_set.add(constants.DT_DISKLESS) 505 # Add drbd and plain, if lvm is enabled (by specifying a volume group) 506 if self.cluster.volume_group_name: 507 template_set.add(constants.DT_DRBD8) 508 template_set.add(constants.DT_PLAIN) 509 # Set enabled_disk_templates to the inferred disk templates. Order them 510 # according to a preference list that is based on Ganeti's history of 511 # supported disk templates. 512 self.cluster.enabled_disk_templates = [] 513 for preferred_template in constants.DISK_TEMPLATE_PREFERENCE: 514 if preferred_template in template_set: 515 self.cluster.enabled_disk_templates.append(preferred_template) 516 template_set.remove(preferred_template) 517 self.cluster.enabled_disk_templates.extend(list(template_set)) 518 InstancePolicy.UpgradeDiskTemplates( 519 self.cluster.ipolicy, self.cluster.enabled_disk_templates)
520
521 522 -class NIC(ConfigObject):
523 """Config object representing a network card.""" 524 __slots__ = ["name", "mac", "ip", "network", 525 "nicparams", "netinfo", "pci"] + _UUID 526 527 @classmethod
528 - def CheckParameterSyntax(cls, nicparams):
529 """Check the given parameters for validity. 530 531 @type nicparams: dict 532 @param nicparams: dictionary with parameter names/value 533 @raise errors.ConfigurationError: when a parameter is not valid 534 535 """ 536 mode = nicparams[constants.NIC_MODE] 537 if (mode not in constants.NIC_VALID_MODES and 538 mode != constants.VALUE_AUTO): 539 raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode) 540 541 if (mode == constants.NIC_MODE_BRIDGED and 542 not nicparams[constants.NIC_LINK]): 543 raise errors.ConfigurationError("Missing bridged NIC link")
544
545 546 -class Filter(ConfigObject):
547 """Config object representing a filter rule.""" 548 __slots__ = ["watermark", "priority", 549 "predicates", "action", "reason_trail"] + _UUID
550
551 552 -class Disk(ConfigObject):
553 """Config object representing a block device.""" 554 __slots__ = [ 555 "forthcoming", 556 "name", 557 "dev_type", 558 "logical_id", 559 "children", 560 "nodes", 561 "iv_name", 562 "size", 563 "mode", 564 "params", 565 "spindles", 566 "pci", 567 "serial_no", 568 # dynamic_params is special. It depends on the node this instance 569 # is sent to, and should not be persisted. 570 "dynamic_params" 571 ] + _UUID + _TIMESTAMPS 572
573 - def _ComputeAllNodes(self):
574 """Compute the list of all nodes covered by a device and its children.""" 575 def _Helper(nodes, device): 576 """Recursively compute nodes given a top device.""" 577 if device.dev_type in constants.DTS_DRBD: 578 nodes.extend(device.logical_id[:2]) 579 if device.children: 580 for child in device.children: 581 _Helper(nodes, child)
582 583 all_nodes = list() 584 _Helper(all_nodes, self) 585 return tuple(set(all_nodes))
586 587 all_nodes = property(_ComputeAllNodes, None, None, 588 "List of names of all the nodes of a disk") 589
590 - def CreateOnSecondary(self):
591 """Test if this device needs to be created on a secondary node.""" 592 return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
593
594 - def AssembleOnSecondary(self):
595 """Test if this device needs to be assembled on a secondary node.""" 596 return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
597
598 - def OpenOnSecondary(self):
599 """Test if this device needs to be opened on a secondary node.""" 600 return self.dev_type in (constants.DT_PLAIN,)
601
602 - def SupportsSnapshots(self):
603 """Test if this device supports snapshots.""" 604 return self.dev_type in constants.DTS_SNAPSHOT_CAPABLE
605
606 - def StaticDevPath(self):
607 """Return the device path if this device type has a static one. 608 609 Some devices (LVM for example) live always at the same /dev/ path, 610 irrespective of their status. For such devices, we return this 611 path, for others we return None. 612 613 @warning: The path returned is not a normalized pathname; callers 614 should check that it is a valid path. 615 616 """ 617 if self.dev_type == constants.DT_PLAIN: 618 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1]) 619 elif self.dev_type == constants.DT_BLOCK: 620 return self.logical_id[1] 621 elif self.dev_type == constants.DT_RBD: 622 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1]) 623 return None
624
625 - def ChildrenNeeded(self):
626 """Compute the needed number of children for activation. 627 628 This method will return either -1 (all children) or a positive 629 number denoting the minimum number of children needed for 630 activation (only mirrored devices will usually return >=0). 631 632 Currently, only DRBD8 supports diskless activation (therefore we 633 return 0), for all other we keep the previous semantics and return 634 -1. 635 636 """ 637 if self.dev_type == constants.DT_DRBD8: 638 return 0 639 return -1
640
641 - def IsBasedOnDiskType(self, dev_type):
642 """Check if the disk or its children are based on the given type. 643 644 @type dev_type: L{constants.DTS_BLOCK} 645 @param dev_type: the type to look for 646 @rtype: boolean 647 @return: boolean indicating if a device of the given type was found or not 648 649 """ 650 if self.children: 651 for child in self.children: 652 if child.IsBasedOnDiskType(dev_type): 653 return True 654 return self.dev_type == dev_type
655
656 - def GetNodes(self, node_uuid):
657 """This function returns the nodes this device lives on. 658 659 Given the node on which the parent of the device lives on (or, in 660 case of a top-level device, the primary node of the devices' 661 instance), this function will return a list of nodes on which this 662 devices needs to (or can) be assembled. 663 664 """ 665 if self.dev_type in [constants.DT_PLAIN, constants.DT_FILE, 666 constants.DT_BLOCK, constants.DT_RBD, 667 constants.DT_EXT, constants.DT_SHARED_FILE, 668 constants.DT_GLUSTER]: 669 result = [node_uuid] 670 elif self.dev_type in constants.DTS_DRBD: 671 result = [self.logical_id[0], self.logical_id[1]] 672 if node_uuid not in result: 673 raise errors.ConfigurationError("DRBD device passed unknown node") 674 else: 675 raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type) 676 return result
677
678 - def GetPrimaryNode(self, node_uuid):
679 """This function returns the primary node of the device. 680 681 If the device is not a DRBD device, we still return the node the device 682 lives on. 683 684 """ 685 if self.dev_type in constants.DTS_DRBD: 686 return self.logical_id[0] 687 return node_uuid
688
689 - def ComputeNodeTree(self, parent_node_uuid):
690 """Compute the node/disk tree for this disk and its children. 691 692 This method, given the node on which the parent disk lives, will 693 return the list of all (node UUID, disk) pairs which describe the disk 694 tree in the most compact way. For example, a drbd/lvm stack 695 will be returned as (primary_node, drbd) and (secondary_node, drbd) 696 which represents all the top-level devices on the nodes. 697 698 """ 699 my_nodes = self.GetNodes(parent_node_uuid) 700 result = [(node, self) for node in my_nodes] 701 if not self.children: 702 # leaf device 703 return result 704 for node in my_nodes: 705 for child in self.children: 706 child_result = child.ComputeNodeTree(node) 707 if len(child_result) == 1: 708 # child (and all its descendants) is simple, doesn't split 709 # over multiple hosts, so we don't need to describe it, our 710 # own entry for this node describes it completely 711 continue 712 else: 713 # check if child nodes differ from my nodes; note that 714 # subdisk can differ from the child itself, and be instead 715 # one of its descendants 716 for subnode, subdisk in child_result: 717 if subnode not in my_nodes: 718 result.append((subnode, subdisk)) 719 # otherwise child is under our own node, so we ignore this 720 # entry (but probably the other results in the list will 721 # be different) 722 return result
723
724 - def ComputeGrowth(self, amount):
725 """Compute the per-VG growth requirements. 726 727 This only works for VG-based disks. 728 729 @type amount: integer 730 @param amount: the desired increase in (user-visible) disk space 731 @rtype: dict 732 @return: a dictionary of volume-groups and the required size 733 734 """ 735 if self.dev_type == constants.DT_PLAIN: 736 return {self.logical_id[0]: amount} 737 elif self.dev_type == constants.DT_DRBD8: 738 if self.children: 739 return self.children[0].ComputeGrowth(amount) 740 else: 741 return {} 742 else: 743 # Other disk types do not require VG space 744 return {}
745
746 - def RecordGrow(self, amount):
747 """Update the size of this disk after growth. 748 749 This method recurses over the disks's children and updates their 750 size correspondigly. The method needs to be kept in sync with the 751 actual algorithms from bdev. 752 753 """ 754 if self.dev_type in (constants.DT_PLAIN, constants.DT_FILE, 755 constants.DT_RBD, constants.DT_EXT, 756 constants.DT_SHARED_FILE, constants.DT_GLUSTER): 757 self.size += amount 758 elif self.dev_type == constants.DT_DRBD8: 759 if self.children: 760 self.children[0].RecordGrow(amount) 761 self.size += amount 762 else: 763 raise errors.ProgrammerError("Disk.RecordGrow called for unsupported" 764 " disk type %s" % self.dev_type)
765
766 - def Update(self, size=None, mode=None, spindles=None):
767 """Apply changes to size, spindles and mode. 768 769 """ 770 if self.dev_type == constants.DT_DRBD8: 771 if self.children: 772 self.children[0].Update(size=size, mode=mode) 773 else: 774 assert not self.children 775 776 if size is not None: 777 self.size = size 778 if mode is not None: 779 self.mode = mode 780 if spindles is not None: 781 self.spindles = spindles
782
783 - def UnsetSize(self):
784 """Sets recursively the size to zero for the disk and its children. 785 786 """ 787 if self.children: 788 for child in self.children: 789 child.UnsetSize() 790 self.size = 0
791
792 - def UpdateDynamicDiskParams(self, target_node_uuid, nodes_ip):
793 """Updates the dynamic disk params for the given node. 794 795 This is mainly used for drbd, which needs ip/port configuration. 796 797 Arguments: 798 - target_node_uuid: the node UUID we wish to configure for 799 - nodes_ip: a mapping of node name to ip 800 801 The target_node must exist in nodes_ip, and should be one of the 802 nodes in the logical ID if this device is a DRBD device. 803 804 """ 805 if self.children: 806 for child in self.children: 807 child.UpdateDynamicDiskParams(target_node_uuid, nodes_ip) 808 809 dyn_disk_params = {} 810 if self.logical_id is not None and self.dev_type in constants.DTS_DRBD: 811 pnode_uuid, snode_uuid, _, pminor, sminor, _ = self.logical_id 812 if target_node_uuid not in (pnode_uuid, snode_uuid): 813 # disk object is being sent to neither the primary nor the secondary 814 # node. reset the dynamic parameters, the target node is not 815 # supposed to use them. 816 self.dynamic_params = dyn_disk_params 817 return 818 819 pnode_ip = nodes_ip.get(pnode_uuid, None) 820 snode_ip = nodes_ip.get(snode_uuid, None) 821 if pnode_ip is None or snode_ip is None: 822 raise errors.ConfigurationError("Can't find primary or secondary node" 823 " for %s" % str(self)) 824 if pnode_uuid == target_node_uuid: 825 dyn_disk_params[constants.DDP_LOCAL_IP] = pnode_ip 826 dyn_disk_params[constants.DDP_REMOTE_IP] = snode_ip 827 dyn_disk_params[constants.DDP_LOCAL_MINOR] = pminor 828 dyn_disk_params[constants.DDP_REMOTE_MINOR] = sminor 829 else: # it must be secondary, we tested above 830 dyn_disk_params[constants.DDP_LOCAL_IP] = snode_ip 831 dyn_disk_params[constants.DDP_REMOTE_IP] = pnode_ip 832 dyn_disk_params[constants.DDP_LOCAL_MINOR] = sminor 833 dyn_disk_params[constants.DDP_REMOTE_MINOR] = pminor 834 835 self.dynamic_params = dyn_disk_params
836 837 # pylint: disable=W0221
838 - def ToDict(self, include_dynamic_params=False, 839 _with_private=False):
840 """Disk-specific conversion to standard python types. 841 842 This replaces the children lists of objects with lists of 843 standard python types. 844 845 """ 846 bo = super(Disk, self).ToDict(_with_private=_with_private) 847 if not include_dynamic_params and "dynamic_params" in bo: 848 del bo["dynamic_params"] 849 850 if _with_private and "logical_id" in bo: 851 mutable_id = list(bo["logical_id"]) 852 mutable_id[5] = mutable_id[5].Get() 853 bo["logical_id"] = tuple(mutable_id) 854 855 for attr in ("children",): 856 alist = bo.get(attr, None) 857 if alist: 858 bo[attr] = outils.ContainerToDicts(alist) 859 return bo
860 861 @classmethod
862 - def FromDict(cls, val):
863 """Custom function for Disks 864 865 """ 866 obj = super(Disk, cls).FromDict(val) 867 if obj.children: 868 obj.children = outils.ContainerFromDicts(obj.children, list, Disk) 869 if obj.logical_id and isinstance(obj.logical_id, list): 870 obj.logical_id = tuple(obj.logical_id) 871 if obj.dev_type in constants.DTS_DRBD: 872 # we need a tuple of length six here 873 if len(obj.logical_id) < 6: 874 obj.logical_id += (None,) * (6 - len(obj.logical_id)) 875 # If we do have a tuple of length 6, make the last entry (secret key) 876 # private 877 elif (len(obj.logical_id) == 6 and 878 not isinstance(obj.logical_id[-1], serializer.Private)): 879 obj.logical_id = obj.logical_id[:-1] + \ 880 (serializer.Private(obj.logical_id[-1]),) 881 return obj
882
883 - def __str__(self):
884 """Custom str() formatter for disks. 885 886 """ 887 if self.dev_type == constants.DT_PLAIN: 888 val = "<LogicalVolume(/dev/%s/%s" % self.logical_id 889 elif self.dev_type in constants.DTS_DRBD: 890 node_a, node_b, port, minor_a, minor_b = self.logical_id[:5] 891 val = "<DRBD8(" 892 893 val += ("hosts=%s/%d-%s/%d, port=%s, " % 894 (node_a, minor_a, node_b, minor_b, port)) 895 if self.children and self.children.count(None) == 0: 896 val += "backend=%s, metadev=%s" % (self.children[0], self.children[1]) 897 else: 898 val += "no local storage" 899 else: 900 val = ("<Disk(type=%s, logical_id=%s, children=%s" % 901 (self.dev_type, self.logical_id, self.children)) 902 if self.iv_name is None: 903 val += ", not visible" 904 else: 905 val += ", visible as /dev/%s" % self.iv_name 906 if self.spindles is not None: 907 val += ", spindles=%s" % self.spindles 908 if isinstance(self.size, int): 909 val += ", size=%dm)>" % self.size 910 else: 911 val += ", size='%s')>" % (self.size,) 912 return val
913
914 - def Verify(self):
915 """Checks that this disk is correctly configured. 916 917 """ 918 all_errors = [] 919 if self.mode not in constants.DISK_ACCESS_SET: 920 all_errors.append("Disk access mode '%s' is invalid" % (self.mode, )) 921 return all_errors
922
923 - def UpgradeConfig(self):
924 """Fill defaults for missing configuration values. 925 926 """ 927 if self.children: 928 for child in self.children: 929 child.UpgradeConfig() 930 931 # FIXME: Make this configurable in Ganeti 2.7 932 # Params should be an empty dict that gets filled any time needed 933 # In case of ext template we allow arbitrary params that should not 934 # be overrided during a config reload/upgrade. 935 if not self.params or not isinstance(self.params, dict): 936 self.params = {} 937 938 # add here config upgrade for this disk 939 if self.serial_no is None: 940 self.serial_no = 1 941 if self.mtime is None: 942 self.mtime = time.time() 943 if self.ctime is None: 944 self.ctime = time.time() 945 946 # map of legacy device types (mapping differing LD constants to new 947 # DT constants) 948 LEG_DEV_TYPE_MAP = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8} 949 if self.dev_type in LEG_DEV_TYPE_MAP: 950 self.dev_type = LEG_DEV_TYPE_MAP[self.dev_type]
951 952 @staticmethod
953 - def ComputeLDParams(disk_template, disk_params):
954 """Computes Logical Disk parameters from Disk Template parameters. 955 956 @type disk_template: string 957 @param disk_template: disk template, one of L{constants.DISK_TEMPLATES} 958 @type disk_params: dict 959 @param disk_params: disk template parameters; 960 dict(template_name -> parameters 961 @rtype: list(dict) 962 @return: a list of dicts, one for each node of the disk hierarchy. Each dict 963 contains the LD parameters of the node. The tree is flattened in-order. 964 965 """ 966 if disk_template not in constants.DISK_TEMPLATES: 967 raise errors.ProgrammerError("Unknown disk template %s" % disk_template) 968 969 assert disk_template in disk_params 970 971 result = list() 972 dt_params = disk_params[disk_template] 973 974 if disk_template == constants.DT_DRBD8: 975 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_DRBD8], { 976 constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE], 977 constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS], 978 constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS], 979 constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG], 980 constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM], 981 constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM], 982 constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL], 983 constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC], 984 constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD], 985 constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET], 986 constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET], 987 constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE], 988 constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE], 989 })) 990 991 # data LV 992 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], { 993 constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES], 994 })) 995 996 # metadata LV 997 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], { 998 constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES], 999 })) 1000 1001 else: 1002 defaults = constants.DISK_LD_DEFAULTS[disk_template] 1003 values = {} 1004 for field in defaults: 1005 values[field] = dt_params[field] 1006 result.append(FillDict(defaults, values)) 1007 1008 return result
1009
1010 1011 -class InstancePolicy(ConfigObject):
1012 """Config object representing instance policy limits dictionary. 1013 1014 Note that this object is not actually used in the config, it's just 1015 used as a placeholder for a few functions. 1016 1017 """ 1018 @classmethod
1019 - def UpgradeDiskTemplates(cls, ipolicy, enabled_disk_templates):
1020 """Upgrades the ipolicy configuration.""" 1021 if constants.IPOLICY_DTS in ipolicy: 1022 if not set(ipolicy[constants.IPOLICY_DTS]).issubset( 1023 set(enabled_disk_templates)): 1024 ipolicy[constants.IPOLICY_DTS] = list( 1025 set(ipolicy[constants.IPOLICY_DTS]) & set(enabled_disk_templates))
1026 1027 @classmethod
1028 - def CheckParameterSyntax(cls, ipolicy, check_std):
1029 """ Check the instance policy for validity. 1030 1031 @type ipolicy: dict 1032 @param ipolicy: dictionary with min/max/std specs and policies 1033 @type check_std: bool 1034 @param check_std: Whether to check std value or just assume compliance 1035 @raise errors.ConfigurationError: when the policy is not legal 1036 1037 """ 1038 InstancePolicy.CheckISpecSyntax(ipolicy, check_std) 1039 if constants.IPOLICY_DTS in ipolicy: 1040 InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS]) 1041 for key in constants.IPOLICY_PARAMETERS: 1042 if key in ipolicy: 1043 InstancePolicy.CheckParameter(key, ipolicy[key]) 1044 wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS 1045 if wrong_keys: 1046 raise errors.ConfigurationError("Invalid keys in ipolicy: %s" % 1047 utils.CommaJoin(wrong_keys))
1048 1049 @classmethod
1050 - def _CheckIncompleteSpec(cls, spec, keyname):
1051 missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys()) 1052 if missing_params: 1053 msg = ("Missing instance specs parameters for %s: %s" % 1054 (keyname, utils.CommaJoin(missing_params))) 1055 raise errors.ConfigurationError(msg)
1056 1057 @classmethod
1058 - def CheckISpecSyntax(cls, ipolicy, check_std):
1059 """Check the instance policy specs for validity. 1060 1061 @type ipolicy: dict 1062 @param ipolicy: dictionary with min/max/std specs 1063 @type check_std: bool 1064 @param check_std: Whether to check std value or just assume compliance 1065 @raise errors.ConfigurationError: when specs are not valid 1066 1067 """ 1068 if constants.ISPECS_MINMAX not in ipolicy: 1069 # Nothing to check 1070 return 1071 1072 if check_std and constants.ISPECS_STD not in ipolicy: 1073 msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD 1074 raise errors.ConfigurationError(msg) 1075 stdspec = ipolicy.get(constants.ISPECS_STD) 1076 if check_std: 1077 InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD) 1078 1079 if not ipolicy[constants.ISPECS_MINMAX]: 1080 raise errors.ConfigurationError("Empty minmax specifications") 1081 std_is_good = False 1082 for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]: 1083 missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys()) 1084 if missing: 1085 msg = "Missing instance specification: %s" % utils.CommaJoin(missing) 1086 raise errors.ConfigurationError(msg) 1087 for (key, spec) in minmaxspecs.items(): 1088 InstancePolicy._CheckIncompleteSpec(spec, key) 1089 1090 spec_std_ok = True 1091 for param in constants.ISPECS_PARAMETERS: 1092 par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec, 1093 param, check_std) 1094 spec_std_ok = spec_std_ok and par_std_ok 1095 std_is_good = std_is_good or spec_std_ok 1096 if not std_is_good: 1097 raise errors.ConfigurationError("Invalid std specifications")
1098 1099 @classmethod
1100 - def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
1101 """Check the instance policy specs for validity on a given key. 1102 1103 We check if the instance specs makes sense for a given key, that is 1104 if minmaxspecs[min][name] <= stdspec[name] <= minmaxspec[max][name]. 1105 1106 @type minmaxspecs: dict 1107 @param minmaxspecs: dictionary with min and max instance spec 1108 @type stdspec: dict 1109 @param stdspec: dictionary with standard instance spec 1110 @type name: string 1111 @param name: what are the limits for 1112 @type check_std: bool 1113 @param check_std: Whether to check std value or just assume compliance 1114 @rtype: bool 1115 @return: C{True} when specs are valid, C{False} when standard spec for the 1116 given name is not valid 1117 @raise errors.ConfigurationError: when min/max specs for the given name 1118 are not valid 1119 1120 """ 1121 minspec = minmaxspecs[constants.ISPECS_MIN] 1122 maxspec = minmaxspecs[constants.ISPECS_MAX] 1123 min_v = minspec[name] 1124 max_v = maxspec[name] 1125 1126 if min_v > max_v: 1127 err = ("Invalid specification of min/max values for %s: %s/%s" % 1128 (name, min_v, max_v)) 1129 raise errors.ConfigurationError(err) 1130 elif check_std: 1131 std_v = stdspec.get(name, min_v) 1132 return std_v >= min_v and std_v <= max_v 1133 else: 1134 return True
1135 1136 @classmethod
1137 - def CheckDiskTemplates(cls, disk_templates):
1138 """Checks the disk templates for validity. 1139 1140 """ 1141 if not disk_templates: 1142 raise errors.ConfigurationError("Instance policy must contain" + 1143 " at least one disk template") 1144 wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES) 1145 if wrong: 1146 raise errors.ConfigurationError("Invalid disk template(s) %s" % 1147 utils.CommaJoin(wrong))
1148 1149 @classmethod
1150 - def CheckParameter(cls, key, value):
1151 """Checks a parameter. 1152 1153 Currently we expect all parameters to be float values. 1154 1155 """ 1156 try: 1157 float(value) 1158 except (TypeError, ValueError), err: 1159 raise errors.ConfigurationError("Invalid value for key" " '%s':" 1160 " '%s', error: %s" % (key, value, err))
1161
1162 1163 -def GetOSImage(osparams):
1164 """Gets the OS image value from the OS parameters. 1165 1166 @type osparams: L{dict} or NoneType 1167 @param osparams: OS parameters or None 1168 1169 @rtype: string or NoneType 1170 @return: 1171 value of OS image contained in OS parameters, or None if the OS 1172 parameters are None or the OS parameters do not contain an OS 1173 image 1174 1175 """ 1176 if osparams is None: 1177 return None 1178 else: 1179 return osparams.get("os-image", None)
1180
1181 1182 -def PutOSImage(osparams, os_image):
1183 """Update OS image value in the OS parameters 1184 1185 @type osparams: L{dict} 1186 @param osparams: OS parameters 1187 1188 @type os_image: string 1189 @param os_image: OS image 1190 1191 @rtype: NoneType 1192 @return: None 1193 1194 """ 1195 osparams["os-image"] = os_image
1196
1197 1198 -class Instance(TaggableObject):
1199 """Config object representing an instance.""" 1200 __slots__ = [ 1201 "forthcoming", 1202 "name", 1203 "primary_node", 1204 "secondary_nodes", 1205 "os", 1206 "hypervisor", 1207 "hvparams", 1208 "beparams", 1209 "osparams", 1210 "osparams_private", 1211 "admin_state", 1212 "admin_state_source", 1213 "nics", 1214 "disks", 1215 "disks_info", 1216 "disk_template", 1217 "disks_active", 1218 "network_port", 1219 "serial_no", 1220 ] + _TIMESTAMPS + _UUID 1221
1222 - def FindDisk(self, idx):
1223 """Find a disk given having a specified index. 1224 1225 This is just a wrapper that does validation of the index. 1226 1227 @type idx: int 1228 @param idx: the disk index 1229 @rtype: string 1230 @return: the corresponding disk's uuid 1231 @raise errors.OpPrereqError: when the given index is not valid 1232 1233 """ 1234 try: 1235 idx = int(idx) 1236 return self.disks[idx] 1237 except (TypeError, ValueError), err: 1238 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err), 1239 errors.ECODE_INVAL) 1240 except IndexError: 1241 raise errors.OpPrereqError("Invalid disk index: %d (instace has disks" 1242 " 0 to %d" % (idx, len(self.disks) - 1), 1243 errors.ECODE_INVAL)
1244
1245 - def ToDict(self, _with_private=False):
1246 """Instance-specific conversion to standard python types. 1247 1248 This replaces the children lists of objects with lists of standard 1249 python types. 1250 1251 """ 1252 bo = super(Instance, self).ToDict(_with_private=_with_private) 1253 1254 if _with_private: 1255 bo["osparams_private"] = self.osparams_private.Unprivate() 1256 1257 for attr in "nics", : 1258 alist = bo.get(attr, None) 1259 if alist: 1260 nlist = outils.ContainerToDicts(alist) 1261 else: 1262 nlist = [] 1263 bo[attr] = nlist 1264 1265 if 'disk_template' in bo: 1266 del bo['disk_template'] 1267 1268 return bo
1269 1270 @classmethod
1271 - def FromDict(cls, val):
1272 """Custom function for instances. 1273 1274 """ 1275 if "admin_state" not in val: 1276 if val.get("admin_up", False): 1277 val["admin_state"] = constants.ADMINST_UP 1278 else: 1279 val["admin_state"] = constants.ADMINST_DOWN 1280 if "admin_up" in val: 1281 del val["admin_up"] 1282 obj = super(Instance, cls).FromDict(val) 1283 obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC) 1284 1285 # attribute 'disks_info' is only present when deserializing from a RPC 1286 # call in the backend 1287 disks_info = getattr(obj, "disks_info", None) 1288 if disks_info: 1289 obj.disks_info = outils.ContainerFromDicts(disks_info, list, Disk) 1290 1291 return obj
1292
1293 - def UpgradeConfig(self):
1294 """Fill defaults for missing configuration values. 1295 1296 """ 1297 if self.admin_state_source is None: 1298 self.admin_state_source = constants.ADMIN_SOURCE 1299 for nic in self.nics: 1300 nic.UpgradeConfig() 1301 if self.disks is None: 1302 self.disks = [] 1303 if self.hvparams: 1304 for key in constants.HVC_GLOBALS: 1305 try: 1306 del self.hvparams[key] 1307 except KeyError: 1308 pass 1309 if self.osparams is None: 1310 self.osparams = {} 1311 if self.osparams_private is None: 1312 self.osparams_private = serializer.PrivateDict() 1313 UpgradeBeParams(self.beparams) 1314 if self.disks_active is None: 1315 self.disks_active = self.admin_state == constants.ADMINST_UP
1316
1317 1318 -class OS(ConfigObject):
1319 """Config object representing an operating system. 1320 1321 @type supported_parameters: list 1322 @ivar supported_parameters: a list of tuples, name and description, 1323 containing the supported parameters by this OS 1324 1325 @type VARIANT_DELIM: string 1326 @cvar VARIANT_DELIM: the variant delimiter 1327 1328 """ 1329 __slots__ = [ 1330 "name", 1331 "path", 1332 "api_versions", 1333 "create_script", 1334 "create_script_untrusted", 1335 "export_script", 1336 "import_script", 1337 "rename_script", 1338 "verify_script", 1339 "supported_variants", 1340 "supported_parameters", 1341 ] 1342 1343 VARIANT_DELIM = "+" 1344 1345 @classmethod
1346 - def SplitNameVariant(cls, name):
1347 """Splits the name into the proper name and variant. 1348 1349 @param name: the OS (unprocessed) name 1350 @rtype: list 1351 @return: a list of two elements; if the original name didn't 1352 contain a variant, it's returned as an empty string 1353 1354 """ 1355 nv = name.split(cls.VARIANT_DELIM, 1) 1356 if len(nv) == 1: 1357 nv.append("") 1358 return nv
1359 1360 @classmethod
1361 - def GetName(cls, name):
1362 """Returns the proper name of the os (without the variant). 1363 1364 @param name: the OS (unprocessed) name 1365 1366 """ 1367 return cls.SplitNameVariant(name)[0]
1368 1369 @classmethod
1370 - def GetVariant(cls, name):
1371 """Returns the variant the os (without the base name). 1372 1373 @param name: the OS (unprocessed) name 1374 1375 """ 1376 return cls.SplitNameVariant(name)[1]
1377
1378 - def IsTrusted(self):
1379 """Returns whether this OS is trusted. 1380 1381 @rtype: bool 1382 @return: L{True} if this OS is trusted, L{False} otherwise 1383 1384 """ 1385 return not self.create_script_untrusted
1386
1387 1388 -class ExtStorage(ConfigObject):
1389 """Config object representing an External Storage Provider. 1390 1391 """ 1392 __slots__ = [ 1393 "name", 1394 "path", 1395 "create_script", 1396 "remove_script", 1397 "grow_script", 1398 "attach_script", 1399 "detach_script", 1400 "setinfo_script", 1401 "verify_script", 1402 "snapshot_script", 1403 "open_script", 1404 "close_script", 1405 "supported_parameters", 1406 ]
1407
1408 1409 -class NodeHvState(ConfigObject):
1410 """Hypvervisor state on a node. 1411 1412 @ivar mem_total: Total amount of memory 1413 @ivar mem_node: Memory used by, or reserved for, the node itself (not always 1414 available) 1415 @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation 1416 rounding 1417 @ivar mem_inst: Memory used by instances living on node 1418 @ivar cpu_total: Total node CPU core count 1419 @ivar cpu_node: Number of CPU cores reserved for the node itself 1420 1421 """ 1422 __slots__ = [ 1423 "mem_total", 1424 "mem_node", 1425 "mem_hv", 1426 "mem_inst", 1427 "cpu_total", 1428 "cpu_node", 1429 ] + _TIMESTAMPS
1430
1431 1432 -class NodeDiskState(ConfigObject):
1433 """Disk state on a node. 1434 1435 """ 1436 __slots__ = [ 1437 "total", 1438 "reserved", 1439 "overhead", 1440 ] + _TIMESTAMPS
1441
1442 1443 -class Node(TaggableObject):
1444 """Config object representing a node. 1445 1446 @ivar hv_state: Hypervisor state (e.g. number of CPUs) 1447 @ivar hv_state_static: Hypervisor state overriden by user 1448 @ivar disk_state: Disk state (e.g. free space) 1449 @ivar disk_state_static: Disk state overriden by user 1450 1451 """ 1452 __slots__ = [ 1453 "name", 1454 "primary_ip", 1455 "secondary_ip", 1456 "serial_no", 1457 "master_candidate", 1458 "offline", 1459 "drained", 1460 "group", 1461 "master_capable", 1462 "vm_capable", 1463 "ndparams", 1464 "powered", 1465 "hv_state", 1466 "hv_state_static", 1467 "disk_state", 1468 "disk_state_static", 1469 ] + _TIMESTAMPS + _UUID 1470
1471 - def UpgradeConfig(self):
1472 """Fill defaults for missing configuration values. 1473 1474 """ 1475 # pylint: disable=E0203 1476 # because these are "defined" via slots, not manually 1477 if self.master_capable is None: 1478 self.master_capable = True 1479 1480 if self.vm_capable is None: 1481 self.vm_capable = True 1482 1483 if self.ndparams is None: 1484 self.ndparams = {} 1485 # And remove any global parameter 1486 for key in constants.NDC_GLOBALS: 1487 if key in self.ndparams: 1488 logging.warning("Ignoring %s node parameter for node %s", 1489 key, self.name) 1490 del self.ndparams[key] 1491 1492 if self.powered is None: 1493 self.powered = True
1494
1495 - def ToDict(self, _with_private=False):
1496 """Custom function for serializing. 1497 1498 """ 1499 data = super(Node, self).ToDict(_with_private=_with_private) 1500 1501 hv_state = data.get("hv_state", None) 1502 if hv_state is not None: 1503 data["hv_state"] = outils.ContainerToDicts(hv_state) 1504 1505 disk_state = data.get("disk_state", None) 1506 if disk_state is not None: 1507 data["disk_state"] = \ 1508 dict((key, outils.ContainerToDicts(value)) 1509 for (key, value) in disk_state.items()) 1510 1511 return data
1512 1513 @classmethod
1514 - def FromDict(cls, val):
1515 """Custom function for deserializing. 1516 1517 """ 1518 obj = super(Node, cls).FromDict(val) 1519 1520 if obj.hv_state is not None: 1521 obj.hv_state = \ 1522 outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState) 1523 1524 if obj.disk_state is not None: 1525 obj.disk_state = \ 1526 dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState)) 1527 for (key, value) in obj.disk_state.items()) 1528 1529 return obj
1530
1531 1532 -class NodeGroup(TaggableObject):
1533 """Config object representing a node group.""" 1534 __slots__ = [ 1535 "name", 1536 "members", 1537 "ndparams", 1538 "diskparams", 1539 "ipolicy", 1540 "serial_no", 1541 "hv_state_static", 1542 "disk_state_static", 1543 "alloc_policy", 1544 "networks", 1545 ] + _TIMESTAMPS + _UUID 1546
1547 - def ToDict(self, _with_private=False):
1548 """Custom function for nodegroup. 1549 1550 This discards the members object, which gets recalculated and is only kept 1551 in memory. 1552 1553 """ 1554 mydict = super(NodeGroup, self).ToDict(_with_private=_with_private) 1555 del mydict["members"] 1556 return mydict
1557 1558 @classmethod
1559 - def FromDict(cls, val):
1560 """Custom function for nodegroup. 1561 1562 The members slot is initialized to an empty list, upon deserialization. 1563 1564 """ 1565 obj = super(NodeGroup, cls).FromDict(val) 1566 obj.members = [] 1567 return obj
1568
1569 - def UpgradeConfig(self):
1570 """Fill defaults for missing configuration values. 1571 1572 """ 1573 if self.ndparams is None: 1574 self.ndparams = {} 1575 1576 if self.serial_no is None: 1577 self.serial_no = 1 1578 1579 if self.alloc_policy is None: 1580 self.alloc_policy = constants.ALLOC_POLICY_PREFERRED 1581 1582 # We only update mtime, and not ctime, since we would not be able 1583 # to provide a correct value for creation time. 1584 if self.mtime is None: 1585 self.mtime = time.time() 1586 1587 if self.diskparams is None: 1588 self.diskparams = {} 1589 if self.ipolicy is None: 1590 self.ipolicy = MakeEmptyIPolicy() 1591 1592 if self.networks is None: 1593 self.networks = {} 1594 1595 for network, netparams in self.networks.items(): 1596 self.networks[network] = FillDict(constants.NICC_DEFAULTS, netparams)
1597
1598 - def FillND(self, node):
1599 """Return filled out ndparams for L{objects.Node} 1600 1601 @type node: L{objects.Node} 1602 @param node: A Node object to fill 1603 @return a copy of the node's ndparams with defaults filled 1604 1605 """ 1606 return self.SimpleFillND(node.ndparams)
1607
1608 - def SimpleFillND(self, ndparams):
1609 """Fill a given ndparams dict with defaults. 1610 1611 @type ndparams: dict 1612 @param ndparams: the dict to fill 1613 @rtype: dict 1614 @return: a copy of the passed in ndparams with missing keys filled 1615 from the node group defaults 1616 1617 """ 1618 return FillDict(self.ndparams, ndparams)
1619
1620 1621 -class Cluster(TaggableObject):
1622 """Config object representing the cluster.""" 1623 __slots__ = [ 1624 "serial_no", 1625 "rsahostkeypub", 1626 "dsahostkeypub", 1627 "highest_used_port", 1628 "tcpudp_port_pool", 1629 "mac_prefix", 1630 "volume_group_name", 1631 "reserved_lvs", 1632 "drbd_usermode_helper", 1633 "default_bridge", 1634 "default_hypervisor", 1635 "master_node", 1636 "master_ip", 1637 "master_netdev", 1638 "master_netmask", 1639 "use_external_mip_script", 1640 "cluster_name", 1641 "file_storage_dir", 1642 "shared_file_storage_dir", 1643 "gluster_storage_dir", 1644 "enabled_hypervisors", 1645 "hvparams", 1646 "ipolicy", 1647 "os_hvp", 1648 "beparams", 1649 "osparams", 1650 "osparams_private_cluster", 1651 "nicparams", 1652 "ndparams", 1653 "diskparams", 1654 "candidate_pool_size", 1655 "modify_etc_hosts", 1656 "modify_ssh_setup", 1657 "maintain_node_health", 1658 "uid_pool", 1659 "default_iallocator", 1660 "default_iallocator_params", 1661 "hidden_os", 1662 "blacklisted_os", 1663 "primary_ip_family", 1664 "prealloc_wipe_disks", 1665 "hv_state_static", 1666 "disk_state_static", 1667 "enabled_disk_templates", 1668 "candidate_certs", 1669 "max_running_jobs", 1670 "max_tracked_jobs", 1671 "install_image", 1672 "instance_communication_network", 1673 "zeroing_image", 1674 "compression_tools", 1675 "enabled_user_shutdown", 1676 "data_collectors", 1677 ] + _TIMESTAMPS + _UUID 1678
1679 - def UpgradeConfig(self):
1680 """Fill defaults for missing configuration values. 1681 1682 """ 1683 # pylint: disable=E0203 1684 # because these are "defined" via slots, not manually 1685 if self.hvparams is None: 1686 self.hvparams = constants.HVC_DEFAULTS 1687 else: 1688 for hypervisor in constants.HYPER_TYPES: 1689 try: 1690 existing_params = self.hvparams[hypervisor] 1691 except KeyError: 1692 existing_params = {} 1693 self.hvparams[hypervisor] = FillDict( 1694 constants.HVC_DEFAULTS[hypervisor], existing_params) 1695 1696 if self.os_hvp is None: 1697 self.os_hvp = {} 1698 1699 if self.osparams is None: 1700 self.osparams = {} 1701 # osparams_private_cluster added in 2.12 1702 if self.osparams_private_cluster is None: 1703 self.osparams_private_cluster = {} 1704 1705 self.ndparams = UpgradeNDParams(self.ndparams) 1706 1707 self.beparams = UpgradeGroupedParams(self.beparams, 1708 constants.BEC_DEFAULTS) 1709 for beparams_group in self.beparams: 1710 UpgradeBeParams(self.beparams[beparams_group]) 1711 1712 migrate_default_bridge = not self.nicparams 1713 self.nicparams = UpgradeGroupedParams(self.nicparams, 1714 constants.NICC_DEFAULTS) 1715 if migrate_default_bridge: 1716 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \ 1717 self.default_bridge 1718 1719 if self.modify_etc_hosts is None: 1720 self.modify_etc_hosts = True 1721 1722 if self.modify_ssh_setup is None: 1723 self.modify_ssh_setup = True 1724 1725 # default_bridge is no longer used in 2.1. The slot is left there to 1726 # support auto-upgrading. It can be removed once we decide to deprecate 1727 # upgrading straight from 2.0. 1728 if self.default_bridge is not None: 1729 self.default_bridge = None 1730 1731 # default_hypervisor is just the first enabled one in 2.1. This slot and 1732 # code can be removed once upgrading straight from 2.0 is deprecated. 
1733 if self.default_hypervisor is not None: 1734 self.enabled_hypervisors = ([self.default_hypervisor] + 1735 [hvname for hvname in self.enabled_hypervisors 1736 if hvname != self.default_hypervisor]) 1737 self.default_hypervisor = None 1738 1739 # maintain_node_health added after 2.1.1 1740 if self.maintain_node_health is None: 1741 self.maintain_node_health = False 1742 1743 if self.uid_pool is None: 1744 self.uid_pool = [] 1745 1746 if self.default_iallocator is None: 1747 self.default_iallocator = "" 1748 1749 if self.default_iallocator_params is None: 1750 self.default_iallocator_params = {} 1751 1752 # reserved_lvs added before 2.2 1753 if self.reserved_lvs is None: 1754 self.reserved_lvs = [] 1755 1756 # hidden and blacklisted operating systems added before 2.2.1 1757 if self.hidden_os is None: 1758 self.hidden_os = [] 1759 1760 if self.blacklisted_os is None: 1761 self.blacklisted_os = [] 1762 1763 # primary_ip_family added before 2.3 1764 if self.primary_ip_family is None: 1765 self.primary_ip_family = AF_INET 1766 1767 if self.master_netmask is None: 1768 ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family) 1769 self.master_netmask = ipcls.iplen 1770 1771 if self.prealloc_wipe_disks is None: 1772 self.prealloc_wipe_disks = False 1773 1774 # shared_file_storage_dir added before 2.5 1775 if self.shared_file_storage_dir is None: 1776 self.shared_file_storage_dir = "" 1777 1778 # gluster_storage_dir added in 2.11 1779 if self.gluster_storage_dir is None: 1780 self.gluster_storage_dir = "" 1781 1782 if self.use_external_mip_script is None: 1783 self.use_external_mip_script = False 1784 1785 if self.diskparams: 1786 self.diskparams = UpgradeDiskParams(self.diskparams) 1787 else: 1788 self.diskparams = constants.DISK_DT_DEFAULTS.copy() 1789 1790 # instance policy added before 2.6 1791 if self.ipolicy is None: 1792 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {}) 1793 else: 1794 # we can either make sure to upgrade the ipolicy always, or only 1795 # do it in some corner cases (e.g. missing keys); note that this 1796 # will break any removal of keys from the ipolicy dict 1797 wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS 1798 if wrongkeys: 1799 # These keys would be silently removed by FillIPolicy() 1800 msg = ("Cluster instance policy contains spurious keys: %s" % 1801 utils.CommaJoin(wrongkeys)) 1802 raise errors.ConfigurationError(msg) 1803 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy) 1804 1805 # hv_state_static added in 2.7 1806 if self.hv_state_static is None: 1807 self.hv_state_static = {} 1808 if self.disk_state_static is None: 1809 self.disk_state_static = {} 1810 1811 if self.candidate_certs is None: 1812 self.candidate_certs = {} 1813 1814 if self.max_running_jobs is None: 1815 self.max_running_jobs = constants.LUXID_MAXIMAL_RUNNING_JOBS_DEFAULT 1816 1817 if self.max_tracked_jobs is None: 1818 self.max_tracked_jobs = constants.LUXID_MAXIMAL_TRACKED_JOBS_DEFAULT 1819 1820 if self.instance_communication_network is None: 1821 self.instance_communication_network = "" 1822 1823 if self.install_image is None: 1824 self.install_image = "" 1825 1826 if self.compression_tools is None: 1827 self.compression_tools = constants.IEC_DEFAULT_TOOLS 1828 1829 if self.enabled_user_shutdown is None: 1830 self.enabled_user_shutdown = False
1831 1832 @property
1833 - def primary_hypervisor(self):
1834 """The first hypervisor is the primary. 1835 1836 Useful, for example, for L{Node}'s hv/disk state. 1837 1838 """ 1839 return self.enabled_hypervisors[0]
1840
1841 - def ToDict(self, _with_private=False):
1842 """Custom function for cluster. 1843 1844 """ 1845 mydict = super(Cluster, self).ToDict(_with_private=_with_private) 1846 1847 # Explicitly save private parameters. 1848 if _with_private: 1849 for os in mydict["osparams_private_cluster"]: 1850 mydict["osparams_private_cluster"][os] = \ 1851 self.osparams_private_cluster[os].Unprivate() 1852 1853 if self.tcpudp_port_pool is None: 1854 tcpudp_port_pool = [] 1855 else: 1856 tcpudp_port_pool = list(self.tcpudp_port_pool) 1857 1858 mydict["tcpudp_port_pool"] = tcpudp_port_pool 1859 1860 return mydict
1861 1862 @classmethod
1863 - def FromDict(cls, val):
1864 """Custom function for cluster. 1865 1866 """ 1867 obj = super(Cluster, cls).FromDict(val) 1868 1869 if obj.tcpudp_port_pool is None: 1870 obj.tcpudp_port_pool = set() 1871 elif not isinstance(obj.tcpudp_port_pool, set): 1872 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool) 1873 1874 return obj
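  # Illustrative sketch: ToDict()/FromDict() round-trip a cluster object.
  # As the two methods above show, tcpudp_port_pool is stored as a list in
  # the dict form and restored as a set; "cluster" is assumed to be an
  # existing Cluster instance:
  #
  #   >>> data = cluster.ToDict()
  #   >>> isinstance(data["tcpudp_port_pool"], list)
  #   True
  #   >>> isinstance(Cluster.FromDict(data).tcpudp_port_pool, set)
  #   True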
1875
1876 - def SimpleFillDP(self, diskparams):
1877 """Fill a given diskparams dict with cluster defaults. 1878 1879 @param diskparams: the dict to fill 1880 @return: a copy of the passed in diskparams with missing keys filled from the cluster defaults 1881 1882 """ 1883 return FillDiskParams(self.diskparams, diskparams)
1884
1885 - def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1886 """Get the default hypervisor parameters for the cluster. 1887 1888 @param hypervisor: the hypervisor name 1889 @param os_name: if specified, we'll also update the defaults for this OS 1890 @param skip_keys: if passed, list of keys not to use 1891 @return: the defaults dict 1892 1893 """ 1894 if skip_keys is None: 1895 skip_keys = [] 1896 1897 fill_stack = [self.hvparams.get(hypervisor, {})] 1898 if os_name is not None: 1899 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {}) 1900 fill_stack.append(os_hvp) 1901 1902 ret_dict = {} 1903 for o_dict in fill_stack: 1904 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys) 1905 1906 return ret_dict
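  # Illustrative sketch: GetHVDefaults() layers the OS-specific hypervisor
  # parameters (os_hvp) on top of the cluster-wide hvparams for one
  # hypervisor; the hypervisor and OS names below are examples only and
  # "cluster" is assumed to be an existing Cluster instance:
  #
  #   >>> base = cluster.GetHVDefaults("xen-pvm")
  #   >>> full = cluster.GetHVDefaults("xen-pvm", os_name="debian-image")
  #   >>> # keys in cluster.os_hvp["debian-image"]["xen-pvm"] override "base"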
1907
1908 - def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1909 """Fill a given hvparams dict with cluster defaults. 1910 1911 @type hv_name: string 1912 @param hv_name: the hypervisor to use 1913 @type os_name: string 1914 @param os_name: the OS to use for overriding the hypervisor defaults @type hvparams: dict @param hvparams: the dict to fill 1915 @type skip_globals: boolean 1916 @param skip_globals: if True, the global hypervisor parameters will 1917 not be filled 1918 @rtype: dict 1919 @return: a copy of the given hvparams with missing keys filled from 1920 the cluster defaults 1921 1922 """ 1923 if skip_globals: 1924 skip_keys = constants.HVC_GLOBALS 1925 else: 1926 skip_keys = [] 1927 1928 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys) 1929 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
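  # Illustrative sketch: SimpleFillHV() is the building block used by FillHV()
  # below; with skip_globals=True, keys listed in constants.HVC_GLOBALS are
  # excluded from the result even if present in the passed dict (see the
  # skip_keys handling in FillDict). Names below are examples only:
  #
  #   >>> hv = cluster.SimpleFillHV("kvm", "debian-image",
  #   ...                           {"kernel_path": ""}, skip_globals=True)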
1930
1931 - def FillHV(self, instance, skip_globals=False):
1932 """Fill an instance's hvparams dict with cluster defaults. 1933 1934 @type instance: L{objects.Instance} 1935 @param instance: the instance parameter to fill 1936 @type skip_globals: boolean 1937 @param skip_globals: if True, the global hypervisor parameters will 1938 not be filled 1939 @rtype: dict 1940 @return: a copy of the instance's hvparams with missing keys filled from 1941 the cluster defaults 1942 1943 """ 1944 return self.SimpleFillHV(instance.hypervisor, instance.os, 1945 instance.hvparams, skip_globals)
1946
1947 - def SimpleFillBE(self, beparams):
1948 """Fill a given beparams dict with cluster defaults. 1949 1950 @type beparams: dict 1951 @param beparams: the dict to fill 1952 @rtype: dict 1953 @return: a copy of the passed in beparams with missing keys filled 1954 from the cluster defaults 1955 1956 """ 1957 return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
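  # Illustrative sketch: only the cluster's default beparams group
  # (constants.PP_DEFAULT) is consulted, so overriding a single backend
  # parameter keeps the cluster defaults for the rest; the BE_* names are
  # assumed to be the usual backend parameter constants:
  #
  #   >>> be = cluster.SimpleFillBE({constants.BE_VCPUS: 4})
  #   >>> be[constants.BE_VCPUS]
  #   4
  #   >>> constants.BE_MAXMEM in be
  #   True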
1958
1959 - def FillBE(self, instance):
1960 """Fill an instance's beparams dict with cluster defaults. 1961 1962 @type instance: L{objects.Instance} 1963 @param instance: the instance parameter to fill 1964 @rtype: dict 1965 @return: a copy of the instance's beparams with missing keys filled from 1966 the cluster defaults 1967 1968 """ 1969 return self.SimpleFillBE(instance.beparams)
1970
1971 - def SimpleFillNIC(self, nicparams):
1972 """Fill a given nicparams dict with cluster defaults. 1973 1974 @type nicparams: dict 1975 @param nicparams: the dict to fill 1976 @rtype: dict 1977 @return: a copy of the passed in nicparams with missing keys filled 1978 from the cluster defaults 1979 1980 """ 1981 return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1982
1983 - def SimpleFillOS(self, os_name, 1984 os_params_public, 1985 os_params_private=None, 1986 os_params_secret=None):
1987 """Fill an instance's osparams dict with cluster defaults. 1988 1989 @type os_name: string 1990 @param os_name: the OS name to use 1991 @type os_params_public: dict 1992 @param os_params_public: the dict to fill with default values 1993 @type os_params_private: dict 1994 @param os_params_private: the dict with private fields to fill 1995 with default values. Not passing this field 1996 results in no private fields being added to the 1997 return value. Private fields will be wrapped in 1998 L{Private} objects. 1999 @type os_params_secret: dict 2000 @param os_params_secret: the dict with secret fields to fill 2001 with default values. Not passing this field 2002 results in no secret fields being added to the 2003 return value. Secret fields will be wrapped in 2004 L{Private} objects. 2005 @rtype: dict 2006 @return: a copy of the instance's osparams with missing keys filled from 2007 the cluster defaults. Private and secret parameters are not included 2008 unless the respective optional parameters are supplied. 2009 2010 """ 2011 if os_name is None: 2012 name_only = None 2013 else: 2014 name_only = OS.GetName(os_name) 2015 2016 defaults_base_public = self.osparams.get(name_only, {}) 2017 defaults_public = FillDict(defaults_base_public, 2018 self.osparams.get(os_name, {})) 2019 params_public = FillDict(defaults_public, os_params_public) 2020 2021 if os_params_private is not None: 2022 defaults_base_private = self.osparams_private_cluster.get(name_only, {}) 2023 defaults_private = FillDict(defaults_base_private, 2024 self.osparams_private_cluster.get(os_name, 2025 {})) 2026 params_private = FillDict(defaults_private, os_params_private) 2027 else: 2028 params_private = {} 2029 2030 if os_params_secret is not None: 2031 # There can't be default secret settings, so there's nothing to be done. 2032 params_secret = os_params_secret 2033 else: 2034 params_secret = {} 2035 2036 # Enforce that the sets of keys are disjoint: 2037 duplicate_keys = utils.GetRepeatedKeys(params_public, 2038 params_private, 2039 params_secret) 2040 if not duplicate_keys: 2041 2042 # Actually update them: 2043 params_public.update(params_private) 2044 params_public.update(params_secret) 2045 2046 return params_public 2047 2048 else: 2049 2050 def formatter(keys): 2051 return utils.CommaJoin(sorted(map(repr, keys))) if keys else "(none)"
2052 2053 # Only the keys matter from here on; drop the values. 2054 params_public = set(params_public) 2055 params_private = set(params_private) 2056 params_secret = set(params_secret) 2057 2058 msg = """Cannot assign multiple values to OS parameters: {dupes}. 2059 2060 Conflicting OS parameters that would have been set by this operation: 2061 - at public visibility: {public} 2062 - at private visibility: {private} 2063 - at secret visibility: {secret} 2064 """.format(dupes=formatter(duplicate_keys), 2065 public=formatter(params_public & duplicate_keys), 2066 private=formatter(params_private & duplicate_keys), 2067 secret=formatter(params_secret & duplicate_keys)) 2068 raise errors.OpPrereqError(msg)
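  # Illustrative sketch: the public, private and secret parameter dicts must
  # have disjoint keys; a key appearing at more than one visibility raises
  # OpPrereqError, otherwise a single merged dict is returned. The OS and
  # key names below are examples only:
  #
  #   >>> cluster.SimpleFillOS("debian-image", {"dhcp": "yes"},
  #   ...                      os_params_private={"root_password": "secret"})
  #   >>> cluster.SimpleFillOS("debian-image", {"dhcp": "yes"},
  #   ...                      os_params_private={"dhcp": "no"})  # raises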
2069 2070 @staticmethod
2071 - def SimpleFillHvState(hv_state):
2072 """Fill an hv_state sub dict with cluster defaults. 2073 2074 """ 2075 return FillDict(constants.HVST_DEFAULTS, hv_state)
2076 2077 @staticmethod
2078 - def SimpleFillDiskState(disk_state):
2079 """Fill a disk_state sub dict with cluster defaults. 2080 2081 """ 2082 return FillDict(constants.DS_DEFAULTS, disk_state)
2083
2084 - def FillND(self, node, nodegroup):
2085 """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node} 2086 2087 @type node: L{objects.Node} 2088 @param node: A Node object to fill 2089 @type nodegroup: L{objects.NodeGroup} 2090 @param nodegroup: A NodeGroup object to fill 2091 @return: a copy of the node's ndparams with defaults filled 2092 2093 """ 2094 return self.SimpleFillND(nodegroup.FillND(node))
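  # Illustrative sketch: ndparams are filled in two stages. The node's own
  # values are merged over the node group's (nodegroup.FillND(node)), and
  # that result is merged over the cluster-wide ndparams, so node settings
  # take precedence over group settings, which take precedence over the
  # cluster defaults. "cluster", "group" and "node" are assumed to exist:
  #
  #   >>> nd = cluster.FillND(node, group)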
2095
2096 - def FillNDGroup(self, nodegroup):
2097 """Return filled out ndparams for just L{objects.NodeGroup} 2098 2099 @type nodegroup: L{objects.NodeGroup} 2100 @param nodegroup: A NodeGroup object to fill 2101 @return: a copy of the node group's ndparams with defaults filled 2102 2103 """ 2104 return self.SimpleFillND(nodegroup.SimpleFillND({}))
2105
2106 - def SimpleFillND(self, ndparams):
2107 """Fill a given ndparams dict with defaults. 2108 2109 @type ndparams: dict 2110 @param ndparams: the dict to fill 2111 @rtype: dict 2112 @return: a copy of the passed in ndparams with missing keys filled 2113 from the cluster defaults 2114 2115 """ 2116 return FillDict(self.ndparams, ndparams)
2117
2118 - def SimpleFillIPolicy(self, ipolicy):
2119 """Fill instance policy dict with defaults. 2120 2121 @type ipolicy: dict 2122 @param ipolicy: the dict to fill 2123 @rtype: dict 2124 @return: a copy of the passed ipolicy with missing keys filled from 2125 the cluster defaults 2126 2127 """ 2128 return FillIPolicy(self.ipolicy, ipolicy)
2129
2130 - def IsDiskTemplateEnabled(self, disk_template):
2131 """Checks if a particular disk template is enabled. 2132 2133 """ 2134 return utils.storage.IsDiskTemplateEnabled( 2135 disk_template, self.enabled_disk_templates)
2136
2137 - def IsFileStorageEnabled(self):
2138 """Checks if file storage is enabled. 2139 2140 """ 2141 return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)
2142
2143 - def IsSharedFileStorageEnabled(self):
2144 """Checks if shared file storage is enabled. 2145 2146 """ 2147 return utils.storage.IsSharedFileStorageEnabled( 2148 self.enabled_disk_templates)
2149
2150 2151 -class BlockDevStatus(ConfigObject):
2152 """Config object representing the status of a block device.""" 2153 __slots__ = [ 2154 "dev_path", 2155 "major", 2156 "minor", 2157 "sync_percent", 2158 "estimated_time", 2159 "is_degraded", 2160 "ldisk_status", 2161 ]
2162
2163 2164 -class ImportExportStatus(ConfigObject):
2165 """Config object representing the status of an import or export.""" 2166 __slots__ = [ 2167 "recent_output", 2168 "listen_port", 2169 "connected", 2170 "progress_mbytes", 2171 "progress_throughput", 2172 "progress_eta", 2173 "progress_percent", 2174 "exit_status", 2175 "error_message", 2176 ] + _TIMESTAMPS
2177
2178 2179 -class ImportExportOptions(ConfigObject):
2180 """Options for import/export daemon 2181 2182 @ivar key_name: X509 key name (None for cluster certificate) 2183 @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate) 2184 @ivar compress: Compression tool to use 2185 @ivar magic: Used to ensure the connection goes to the right disk 2186 @ivar ipv6: Whether to use IPv6 2187 @ivar connect_timeout: Number of seconds for establishing connection 2188 2189 """ 2190 __slots__ = [ 2191 "key_name", 2192 "ca_pem", 2193 "compress", 2194 "magic", 2195 "ipv6", 2196 "connect_timeout", 2197 ]
2198
2199 2200 -class ConfdRequest(ConfigObject):
2201 """Object holding a confd request. 2202 2203 @ivar protocol: confd protocol version 2204 @ivar type: confd query type 2205 @ivar query: query request 2206 @ivar rsalt: requested reply salt 2207 2208 """ 2209 __slots__ = [ 2210 "protocol", 2211 "type", 2212 "query", 2213 "rsalt", 2214 ]
2215
2216 2217 -class ConfdReply(ConfigObject):
2218 """Object holding a confd reply. 2219 2220 @ivar protocol: confd protocol version 2221 @ivar status: reply status code (ok, error) 2222 @ivar answer: confd query reply 2223 @ivar serial: configuration serial number 2224 2225 """ 2226 __slots__ = [ 2227 "protocol", 2228 "status", 2229 "answer", 2230 "serial", 2231 ]
2232
2233 2234 -class QueryFieldDefinition(ConfigObject):
2235 """Object holding a query field definition. 2236 2237 @ivar name: Field name 2238 @ivar title: Human-readable title 2239 @ivar kind: Field type 2240 @ivar doc: Human-readable description 2241 2242 """ 2243 __slots__ = [ 2244 "name", 2245 "title", 2246 "kind", 2247 "doc", 2248 ]
2249
2250 2251 -class _QueryResponseBase(ConfigObject):
2252 __slots__ = [ 2253 "fields", 2254 ] 2255
2256 - def ToDict(self, _with_private=False):
2257 """Custom function for serializing. 2258 2259 """ 2260 mydict = super(_QueryResponseBase, self).ToDict() 2261 mydict["fields"] = outils.ContainerToDicts(mydict["fields"]) 2262 return mydict
2263 2264 @classmethod
2265 - def FromDict(cls, val):
2266 """Custom function for de-serializing. 2267 2268 """ 2269 obj = super(_QueryResponseBase, cls).FromDict(val) 2270 obj.fields = \ 2271 outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition) 2272 return obj
2273
2274 2275 -class QueryResponse(_QueryResponseBase):
2276 """Object holding the response to a query. 2277 2278 @ivar fields: List of L{QueryFieldDefinition} objects 2279 @ivar data: Requested data 2280 2281 """ 2282 __slots__ = [ 2283 "data", 2284 ]
2285
2286 2287 -class QueryFieldsRequest(ConfigObject):
2288 """Object holding a request for querying available fields. 2289 2290 """ 2291 __slots__ = [ 2292 "what", 2293 "fields", 2294 ]
2295
2296 2297 -class QueryFieldsResponse(_QueryResponseBase):
2298 """Object holding the response to a query for fields. 2299 2300 @ivar fields: List of L{QueryFieldDefinition} objects 2301 2302 """ 2303 __slots__ = []
2304
2305 2306 -class MigrationStatus(ConfigObject):
2307 """Object holding the status of a migration. 2308 2309 """ 2310 __slots__ = [ 2311 "status", 2312 "transferred_ram", 2313 "total_ram", 2314 ]
2315
2316 2317 -class InstanceConsole(ConfigObject):
2318 """Object describing how to access the console of an instance. 2319 2320 """ 2321 __slots__ = [ 2322 "instance", 2323 "kind", 2324 "message", 2325 "host", 2326 "port", 2327 "user", 2328 "command", 2329 "display", 2330 ] 2331
2332 - def Validate(self):
2333 """Validates contents of this object. 2334 2335 """ 2336 assert self.kind in constants.CONS_ALL, "Unknown console type" 2337 assert self.instance, "Missing instance name" 2338 assert self.message or self.kind in [constants.CONS_SSH, 2339 constants.CONS_SPICE, 2340 constants.CONS_VNC] 2341 assert self.host or self.kind == constants.CONS_MESSAGE 2342 assert self.port or self.kind in [constants.CONS_MESSAGE, 2343 constants.CONS_SSH] 2344 assert self.user or self.kind in [constants.CONS_MESSAGE, 2345 constants.CONS_SPICE, 2346 constants.CONS_VNC] 2347 assert self.command or self.kind in [constants.CONS_MESSAGE, 2348 constants.CONS_SPICE, 2349 constants.CONS_VNC] 2350 assert self.display or self.kind in [constants.CONS_MESSAGE, 2351 constants.CONS_SPICE, 2352 constants.CONS_SSH]
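  # Illustrative sketch: the assertions above encode which fields each
  # console kind requires; an SSH console, for instance, needs host, user
  # and command but no port, message or display. ConfigObject accepts its
  # slots as keyword arguments; the values below are examples only:
  #
  #   >>> cons = InstanceConsole(instance="inst1.example.com",
  #   ...                        kind=constants.CONS_SSH, host="node1",
  #   ...                        user="root", command=["ssh", "node1"])
  #   >>> cons.Validate()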
2353
2354 2355 -class Network(TaggableObject):
2356 """Object representing a network definition for ganeti. 2357 2358 """ 2359 __slots__ = [ 2360 "name", 2361 "serial_no", 2362 "mac_prefix", 2363 "network", 2364 "network6", 2365 "gateway", 2366 "gateway6", 2367 "reservations", 2368 "ext_reservations", 2369 ] + _TIMESTAMPS + _UUID 2370
2371 - def HooksDict(self, prefix=""):
2372 """Export a dictionary used by hooks with a network's information. 2373 2374 @type prefix: String 2375 @param prefix: Prefix to prepend to the dict entries 2376 2377 """ 2378 result = { 2379 "%sNETWORK_NAME" % prefix: self.name, 2380 "%sNETWORK_UUID" % prefix: self.uuid, 2381 "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()), 2382 } 2383 if self.network: 2384 result["%sNETWORK_SUBNET" % prefix] = self.network 2385 if self.gateway: 2386 result["%sNETWORK_GATEWAY" % prefix] = self.gateway 2387 if self.network6: 2388 result["%sNETWORK_SUBNET6" % prefix] = self.network6 2389 if self.gateway6: 2390 result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6 2391 if self.mac_prefix: 2392 result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix 2393 2394 return result
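  # Illustrative sketch: the prefix is prepended to every key, which lets a
  # caller export more than one network definition (e.g. an old and a new
  # one) into the same hook environment. The values below are examples only:
  #
  #   >>> net = Network(name="net100", uuid="example-uuid", tags=[],
  #   ...               network="10.0.0.0/24", gateway="10.0.0.1")
  #   >>> net.HooksDict("NEW_")["NEW_NETWORK_SUBNET"]
  #   '10.0.0.0/24'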
2395 2396 @classmethod
2397 - def FromDict(cls, val):
2398 """Custom function for networks. 2399 2400 Remove deprecated network_type and family. 2401 2402 """ 2403 if "network_type" in val: 2404 del val["network_type"] 2405 if "family" in val: 2406 del val["family"] 2407 obj = super(Network, cls).FromDict(val) 2408 return obj
2409
2410 2411 # need to inherit object in order to use super() 2412 -class SerializableConfigParser(ConfigParser.SafeConfigParser, object):
2413 """Simple wrapper over ConfigParser that allows serialization. 2414 2415 This class is basically ConfigParser.SafeConfigParser with two 2416 additional methods that allow it to serialize/unserialize to/from a 2417 buffer. 2418 2419 """
2420 - def Dumps(self):
2421 """Dump this instance and return the string representation.""" 2422 buf = StringIO() 2423 self.write(buf) 2424 return buf.getvalue()
2425 2426 @classmethod
2427 - def Loads(cls, data):
2428 """Load data from a string.""" 2429 buf = StringIO(data) 2430 cfp = cls() 2431 cfp.readfp(buf) 2432 return cfp
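  # Illustrative sketch: Dumps()/Loads() round-trip the parser through a
  # plain string. The section and option names below are examples only:
  #
  #   >>> cfp = SerializableConfigParser()
  #   >>> cfp.add_section("export")
  #   >>> cfp.set("export", "version", "0")
  #   >>> SerializableConfigParser.Loads(cfp.Dumps()).get("export", "version")
  #   '0'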
2433
2434 - def get(self, section, option, **kwargs):
2435 value = None 2436 try: 2437 value = super(SerializableConfigParser, self).get(section, option, 2438 **kwargs) 2439 if value.lower() == constants.VALUE_NONE: 2440 value = None 2441 except ConfigParser.NoOptionError: 2442 r = re.compile(r"(disk|nic)\d+_name|nic\d+_(network|vlan)") 2443 match = r.match(option) 2444 if match: 2445 pass 2446 else: 2447 raise 2448 2449 return value
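  # The override above maps the literal value "none" (constants.VALUE_NONE)
  # to None and, presumably for compatibility with data written before these
  # options existed, also returns None instead of raising NoOptionError for
  # missing "diskN_name", "nicN_name", "nicN_network" and "nicN_vlan"
  # options; any other missing option still raises. Illustrative sketch:
  #
  #   >>> cfp = SerializableConfigParser.Loads("[nic]\nnic0_mode = none\n")
  #   >>> cfp.get("nic", "nic0_mode") is None
  #   True
  #   >>> cfp.get("nic", "nic0_name") is None   # tolerated although missing
  #   True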
2450
2451 2452 -class LvmPvInfo(ConfigObject):
2453 """Information about an LVM physical volume (PV). 2454 2455 @type name: string 2456 @ivar name: name of the PV 2457 @type vg_name: string 2458 @ivar vg_name: name of the volume group containing the PV 2459 @type size: float 2460 @ivar size: size of the PV in MiB 2461 @type free: float 2462 @ivar free: free space in the PV, in MiB 2463 @type attributes: string 2464 @ivar attributes: PV attributes 2465 @type lv_list: list of strings 2466 @ivar lv_list: names of the LVs hosted on the PV 2467 """ 2468 __slots__ = [ 2469 "name", 2470 "vg_name", 2471 "size", 2472 "free", 2473 "attributes", 2474 "lv_list" 2475 ] 2476
2477 - def IsEmpty(self):
2478 """Is this PV empty? 2479 2480 """ 2481 return self.size <= (self.free + 1)
2482
2483 - def IsAllocatable(self):
2484 """Is this PV allocatable? 2485 2486 """ 2487 return ("a" in self.attributes)
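  # Illustrative sketch: IsEmpty() treats the PV as empty if its free space
  # is within 1 MiB of its total size (allowing for metadata and rounding),
  # and IsAllocatable() simply looks for the "a" flag in the LVM attribute
  # string. The values below are examples only:
  #
  #   >>> pv = LvmPvInfo(name="/dev/sda3", vg_name="xenvg", size=10240.0,
  #   ...                free=10239.5, attributes="a-", lv_list=[])
  #   >>> pv.IsEmpty()
  #   True
  #   >>> pv.IsAllocatable()
  #   True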
2488