Package ganeti :: Module objects

Source Code for Module ganeti.objects

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. 
   5  # 
   6  # This program is free software; you can redistribute it and/or modify 
   7  # it under the terms of the GNU General Public License as published by 
   8  # the Free Software Foundation; either version 2 of the License, or 
   9  # (at your option) any later version. 
  10  # 
  11  # This program is distributed in the hope that it will be useful, but 
  12  # WITHOUT ANY WARRANTY; without even the implied warranty of 
  13  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
  14  # General Public License for more details. 
  15  # 
  16  # You should have received a copy of the GNU General Public License 
  17  # along with this program; if not, write to the Free Software 
  18  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
  19  # 02110-1301, USA. 
  20   
  21   
  22  """Transportable objects for Ganeti. 
  23   
  24  This module provides small, mostly data-only objects which are safe to 
  25  pass to and from external parties. 
  26   
  27  """ 
  28   
  29  # pylint: disable=E0203,W0201,R0902 
  30   
  31  # E0203: Access to member %r before its definition, since we use 
  32  # objects.py which doesn't explicitly initialise its members 
  33   
  34  # W0201: Attribute '%s' defined outside __init__ 
  35   
  36  # R0902: Allow instances of these objects to have more than 20 attributes 
  37   
  38  import ConfigParser 
  39  import re 
  40  import copy 
  41  import logging 
  42  import time 
  43  from cStringIO import StringIO 
  44   
  45  from ganeti import errors 
  46  from ganeti import constants 
  47  from ganeti import netutils 
  48  from ganeti import outils 
  49  from ganeti import utils 
  50   
  51  from socket import AF_INET 
  52   
  53   
  54  __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance", 
  55             "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"] 
  56   
  57  _TIMESTAMPS = ["ctime", "mtime"] 
  58  _UUID = ["uuid"] 
59 60 61 -def FillDict(defaults_dict, custom_dict, skip_keys=None):
62 """Basic function to apply settings on top a default dict. 63 64 @type defaults_dict: dict 65 @param defaults_dict: dictionary holding the default values 66 @type custom_dict: dict 67 @param custom_dict: dictionary holding customized value 68 @type skip_keys: list 69 @param skip_keys: which keys not to fill 70 @rtype: dict 71 @return: dict with the 'full' values 72 73 """ 74 ret_dict = copy.deepcopy(defaults_dict) 75 ret_dict.update(custom_dict) 76 if skip_keys: 77 for k in skip_keys: 78 try: 79 del ret_dict[k] 80 except KeyError: 81 pass 82 return ret_dict
83
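As a usage sketch with plain dicts (no Ganeti constants assumed): the defaults are deep-copied, the custom values win, and skipped keys are dropped from the result.

  from ganeti.objects import FillDict

  defaults = {"mode": "bridged", "link": "br0", "mtu": 1500}
  custom = {"link": "br1"}
  filled = FillDict(defaults, custom, skip_keys=["mtu"])
  # filled == {"mode": "bridged", "link": "br1"}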
84 85 -def FillIPolicy(default_ipolicy, custom_ipolicy, skip_keys=None):
86 """Fills an instance policy with defaults. 87 88 """ 89 assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS 90 ret_dict = {} 91 for key in constants.IPOLICY_ISPECS: 92 ret_dict[key] = FillDict(default_ipolicy[key], 93 custom_ipolicy.get(key, {}), 94 skip_keys=skip_keys) 95 # list items 96 for key in [constants.IPOLICY_DTS]: 97 ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key])) 98 # other items which we know we can directly copy (immutables) 99 for key in constants.IPOLICY_PARAMETERS: 100 ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key]) 101 102 return ret_dict
103
104 105 -def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
106 """Fills the disk parameter defaults. 107 108 @see: L{FillDict} for parameters and return value 109 110 """ 111 assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES 112 113 return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}), 114 skip_keys=skip_keys)) 115 for dt in constants.DISK_TEMPLATES)
116
117 118 -def UpgradeGroupedParams(target, defaults):
119 """Update all groups for the target parameter. 120 121 @type target: dict of dicts 122 @param target: {group: {parameter: value}} 123 @type defaults: dict 124 @param defaults: default parameter values 125 126 """ 127 if target is None: 128 target = {constants.PP_DEFAULT: defaults} 129 else: 130 for group in target: 131 target[group] = FillDict(defaults, target[group]) 132 return target
133
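A small sketch of the per-group fill, with hypothetical parameter names (when target is None a single constants.PP_DEFAULT group holding the defaults is created instead):

  from ganeti.objects import UpgradeGroupedParams

  defaults = {"vcpu_ratio": 4.0, "spindle_ratio": 32.0}   # hypothetical names
  target = {"default": {"vcpu_ratio": 2.0}}
  upgraded = UpgradeGroupedParams(target, defaults)
  # upgraded["default"] == {"vcpu_ratio": 2.0, "spindle_ratio": 32.0}
  # each group is completed in place via FillDict(defaults, group_dict)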
134 135 -def UpgradeBeParams(target):
136 """Update the be parameters dict to the new format. 137 138 @type target: dict 139 @param target: "be" parameters dict 140 141 """ 142 if constants.BE_MEMORY in target: 143 memory = target[constants.BE_MEMORY] 144 target[constants.BE_MAXMEM] = memory 145 target[constants.BE_MINMEM] = memory 146 del target[constants.BE_MEMORY]
147
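A sketch of the in-place conversion, using the constants referenced in the code rather than literal key strings:

  from ganeti import constants
  from ganeti.objects import UpgradeBeParams

  beparams = {constants.BE_MEMORY: 128}
  UpgradeBeParams(beparams)
  # the single memory value is split into explicit limits:
  # beparams == {constants.BE_MAXMEM: 128, constants.BE_MINMEM: 128}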
148 149 -def UpgradeDiskParams(diskparams):
150 """Upgrade the disk parameters. 151 152 @type diskparams: dict 153 @param diskparams: disk parameters to upgrade 154 @rtype: dict 155 @return: the upgraded disk parameters dict 156 157 """ 158 if not diskparams: 159 result = {} 160 else: 161 result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams) 162 163 return result
164
165 166 -def UpgradeNDParams(ndparams):
167 """Upgrade ndparams structure. 168 169 @type ndparams: dict 170 @param ndparams: disk parameters to upgrade 171 @rtype: dict 172 @return: the upgraded node parameters dict 173 174 """ 175 if ndparams is None: 176 ndparams = {} 177 178 if (constants.ND_OOB_PROGRAM in ndparams and 179 ndparams[constants.ND_OOB_PROGRAM] is None): 180 # will be reset by the line below 181 del ndparams[constants.ND_OOB_PROGRAM] 182 return FillDict(constants.NDC_DEFAULTS, ndparams)
183
184 185 -def MakeEmptyIPolicy():
186 """Create empty IPolicy dictionary. 187 188 """ 189 return dict([ 190 (constants.ISPECS_MIN, {}), 191 (constants.ISPECS_MAX, {}), 192 (constants.ISPECS_STD, {}), 193 ])
194
195 196 -class ConfigObject(outils.ValidatedSlots):
197 """A generic config object. 198 199 It has the following properties: 200 201 - provides somewhat safe recursive unpickling and pickling for its classes 202 - unset attributes which are defined in slots are always returned 203 as None instead of raising an error 204 205 Classes derived from this must always declare __slots__ (we use many 206 config objects and the memory reduction is useful) 207 208 """ 209 __slots__ = [] 210
211 - def __getattr__(self, name):
212 if name not in self.GetAllSlots(): 213 raise AttributeError("Invalid object attribute %s.%s" % 214 (type(self).__name__, name)) 215 return None
216
217 - def __setstate__(self, state):
218 slots = self.GetAllSlots() 219 for name in state: 220 if name in slots: 221 setattr(self, name, state[name])
222
223 - def Validate(self):
224 """Validates the slots. 225 226 """
227
228 - def ToDict(self):
229 """Convert to a dict holding only standard python types. 230 231 The generic routine just dumps all of this object's attributes in 232 a dict. It does not work if the class has children who are 233 ConfigObjects themselves (e.g. the nics list in an Instance), in 234 which case the object should subclass the function in order to 235 make sure all objects returned are only standard python types. 236 237 """ 238 result = {} 239 for name in self.GetAllSlots(): 240 value = getattr(self, name, None) 241 if value is not None: 242 result[name] = value 243 return result
244 245 __getstate__ = ToDict 246 247 @classmethod
248 - def FromDict(cls, val):
249 """Create an object from a dictionary. 250 251 This generic routine takes a dict, instantiates a new instance of 252 the given class, and sets attributes based on the dict content. 253 254 As for `ToDict`, this does not work if the class has children 255 who are ConfigObjects themselves (e.g. the nics list in an 256 Instance), in which case the object should subclass the function 257 and alter the objects. 258 259 """ 260 if not isinstance(val, dict): 261 raise errors.ConfigurationError("Invalid object passed to FromDict:" 262 " expected dict, got %s" % type(val)) 263 val_str = dict([(str(k), v) for k, v in val.iteritems()]) 264 obj = cls(**val_str) # pylint: disable=W0142 265 return obj
266 267 @staticmethod
268 - def _ContainerToDicts(container):
269 """Convert the elements of a container to standard python types. 270 271 This method converts a container with elements derived from 272 ConfigData to standard python types. If the container is a dict, 273 we don't touch the keys, only the values. 274 275 """ 276 if isinstance(container, dict): 277 ret = dict([(k, v.ToDict()) for k, v in container.iteritems()]) 278 elif isinstance(container, (list, tuple, set, frozenset)): 279 ret = [elem.ToDict() for elem in container] 280 else: 281 raise TypeError("Invalid type %s passed to _ContainerToDicts" % 282 type(container)) 283 return ret
284 285 @staticmethod
286 - def _ContainerFromDicts(source, c_type, e_type):
287 """Convert a container from standard python types. 288 289 This method converts a container with standard python types to 290 ConfigData objects. If the container is a dict, we don't touch the 291 keys, only the values. 292 293 """ 294 if not isinstance(c_type, type): 295 raise TypeError("Container type %s passed to _ContainerFromDicts is" 296 " not a type" % type(c_type)) 297 if source is None: 298 source = c_type() 299 if c_type is dict: 300 ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()]) 301 elif c_type in (list, tuple, set, frozenset): 302 ret = c_type([e_type.FromDict(elem) for elem in source]) 303 else: 304 raise TypeError("Invalid container type %s passed to" 305 " _ContainerFromDicts" % c_type) 306 return ret
307
308 - def Copy(self):
309 """Makes a deep copy of the current object and its children. 310 311 """ 312 dict_form = self.ToDict() 313 clone_obj = self.__class__.FromDict(dict_form) 314 return clone_obj
315
316 - def __repr__(self):
317 """Implement __repr__ for ConfigObjects.""" 318 return repr(self.ToDict())
319
320 - def UpgradeConfig(self):
321 """Fill defaults for missing configuration values. 322 323 This method will be called at configuration load time, and its 324 implementation will be object dependent. 325 326 """ 327 pass
328
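A round-trip sketch with a hypothetical subclass (assuming the keyword-argument constructor provided by outils.ValidatedSlots):

  from ganeti.objects import ConfigObject

  class ExampleObject(ConfigObject):      # hypothetical, for illustration only
    __slots__ = ["name", "size"]

  obj = ExampleObject(name="demo")
  assert obj.size is None                 # unset slots read as None
  data = obj.ToDict()                     # {"name": "demo"}, only set attributes
  clone = ExampleObject.FromDict(data)    # rebuilt from standard python types
  deep = clone.Copy()                     # deep copy via ToDict/FromDict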
329 330 -class TaggableObject(ConfigObject):
331 """An generic class supporting tags. 332 333 """ 334 __slots__ = ["tags"] 335 VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$") 336 337 @classmethod
338 - def ValidateTag(cls, tag):
339 """Check if a tag is valid. 340 341 If the tag is invalid, an errors.TagError will be raised. The 342 function has no return value. 343 344 """ 345 if not isinstance(tag, basestring): 346 raise errors.TagError("Invalid tag type (not a string)") 347 if len(tag) > constants.MAX_TAG_LEN: 348 raise errors.TagError("Tag too long (>%d characters)" % 349 constants.MAX_TAG_LEN) 350 if not tag: 351 raise errors.TagError("Tags cannot be empty") 352 if not cls.VALID_TAG_RE.match(tag): 353 raise errors.TagError("Tag contains invalid characters")
354
355 - def GetTags(self):
356 """Return the tags list. 357 358 """ 359 tags = getattr(self, "tags", None) 360 if tags is None: 361 tags = self.tags = set() 362 return tags
363
364 - def AddTag(self, tag):
365 """Add a new tag. 366 367 """ 368 self.ValidateTag(tag) 369 tags = self.GetTags() 370 if len(tags) >= constants.MAX_TAGS_PER_OBJ: 371 raise errors.TagError("Too many tags") 372 self.GetTags().add(tag)
373
374 - def RemoveTag(self, tag):
375 """Remove a tag. 376 377 """ 378 self.ValidateTag(tag) 379 tags = self.GetTags() 380 try: 381 tags.remove(tag) 382 except KeyError: 383 raise errors.TagError("Tag not found")
384
385 - def ToDict(self):
386 """Taggable-object-specific conversion to standard python types. 387 388 This replaces the tags set with a list. 389 390 """ 391 bo = super(TaggableObject, self).ToDict() 392 393 tags = bo.get("tags", None) 394 if isinstance(tags, set): 395 bo["tags"] = list(tags) 396 return bo
397 398 @classmethod
399 - def FromDict(cls, val):
400 """Custom function for instances. 401 402 """ 403 obj = super(TaggableObject, cls).FromDict(val) 404 if hasattr(obj, "tags") and isinstance(obj.tags, list): 405 obj.tags = set(obj.tags) 406 return obj
407
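A brief tag-handling sketch using a Node (any TaggableObject subclass behaves the same; the keyword constructor is assumed):

  from ganeti.objects import Node

  node = Node(name="node1.example.com")
  node.AddTag("production")        # checked against VALID_TAG_RE and MAX_TAG_LEN
  node.AddTag("rack:a4")
  data = node.ToDict()             # the internal tags set becomes a plain list
  restored = Node.FromDict(data)   # and is turned back into a set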
408 409 -class MasterNetworkParameters(ConfigObject):
410 """Network configuration parameters for the master 411 412 @ivar name: master name 413 @ivar ip: master IP 414 @ivar netmask: master netmask 415 @ivar netdev: master network device 416 @ivar ip_family: master IP family 417 418 """ 419 __slots__ = [ 420 "name", 421 "ip", 422 "netmask", 423 "netdev", 424 "ip_family", 425 ]
426
427 428 -class ConfigData(ConfigObject):
429 """Top-level config object.""" 430 __slots__ = [ 431 "version", 432 "cluster", 433 "nodes", 434 "nodegroups", 435 "instances", 436 "networks", 437 "serial_no", 438 ] + _TIMESTAMPS 439
440 - def ToDict(self):
441 """Custom function for top-level config data. 442 443 This just replaces the list of instances, nodes and the cluster 444 with standard python types. 445 446 """ 447 mydict = super(ConfigData, self).ToDict() 448 mydict["cluster"] = mydict["cluster"].ToDict() 449 for key in "nodes", "instances", "nodegroups", "networks": 450 mydict[key] = self._ContainerToDicts(mydict[key]) 451 452 return mydict
453 454 @classmethod
455 - def FromDict(cls, val):
456 """Custom function for top-level config data 457 458 """ 459 obj = super(ConfigData, cls).FromDict(val) 460 obj.cluster = Cluster.FromDict(obj.cluster) 461 obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node) 462 obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance) 463 obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup) 464 obj.networks = cls._ContainerFromDicts(obj.networks, dict, Network) 465 return obj
466
467 - def HasAnyDiskOfType(self, dev_type):
468 """Check if in there is at disk of the given type in the configuration. 469 470 @type dev_type: L{constants.LDS_BLOCK} 471 @param dev_type: the type to look for 472 @rtype: boolean 473 @return: boolean indicating if a disk of the given type was found or not 474 475 """ 476 for instance in self.instances.values(): 477 for disk in instance.disks: 478 if disk.IsBasedOnDiskType(dev_type): 479 return True 480 return False
481
482 - def UpgradeConfig(self):
483 """Fill defaults for missing configuration values. 484 485 """ 486 self.cluster.UpgradeConfig() 487 for node in self.nodes.values(): 488 node.UpgradeConfig() 489 for instance in self.instances.values(): 490 instance.UpgradeConfig() 491 if self.nodegroups is None: 492 self.nodegroups = {} 493 for nodegroup in self.nodegroups.values(): 494 nodegroup.UpgradeConfig() 495 if self.cluster.drbd_usermode_helper is None: 496 # To decide if we set an helper let's check if at least one instance has 497 # a DRBD disk. This does not cover all the possible scenarios but it 498 # gives a good approximation. 499 if self.HasAnyDiskOfType(constants.LD_DRBD8): 500 self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER 501 if self.networks is None: 502 self.networks = {} 503 for network in self.networks.values(): 504 network.UpgradeConfig()
505
506 507 -class NIC(ConfigObject):
508 """Config object representing a network card.""" 509 __slots__ = ["mac", "ip", "network", "nicparams", "netinfo"] 510 511 @classmethod
512 - def CheckParameterSyntax(cls, nicparams):
513 """Check the given parameters for validity. 514 515 @type nicparams: dict 516 @param nicparams: dictionary with parameter names/value 517 @raise errors.ConfigurationError: when a parameter is not valid 518 519 """ 520 mode = nicparams[constants.NIC_MODE] 521 if (mode not in constants.NIC_VALID_MODES and 522 mode != constants.VALUE_AUTO): 523 raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode) 524 525 if (mode == constants.NIC_MODE_BRIDGED and 526 not nicparams[constants.NIC_LINK]): 527 raise errors.ConfigurationError("Missing bridged NIC link")
528
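For example (a sketch; in practice nicparams arrive pre-filled from the cluster defaults):

  from ganeti import constants, errors
  from ganeti.objects import NIC

  good = {constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
          constants.NIC_LINK: "br0"}
  NIC.CheckParameterSyntax(good)    # passes silently

  bad = {constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
         constants.NIC_LINK: ""}
  try:
    NIC.CheckParameterSyntax(bad)
  except errors.ConfigurationError:
    pass                            # missing bridged NIC link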
529 530 -class Disk(ConfigObject):
531 """Config object representing a block device.""" 532 __slots__ = ["dev_type", "logical_id", "physical_id", 533 "children", "iv_name", "size", "mode", "params"] 534
535 - def CreateOnSecondary(self):
536 """Test if this device needs to be created on a secondary node.""" 537 return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
538
539 - def AssembleOnSecondary(self):
540 """Test if this device needs to be assembled on a secondary node.""" 541 return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
542
543 - def OpenOnSecondary(self):
544 """Test if this device needs to be opened on a secondary node.""" 545 return self.dev_type in (constants.LD_LV,)
546
547 - def StaticDevPath(self):
548 """Return the device path if this device type has a static one. 549 550 Some devices (LVM for example) live always at the same /dev/ path, 551 irrespective of their status. For such devices, we return this 552 path, for others we return None. 553 554 @warning: The path returned is not a normalized pathname; callers 555 should check that it is a valid path. 556 557 """ 558 if self.dev_type == constants.LD_LV: 559 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1]) 560 elif self.dev_type == constants.LD_BLOCKDEV: 561 return self.logical_id[1] 562 elif self.dev_type == constants.LD_RBD: 563 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1]) 564 return None
565
566 - def ChildrenNeeded(self):
567 """Compute the needed number of children for activation. 568 569 This method will return either -1 (all children) or a positive 570 number denoting the minimum number of children needed for 571 activation (only mirrored devices will usually return >=0). 572 573 Currently, only DRBD8 supports diskless activation (therefore we 574 return 0), for all other we keep the previous semantics and return 575 -1. 576 577 """ 578 if self.dev_type == constants.LD_DRBD8: 579 return 0 580 return -1
581
582 - def IsBasedOnDiskType(self, dev_type):
583 """Check if the disk or its children are based on the given type. 584 585 @type dev_type: L{constants.LDS_BLOCK} 586 @param dev_type: the type to look for 587 @rtype: boolean 588 @return: boolean indicating if a device of the given type was found or not 589 590 """ 591 if self.children: 592 for child in self.children: 593 if child.IsBasedOnDiskType(dev_type): 594 return True 595 return self.dev_type == dev_type
596
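A sketch of the recursive check on a DRBD disk backed by two LVs (hypothetical logical IDs, keyword constructor assumed):

  from ganeti import constants
  from ganeti.objects import Disk

  data_lv = Disk(dev_type=constants.LD_LV, size=1024,
                 logical_id=("xenvg", "data-lv"))       # (vg_name, lv_name)
  meta_lv = Disk(dev_type=constants.LD_LV, size=128,
                 logical_id=("xenvg", "meta-lv"))
  drbd = Disk(dev_type=constants.LD_DRBD8, size=1024,
              children=[data_lv, meta_lv],
              logical_id=("node1", "node2", 11000, 0, 0, "secret"))
  assert drbd.IsBasedOnDiskType(constants.LD_LV)         # found in the children
  assert not data_lv.IsBasedOnDiskType(constants.LD_DRBD8)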
597 - def GetNodes(self, node):
598 """This function returns the nodes this device lives on. 599 600 Given the node on which the parent of the device lives on (or, in 601 case of a top-level device, the primary node of the devices' 602 instance), this function will return a list of nodes on which this 603 devices needs to (or can) be assembled. 604 605 """ 606 if self.dev_type in [constants.LD_LV, constants.LD_FILE, 607 constants.LD_BLOCKDEV, constants.LD_RBD, 608 constants.LD_EXT]: 609 result = [node] 610 elif self.dev_type in constants.LDS_DRBD: 611 result = [self.logical_id[0], self.logical_id[1]] 612 if node not in result: 613 raise errors.ConfigurationError("DRBD device passed unknown node") 614 else: 615 raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type) 616 return result
617
618 - def ComputeNodeTree(self, parent_node):
619 """Compute the node/disk tree for this disk and its children. 620 621 This method, given the node on which the parent disk lives, will 622 return the list of all (node, disk) pairs which describe the disk 623 tree in the most compact way. For example, a drbd/lvm stack 624 will be returned as (primary_node, drbd) and (secondary_node, drbd) 625 which represents all the top-level devices on the nodes. 626 627 """ 628 my_nodes = self.GetNodes(parent_node) 629 result = [(node, self) for node in my_nodes] 630 if not self.children: 631 # leaf device 632 return result 633 for node in my_nodes: 634 for child in self.children: 635 child_result = child.ComputeNodeTree(node) 636 if len(child_result) == 1: 637 # child (and all its descendants) is simple, doesn't split 638 # over multiple hosts, so we don't need to describe it, our 639 # own entry for this node describes it completely 640 continue 641 else: 642 # check if child nodes differ from my nodes; note that 643 # subdisk can differ from the child itself, and be instead 644 # one of its descendants 645 for subnode, subdisk in child_result: 646 if subnode not in my_nodes: 647 result.append((subnode, subdisk)) 648 # otherwise child is under our own node, so we ignore this 649 # entry (but probably the other results in the list will 650 # be different) 651 return result
652
653 - def ComputeGrowth(self, amount):
654 """Compute the per-VG growth requirements. 655 656 This only works for VG-based disks. 657 658 @type amount: integer 659 @param amount: the desired increase in (user-visible) disk space 660 @rtype: dict 661 @return: a dictionary of volume-groups and the required size 662 663 """ 664 if self.dev_type == constants.LD_LV: 665 return {self.logical_id[0]: amount} 666 elif self.dev_type == constants.LD_DRBD8: 667 if self.children: 668 return self.children[0].ComputeGrowth(amount) 669 else: 670 return {} 671 else: 672 # Other disk types do not require VG space 673 return {}
674
675 - def RecordGrow(self, amount):
676 """Update the size of this disk after growth. 677 678 This method recurses over the disks's children and updates their 679 size correspondigly. The method needs to be kept in sync with the 680 actual algorithms from bdev. 681 682 """ 683 if self.dev_type in (constants.LD_LV, constants.LD_FILE, 684 constants.LD_RBD, constants.LD_EXT): 685 self.size += amount 686 elif self.dev_type == constants.LD_DRBD8: 687 if self.children: 688 self.children[0].RecordGrow(amount) 689 self.size += amount 690 else: 691 raise errors.ProgrammerError("Disk.RecordGrow called for unsupported" 692 " disk type %s" % self.dev_type)
693
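A small growth sketch on a plain LV disk (hypothetical volume group; for DRBD8 both the device itself and its data LV, children[0], are grown):

  from ganeti import constants
  from ganeti.objects import Disk

  lv = Disk(dev_type=constants.LD_LV, size=1024, logical_id=("xenvg", "data-lv"))
  assert lv.ComputeGrowth(512) == {"xenvg": 512}   # extra space needed per VG
  lv.RecordGrow(512)
  assert lv.size == 1536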
694 - def Update(self, size=None, mode=None):
695 """Apply changes to size and mode. 696 697 """ 698 if self.dev_type == constants.LD_DRBD8: 699 if self.children: 700 self.children[0].Update(size=size, mode=mode) 701 else: 702 assert not self.children 703 704 if size is not None: 705 self.size = size 706 if mode is not None: 707 self.mode = mode
708
709 - def UnsetSize(self):
710 """Sets recursively the size to zero for the disk and its children. 711 712 """ 713 if self.children: 714 for child in self.children: 715 child.UnsetSize() 716 self.size = 0
717
718 - def SetPhysicalID(self, target_node, nodes_ip):
719 """Convert the logical ID to the physical ID. 720 721 This is used only for drbd, which needs ip/port configuration. 722 723 The routine descends down and updates its children also, because 724 this helps when the only the top device is passed to the remote 725 node. 726 727 Arguments: 728 - target_node: the node we wish to configure for 729 - nodes_ip: a mapping of node name to ip 730 731 The target_node must exist in in nodes_ip, and must be one of the 732 nodes in the logical ID for each of the DRBD devices encountered 733 in the disk tree. 734 735 """ 736 if self.children: 737 for child in self.children: 738 child.SetPhysicalID(target_node, nodes_ip) 739 740 if self.logical_id is None and self.physical_id is not None: 741 return 742 if self.dev_type in constants.LDS_DRBD: 743 pnode, snode, port, pminor, sminor, secret = self.logical_id 744 if target_node not in (pnode, snode): 745 raise errors.ConfigurationError("DRBD device not knowing node %s" % 746 target_node) 747 pnode_ip = nodes_ip.get(pnode, None) 748 snode_ip = nodes_ip.get(snode, None) 749 if pnode_ip is None or snode_ip is None: 750 raise errors.ConfigurationError("Can't find primary or secondary node" 751 " for %s" % str(self)) 752 p_data = (pnode_ip, port) 753 s_data = (snode_ip, port) 754 if pnode == target_node: 755 self.physical_id = p_data + s_data + (pminor, secret) 756 else: # it must be secondary, we tested above 757 self.physical_id = s_data + p_data + (sminor, secret) 758 else: 759 self.physical_id = self.logical_id 760 return
761
762 - def ToDict(self):
763 """Disk-specific conversion to standard python types. 764 765 This replaces the children lists of objects with lists of 766 standard python types. 767 768 """ 769 bo = super(Disk, self).ToDict() 770 771 for attr in ("children",): 772 alist = bo.get(attr, None) 773 if alist: 774 bo[attr] = self._ContainerToDicts(alist) 775 return bo
776 777 @classmethod
778 - def FromDict(cls, val):
779 """Custom function for Disks 780 781 """ 782 obj = super(Disk, cls).FromDict(val) 783 if obj.children: 784 obj.children = cls._ContainerFromDicts(obj.children, list, Disk) 785 if obj.logical_id and isinstance(obj.logical_id, list): 786 obj.logical_id = tuple(obj.logical_id) 787 if obj.physical_id and isinstance(obj.physical_id, list): 788 obj.physical_id = tuple(obj.physical_id) 789 if obj.dev_type in constants.LDS_DRBD: 790 # we need a tuple of length six here 791 if len(obj.logical_id) < 6: 792 obj.logical_id += (None,) * (6 - len(obj.logical_id)) 793 return obj
794
795 - def __str__(self):
796 """Custom str() formatter for disks. 797 798 """ 799 if self.dev_type == constants.LD_LV: 800 val = "<LogicalVolume(/dev/%s/%s" % self.logical_id 801 elif self.dev_type in constants.LDS_DRBD: 802 node_a, node_b, port, minor_a, minor_b = self.logical_id[:5] 803 val = "<DRBD8(" 804 if self.physical_id is None: 805 phy = "unconfigured" 806 else: 807 phy = ("configured as %s:%s %s:%s" % 808 (self.physical_id[0], self.physical_id[1], 809 self.physical_id[2], self.physical_id[3])) 810 811 val += ("hosts=%s/%d-%s/%d, port=%s, %s, " % 812 (node_a, minor_a, node_b, minor_b, port, phy)) 813 if self.children and self.children.count(None) == 0: 814 val += "backend=%s, metadev=%s" % (self.children[0], self.children[1]) 815 else: 816 val += "no local storage" 817 else: 818 val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" % 819 (self.dev_type, self.logical_id, self.physical_id, self.children)) 820 if self.iv_name is None: 821 val += ", not visible" 822 else: 823 val += ", visible as /dev/%s" % self.iv_name 824 if isinstance(self.size, int): 825 val += ", size=%dm)>" % self.size 826 else: 827 val += ", size='%s')>" % (self.size,) 828 return val
829
830 - def Verify(self):
831 """Checks that this disk is correctly configured. 832 833 """ 834 all_errors = [] 835 if self.mode not in constants.DISK_ACCESS_SET: 836 all_errors.append("Disk access mode '%s' is invalid" % (self.mode, )) 837 return all_errors
838
839 - def UpgradeConfig(self):
840 """Fill defaults for missing configuration values. 841 842 """ 843 if self.children: 844 for child in self.children: 845 child.UpgradeConfig() 846 847 # FIXME: Make this configurable in Ganeti 2.7 848 self.params = {}
849 # add here config upgrade for this disk 850 851 @staticmethod
852 - def ComputeLDParams(disk_template, disk_params):
853 """Computes Logical Disk parameters from Disk Template parameters. 854 855 @type disk_template: string 856 @param disk_template: disk template, one of L{constants.DISK_TEMPLATES} 857 @type disk_params: dict 858 @param disk_params: disk template parameters; 859 dict(template_name -> parameters 860 @rtype: list(dict) 861 @return: a list of dicts, one for each node of the disk hierarchy. Each dict 862 contains the LD parameters of the node. The tree is flattened in-order. 863 864 """ 865 if disk_template not in constants.DISK_TEMPLATES: 866 raise errors.ProgrammerError("Unknown disk template %s" % disk_template) 867 868 assert disk_template in disk_params 869 870 result = list() 871 dt_params = disk_params[disk_template] 872 if disk_template == constants.DT_DRBD8: 873 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], { 874 constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE], 875 constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS], 876 constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS], 877 constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG], 878 constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM], 879 constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM], 880 constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC], 881 constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD], 882 constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET], 883 constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET], 884 constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE], 885 constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE], 886 })) 887 888 # data LV 889 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], { 890 constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES], 891 })) 892 893 # metadata LV 894 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], { 895 constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES], 896 })) 897 898 elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE): 899 result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE]) 900 901 elif disk_template == constants.DT_PLAIN: 902 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], { 903 constants.LDP_STRIPES: dt_params[constants.LV_STRIPES], 904 })) 905 906 elif disk_template == constants.DT_BLOCK: 907 result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV]) 908 909 elif disk_template == constants.DT_RBD: 910 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], { 911 constants.LDP_POOL: dt_params[constants.RBD_POOL], 912 })) 913 914 elif disk_template == constants.DT_EXT: 915 result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT]) 916 917 return result
918
919 920 -class InstancePolicy(ConfigObject):
921 """Config object representing instance policy limits dictionary. 922 923 924 Note that this object is not actually used in the config, it's just 925 used as a placeholder for a few functions. 926 927 """ 928 @classmethod
929 - def CheckParameterSyntax(cls, ipolicy, check_std):
930 """ Check the instance policy for validity. 931 932 """ 933 for param in constants.ISPECS_PARAMETERS: 934 InstancePolicy.CheckISpecSyntax(ipolicy, param, check_std) 935 if constants.IPOLICY_DTS in ipolicy: 936 InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS]) 937 for key in constants.IPOLICY_PARAMETERS: 938 if key in ipolicy: 939 InstancePolicy.CheckParameter(key, ipolicy[key]) 940 wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS 941 if wrong_keys: 942 raise errors.ConfigurationError("Invalid keys in ipolicy: %s" % 943 utils.CommaJoin(wrong_keys))
944 945 @classmethod
946 - def CheckISpecSyntax(cls, ipolicy, name, check_std):
947 """Check the instance policy for validity on a given key. 948 949 We check if the instance policy makes sense for a given key, that is 950 if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name]. 951 952 @type ipolicy: dict 953 @param ipolicy: dictionary with min, max, std specs 954 @type name: string 955 @param name: what are the limits for 956 @type check_std: bool 957 @param check_std: Whether to check std value or just assume compliance 958 @raise errors.ConfigureError: when specs for given name are not valid 959 960 """ 961 min_v = ipolicy[constants.ISPECS_MIN].get(name, 0) 962 963 if check_std: 964 std_v = ipolicy[constants.ISPECS_STD].get(name, min_v) 965 std_msg = std_v 966 else: 967 std_v = min_v 968 std_msg = "-" 969 970 max_v = ipolicy[constants.ISPECS_MAX].get(name, std_v) 971 err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" % 972 (name, 973 ipolicy[constants.ISPECS_MIN].get(name, "-"), 974 ipolicy[constants.ISPECS_MAX].get(name, "-"), 975 std_msg)) 976 if min_v > std_v or std_v > max_v: 977 raise errors.ConfigurationError(err)
978 979 @classmethod
980 - def CheckDiskTemplates(cls, disk_templates):
981 """Checks the disk templates for validity. 982 983 """ 984 if not disk_templates: 985 raise errors.ConfigurationError("Instance policy must contain" + 986 " at least one disk template") 987 wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES) 988 if wrong: 989 raise errors.ConfigurationError("Invalid disk template(s) %s" % 990 utils.CommaJoin(wrong))
991 992 @classmethod
993 - def CheckParameter(cls, key, value):
994 """Checks a parameter. 995 996 Currently we expect all parameters to be float values. 997 998 """ 999 try: 1000 float(value) 1001 except (TypeError, ValueError), err: 1002 raise errors.ConfigurationError("Invalid value for key" " '%s':" 1003 " '%s', error: %s" % (key, value, err))
1004
1005 1006 -class Instance(TaggableObject):
1007 """Config object representing an instance.""" 1008 __slots__ = [ 1009 "name", 1010 "primary_node", 1011 "os", 1012 "hypervisor", 1013 "hvparams", 1014 "beparams", 1015 "osparams", 1016 "admin_state", 1017 "nics", 1018 "disks", 1019 "disk_template", 1020 "network_port", 1021 "serial_no", 1022 ] + _TIMESTAMPS + _UUID 1023
1024 - def _ComputeSecondaryNodes(self):
1025 """Compute the list of secondary nodes. 1026 1027 This is a simple wrapper over _ComputeAllNodes. 1028 1029 """ 1030 all_nodes = set(self._ComputeAllNodes()) 1031 all_nodes.discard(self.primary_node) 1032 return tuple(all_nodes)
1033 1034 secondary_nodes = property(_ComputeSecondaryNodes, None, None, 1035 "List of names of secondary nodes") 1036
1037 - def _ComputeAllNodes(self):
1038 """Compute the list of all nodes. 1039 1040 Since the data is already there (in the drbd disks), keeping it as 1041 a separate normal attribute is redundant and if not properly 1042 synchronised can cause problems. Thus it's better to compute it 1043 dynamically. 1044 1045 """ 1046 def _Helper(nodes, device): 1047 """Recursively computes nodes given a top device.""" 1048 if device.dev_type in constants.LDS_DRBD: 1049 nodea, nodeb = device.logical_id[:2] 1050 nodes.add(nodea) 1051 nodes.add(nodeb) 1052 if device.children: 1053 for child in device.children: 1054 _Helper(nodes, child)
1055 1056 all_nodes = set() 1057 all_nodes.add(self.primary_node) 1058 for device in self.disks: 1059 _Helper(all_nodes, device) 1060 return tuple(all_nodes)
1061 1062 all_nodes = property(_ComputeAllNodes, None, None, 1063 "List of names of all the nodes of the instance") 1064
1065 - def MapLVsByNode(self, lvmap=None, devs=None, node=None):
1066 """Provide a mapping of nodes to LVs this instance owns. 1067 1068 This function figures out what logical volumes should belong on 1069 which nodes, recursing through a device tree. 1070 1071 @param lvmap: optional dictionary to receive the 1072 'node' : ['lv', ...] data. 1073 1074 @return: None if lvmap arg is given, otherwise, a dictionary of 1075 the form { 'nodename' : ['volume1', 'volume2', ...], ... }; 1076 volumeN is of the form "vg_name/lv_name", compatible with 1077 GetVolumeList() 1078 1079 """ 1080 if node is None: 1081 node = self.primary_node 1082 1083 if lvmap is None: 1084 lvmap = { 1085 node: [], 1086 } 1087 ret = lvmap 1088 else: 1089 if not node in lvmap: 1090 lvmap[node] = [] 1091 ret = None 1092 1093 if not devs: 1094 devs = self.disks 1095 1096 for dev in devs: 1097 if dev.dev_type == constants.LD_LV: 1098 lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1]) 1099 1100 elif dev.dev_type in constants.LDS_DRBD: 1101 if dev.children: 1102 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0]) 1103 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1]) 1104 1105 elif dev.children: 1106 self.MapLVsByNode(lvmap, dev.children, node) 1107 1108 return ret
1109
1110 - def FindDisk(self, idx):
1111 """Find a disk given having a specified index. 1112 1113 This is just a wrapper that does validation of the index. 1114 1115 @type idx: int 1116 @param idx: the disk index 1117 @rtype: L{Disk} 1118 @return: the corresponding disk 1119 @raise errors.OpPrereqError: when the given index is not valid 1120 1121 """ 1122 try: 1123 idx = int(idx) 1124 return self.disks[idx] 1125 except (TypeError, ValueError), err: 1126 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err), 1127 errors.ECODE_INVAL) 1128 except IndexError: 1129 raise errors.OpPrereqError("Invalid disk index: %d (instace has disks" 1130 " 0 to %d" % (idx, len(self.disks) - 1), 1131 errors.ECODE_INVAL)
1132
1133 - def ToDict(self):
1134 """Instance-specific conversion to standard python types. 1135 1136 This replaces the children lists of objects with lists of standard 1137 python types. 1138 1139 """ 1140 bo = super(Instance, self).ToDict() 1141 1142 for attr in "nics", "disks": 1143 alist = bo.get(attr, None) 1144 if alist: 1145 nlist = self._ContainerToDicts(alist) 1146 else: 1147 nlist = [] 1148 bo[attr] = nlist 1149 return bo
1150 1151 @classmethod
1152 - def FromDict(cls, val):
1153 """Custom function for instances. 1154 1155 """ 1156 if "admin_state" not in val: 1157 if val.get("admin_up", False): 1158 val["admin_state"] = constants.ADMINST_UP 1159 else: 1160 val["admin_state"] = constants.ADMINST_DOWN 1161 if "admin_up" in val: 1162 del val["admin_up"] 1163 obj = super(Instance, cls).FromDict(val) 1164 obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC) 1165 obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk) 1166 return obj
1167
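A sketch of the compatibility path for older configurations, where the boolean "admin_up" flag is translated into the admin_state field:

  from ganeti import constants
  from ganeti.objects import Instance

  old_style = {"name": "inst1.example.com", "admin_up": True,
               "nics": [], "disks": []}
  inst = Instance.FromDict(old_style)
  assert inst.admin_state == constants.ADMINST_UP
  assert "admin_up" not in inst.ToDict()    # only slot attributes are kept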
1168 - def UpgradeConfig(self):
1169 """Fill defaults for missing configuration values. 1170 1171 """ 1172 for nic in self.nics: 1173 nic.UpgradeConfig() 1174 for disk in self.disks: 1175 disk.UpgradeConfig() 1176 if self.hvparams: 1177 for key in constants.HVC_GLOBALS: 1178 try: 1179 del self.hvparams[key] 1180 except KeyError: 1181 pass 1182 if self.osparams is None: 1183 self.osparams = {} 1184 UpgradeBeParams(self.beparams)
1185
1186 1187 -class OS(ConfigObject):
1188 """Config object representing an operating system. 1189 1190 @type supported_parameters: list 1191 @ivar supported_parameters: a list of tuples, name and description, 1192 containing the supported parameters by this OS 1193 1194 @type VARIANT_DELIM: string 1195 @cvar VARIANT_DELIM: the variant delimiter 1196 1197 """ 1198 __slots__ = [ 1199 "name", 1200 "path", 1201 "api_versions", 1202 "create_script", 1203 "export_script", 1204 "import_script", 1205 "rename_script", 1206 "verify_script", 1207 "supported_variants", 1208 "supported_parameters", 1209 ] 1210 1211 VARIANT_DELIM = "+" 1212 1213 @classmethod
1214 - def SplitNameVariant(cls, name):
1215 """Splits the name into the proper name and variant. 1216 1217 @param name: the OS (unprocessed) name 1218 @rtype: list 1219 @return: a list of two elements; if the original name didn't 1220 contain a variant, it's returned as an empty string 1221 1222 """ 1223 nv = name.split(cls.VARIANT_DELIM, 1) 1224 if len(nv) == 1: 1225 nv.append("") 1226 return nv
1227 1228 @classmethod
1229 - def GetName(cls, name):
1230 """Returns the proper name of the os (without the variant). 1231 1232 @param name: the OS (unprocessed) name 1233 1234 """ 1235 return cls.SplitNameVariant(name)[0]
1236 1237 @classmethod
1238 - def GetVariant(cls, name):
1239 """Returns the variant the os (without the base name). 1240 1241 @param name: the OS (unprocessed) name 1242 1243 """ 1244 return cls.SplitNameVariant(name)[1]
1245
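For example, with the "+" delimiter defined above:

  from ganeti.objects import OS

  OS.SplitNameVariant("debian+squeeze")   # -> ["debian", "squeeze"]
  OS.SplitNameVariant("debian")           # -> ["debian", ""]
  OS.GetName("debian+squeeze")            # -> "debian"
  OS.GetVariant("debian+squeeze")         # -> "squeeze"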
1246 1247 -class ExtStorage(ConfigObject):
1248 """Config object representing an External Storage Provider. 1249 1250 """ 1251 __slots__ = [ 1252 "name", 1253 "path", 1254 "create_script", 1255 "remove_script", 1256 "grow_script", 1257 "attach_script", 1258 "detach_script", 1259 "setinfo_script", 1260 "verify_script", 1261 "supported_parameters", 1262 ]
1263
1264 1265 -class NodeHvState(ConfigObject):
1266 """Hypvervisor state on a node. 1267 1268 @ivar mem_total: Total amount of memory 1269 @ivar mem_node: Memory used by, or reserved for, the node itself (not always 1270 available) 1271 @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation 1272 rounding 1273 @ivar mem_inst: Memory used by instances living on node 1274 @ivar cpu_total: Total node CPU core count 1275 @ivar cpu_node: Number of CPU cores reserved for the node itself 1276 1277 """ 1278 __slots__ = [ 1279 "mem_total", 1280 "mem_node", 1281 "mem_hv", 1282 "mem_inst", 1283 "cpu_total", 1284 "cpu_node", 1285 ] + _TIMESTAMPS
1286
1287 1288 -class NodeDiskState(ConfigObject):
1289 """Disk state on a node. 1290 1291 """ 1292 __slots__ = [ 1293 "total", 1294 "reserved", 1295 "overhead", 1296 ] + _TIMESTAMPS
1297
1298 1299 -class Node(TaggableObject):
1300 """Config object representing a node. 1301 1302 @ivar hv_state: Hypervisor state (e.g. number of CPUs) 1303 @ivar hv_state_static: Hypervisor state overriden by user 1304 @ivar disk_state: Disk state (e.g. free space) 1305 @ivar disk_state_static: Disk state overriden by user 1306 1307 """ 1308 __slots__ = [ 1309 "name", 1310 "primary_ip", 1311 "secondary_ip", 1312 "serial_no", 1313 "master_candidate", 1314 "offline", 1315 "drained", 1316 "group", 1317 "master_capable", 1318 "vm_capable", 1319 "ndparams", 1320 "powered", 1321 "hv_state", 1322 "hv_state_static", 1323 "disk_state", 1324 "disk_state_static", 1325 ] + _TIMESTAMPS + _UUID 1326
1327 - def UpgradeConfig(self):
1328 """Fill defaults for missing configuration values. 1329 1330 """ 1331 # pylint: disable=E0203 1332 # because these are "defined" via slots, not manually 1333 if self.master_capable is None: 1334 self.master_capable = True 1335 1336 if self.vm_capable is None: 1337 self.vm_capable = True 1338 1339 if self.ndparams is None: 1340 self.ndparams = {} 1341 # And remove any global parameter 1342 for key in constants.NDC_GLOBALS: 1343 if key in self.ndparams: 1344 logging.warning("Ignoring %s node parameter for node %s", 1345 key, self.name) 1346 del self.ndparams[key] 1347 1348 if self.powered is None: 1349 self.powered = True
1350
1351 - def ToDict(self):
1352 """Custom function for serializing. 1353 1354 """ 1355 data = super(Node, self).ToDict() 1356 1357 hv_state = data.get("hv_state", None) 1358 if hv_state is not None: 1359 data["hv_state"] = self._ContainerToDicts(hv_state) 1360 1361 disk_state = data.get("disk_state", None) 1362 if disk_state is not None: 1363 data["disk_state"] = \ 1364 dict((key, self._ContainerToDicts(value)) 1365 for (key, value) in disk_state.items()) 1366 1367 return data
1368 1369 @classmethod
1370 - def FromDict(cls, val):
1371 """Custom function for deserializing. 1372 1373 """ 1374 obj = super(Node, cls).FromDict(val) 1375 1376 if obj.hv_state is not None: 1377 obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState) 1378 1379 if obj.disk_state is not None: 1380 obj.disk_state = \ 1381 dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState)) 1382 for (key, value) in obj.disk_state.items()) 1383 1384 return obj
1385
1386 1387 -class NodeGroup(TaggableObject):
1388 """Config object representing a node group.""" 1389 __slots__ = [ 1390 "name", 1391 "members", 1392 "ndparams", 1393 "diskparams", 1394 "ipolicy", 1395 "serial_no", 1396 "hv_state_static", 1397 "disk_state_static", 1398 "alloc_policy", 1399 "networks", 1400 ] + _TIMESTAMPS + _UUID 1401
1402 - def ToDict(self):
1403 """Custom function for nodegroup. 1404 1405 This discards the members object, which gets recalculated and is only kept 1406 in memory. 1407 1408 """ 1409 mydict = super(NodeGroup, self).ToDict() 1410 del mydict["members"] 1411 return mydict
1412 1413 @classmethod
1414 - def FromDict(cls, val):
1415 """Custom function for nodegroup. 1416 1417 The members slot is initialized to an empty list, upon deserialization. 1418 1419 """ 1420 obj = super(NodeGroup, cls).FromDict(val) 1421 obj.members = [] 1422 return obj
1423
1424 - def UpgradeConfig(self):
1425 """Fill defaults for missing configuration values. 1426 1427 """ 1428 if self.ndparams is None: 1429 self.ndparams = {} 1430 1431 if self.serial_no is None: 1432 self.serial_no = 1 1433 1434 if self.alloc_policy is None: 1435 self.alloc_policy = constants.ALLOC_POLICY_PREFERRED 1436 1437 # We only update mtime, and not ctime, since we would not be able 1438 # to provide a correct value for creation time. 1439 if self.mtime is None: 1440 self.mtime = time.time() 1441 1442 if self.diskparams is None: 1443 self.diskparams = {} 1444 if self.ipolicy is None: 1445 self.ipolicy = MakeEmptyIPolicy() 1446 1447 if self.networks is None: 1448 self.networks = {}
1449
1450 - def FillND(self, node):
1451 """Return filled out ndparams for L{objects.Node} 1452 1453 @type node: L{objects.Node} 1454 @param node: A Node object to fill 1455 @return a copy of the node's ndparams with defaults filled 1456 1457 """ 1458 return self.SimpleFillND(node.ndparams)
1459
1460 - def SimpleFillND(self, ndparams):
1461 """Fill a given ndparams dict with defaults. 1462 1463 @type ndparams: dict 1464 @param ndparams: the dict to fill 1465 @rtype: dict 1466 @return: a copy of the passed in ndparams with missing keys filled 1467 from the node group defaults 1468 1469 """ 1470 return FillDict(self.ndparams, ndparams)
1471
1472 1473 -class Cluster(TaggableObject):
1474 """Config object representing the cluster.""" 1475 __slots__ = [ 1476 "serial_no", 1477 "rsahostkeypub", 1478 "highest_used_port", 1479 "tcpudp_port_pool", 1480 "mac_prefix", 1481 "volume_group_name", 1482 "reserved_lvs", 1483 "drbd_usermode_helper", 1484 "default_bridge", 1485 "default_hypervisor", 1486 "master_node", 1487 "master_ip", 1488 "master_netdev", 1489 "master_netmask", 1490 "use_external_mip_script", 1491 "cluster_name", 1492 "file_storage_dir", 1493 "shared_file_storage_dir", 1494 "enabled_hypervisors", 1495 "hvparams", 1496 "ipolicy", 1497 "os_hvp", 1498 "beparams", 1499 "osparams", 1500 "nicparams", 1501 "ndparams", 1502 "diskparams", 1503 "candidate_pool_size", 1504 "modify_etc_hosts", 1505 "modify_ssh_setup", 1506 "maintain_node_health", 1507 "uid_pool", 1508 "default_iallocator", 1509 "hidden_os", 1510 "blacklisted_os", 1511 "primary_ip_family", 1512 "prealloc_wipe_disks", 1513 "hv_state_static", 1514 "disk_state_static", 1515 ] + _TIMESTAMPS + _UUID 1516
1517 - def UpgradeConfig(self):
1518 """Fill defaults for missing configuration values. 1519 1520 """ 1521 # pylint: disable=E0203 1522 # because these are "defined" via slots, not manually 1523 if self.hvparams is None: 1524 self.hvparams = constants.HVC_DEFAULTS 1525 else: 1526 for hypervisor in self.hvparams: 1527 self.hvparams[hypervisor] = FillDict( 1528 constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor]) 1529 1530 if self.os_hvp is None: 1531 self.os_hvp = {} 1532 1533 # osparams added before 2.2 1534 if self.osparams is None: 1535 self.osparams = {} 1536 1537 self.ndparams = UpgradeNDParams(self.ndparams) 1538 1539 self.beparams = UpgradeGroupedParams(self.beparams, 1540 constants.BEC_DEFAULTS) 1541 for beparams_group in self.beparams: 1542 UpgradeBeParams(self.beparams[beparams_group]) 1543 1544 migrate_default_bridge = not self.nicparams 1545 self.nicparams = UpgradeGroupedParams(self.nicparams, 1546 constants.NICC_DEFAULTS) 1547 if migrate_default_bridge: 1548 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \ 1549 self.default_bridge 1550 1551 if self.modify_etc_hosts is None: 1552 self.modify_etc_hosts = True 1553 1554 if self.modify_ssh_setup is None: 1555 self.modify_ssh_setup = True 1556 1557 # default_bridge is no longer used in 2.1. The slot is left there to 1558 # support auto-upgrading. It can be removed once we decide to deprecate 1559 # upgrading straight from 2.0. 1560 if self.default_bridge is not None: 1561 self.default_bridge = None 1562 1563 # default_hypervisor is just the first enabled one in 2.1. This slot and 1564 # code can be removed once upgrading straight from 2.0 is deprecated. 1565 if self.default_hypervisor is not None: 1566 self.enabled_hypervisors = ([self.default_hypervisor] + 1567 [hvname for hvname in self.enabled_hypervisors 1568 if hvname != self.default_hypervisor]) 1569 self.default_hypervisor = None 1570 1571 # maintain_node_health added after 2.1.1 1572 if self.maintain_node_health is None: 1573 self.maintain_node_health = False 1574 1575 if self.uid_pool is None: 1576 self.uid_pool = [] 1577 1578 if self.default_iallocator is None: 1579 self.default_iallocator = "" 1580 1581 # reserved_lvs added before 2.2 1582 if self.reserved_lvs is None: 1583 self.reserved_lvs = [] 1584 1585 # hidden and blacklisted operating systems added before 2.2.1 1586 if self.hidden_os is None: 1587 self.hidden_os = [] 1588 1589 if self.blacklisted_os is None: 1590 self.blacklisted_os = [] 1591 1592 # primary_ip_family added before 2.3 1593 if self.primary_ip_family is None: 1594 self.primary_ip_family = AF_INET 1595 1596 if self.master_netmask is None: 1597 ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family) 1598 self.master_netmask = ipcls.iplen 1599 1600 if self.prealloc_wipe_disks is None: 1601 self.prealloc_wipe_disks = False 1602 1603 # shared_file_storage_dir added before 2.5 1604 if self.shared_file_storage_dir is None: 1605 self.shared_file_storage_dir = "" 1606 1607 if self.use_external_mip_script is None: 1608 self.use_external_mip_script = False 1609 1610 if self.diskparams: 1611 self.diskparams = UpgradeDiskParams(self.diskparams) 1612 else: 1613 self.diskparams = constants.DISK_DT_DEFAULTS.copy() 1614 1615 # instance policy added before 2.6 1616 if self.ipolicy is None: 1617 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {}) 1618 else: 1619 # we can either make sure to upgrade the ipolicy always, or only 1620 # do it in some corner cases (e.g. 
missing keys); note that this 1621 # will break any removal of keys from the ipolicy dict 1622 wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS 1623 if wrongkeys: 1624 # These keys would be silently removed by FillIPolicy() 1625 msg = ("Cluster instance policy contains spurious keys: %s" % 1626 utils.CommaJoin(wrongkeys)) 1627 raise errors.ConfigurationError(msg) 1628 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1629 1630 @property
1631 - def primary_hypervisor(self):
1632 """The first hypervisor is the primary. 1633 1634 Useful, for example, for L{Node}'s hv/disk state. 1635 1636 """ 1637 return self.enabled_hypervisors[0]
1638
1639 - def ToDict(self):
1640 """Custom function for cluster. 1641 1642 """ 1643 mydict = super(Cluster, self).ToDict() 1644 mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool) 1645 return mydict
1646 1647 @classmethod
1648 - def FromDict(cls, val):
1649 """Custom function for cluster. 1650 1651 """ 1652 obj = super(Cluster, cls).FromDict(val) 1653 if not isinstance(obj.tcpudp_port_pool, set): 1654 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool) 1655 return obj
1656
1657 - def SimpleFillDP(self, diskparams):
1658 """Fill a given diskparams dict with cluster defaults. 1659 1660 @param diskparams: The diskparams 1661 @return: The defaults dict 1662 1663 """ 1664 return FillDiskParams(self.diskparams, diskparams)
1665
1666 - def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1667 """Get the default hypervisor parameters for the cluster. 1668 1669 @param hypervisor: the hypervisor name 1670 @param os_name: if specified, we'll also update the defaults for this OS 1671 @param skip_keys: if passed, list of keys not to use 1672 @return: the defaults dict 1673 1674 """ 1675 if skip_keys is None: 1676 skip_keys = [] 1677 1678 fill_stack = [self.hvparams.get(hypervisor, {})] 1679 if os_name is not None: 1680 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {}) 1681 fill_stack.append(os_hvp) 1682 1683 ret_dict = {} 1684 for o_dict in fill_stack: 1685 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys) 1686 1687 return ret_dict
1688
1689 - def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1690 """Fill a given hvparams dict with cluster defaults. 1691 1692 @type hv_name: string 1693 @param hv_name: the hypervisor to use 1694 @type os_name: string 1695 @param os_name: the OS to use for overriding the hypervisor defaults 1696 @type skip_globals: boolean 1697 @param skip_globals: if True, the global hypervisor parameters will 1698 not be filled 1699 @rtype: dict 1700 @return: a copy of the given hvparams with missing keys filled from 1701 the cluster defaults 1702 1703 """ 1704 if skip_globals: 1705 skip_keys = constants.HVC_GLOBALS 1706 else: 1707 skip_keys = [] 1708 1709 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys) 1710 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1711
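A sketch of the resulting precedence, lowest to highest: cluster-wide hvparams, per-OS os_hvp overrides, then the supplied (instance) hvparams. Hypervisor and parameter names below are hypothetical and the keyword constructor is assumed:

  from ganeti.objects import Cluster

  cluster = Cluster(
    hvparams={"kvm": {"kernel_path": "/boot/vmlinuz", "acpi": True}},
    os_hvp={"debian": {"kvm": {"acpi": False}}})
  filled = cluster.SimpleFillHV("kvm", "debian", {"kernel_path": "/boot/custom"})
  # filled == {"kernel_path": "/boot/custom", "acpi": False}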
1712 - def FillHV(self, instance, skip_globals=False):
1713 """Fill an instance's hvparams dict with cluster defaults. 1714 1715 @type instance: L{objects.Instance} 1716 @param instance: the instance parameter to fill 1717 @type skip_globals: boolean 1718 @param skip_globals: if True, the global hypervisor parameters will 1719 not be filled 1720 @rtype: dict 1721 @return: a copy of the instance's hvparams with missing keys filled from 1722 the cluster defaults 1723 1724 """ 1725 return self.SimpleFillHV(instance.hypervisor, instance.os, 1726 instance.hvparams, skip_globals)
1727
1728 - def SimpleFillBE(self, beparams):
1729 """Fill a given beparams dict with cluster defaults. 1730 1731 @type beparams: dict 1732 @param beparams: the dict to fill 1733 @rtype: dict 1734 @return: a copy of the passed in beparams with missing keys filled 1735 from the cluster defaults 1736 1737 """ 1738 return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1739
1740 - def FillBE(self, instance):
1741 """Fill an instance's beparams dict with cluster defaults. 1742 1743 @type instance: L{objects.Instance} 1744 @param instance: the instance parameter to fill 1745 @rtype: dict 1746 @return: a copy of the instance's beparams with missing keys filled from 1747 the cluster defaults 1748 1749 """ 1750 return self.SimpleFillBE(instance.beparams)
1751
1752 - def SimpleFillNIC(self, nicparams):
1753 """Fill a given nicparams dict with cluster defaults. 1754 1755 @type nicparams: dict 1756 @param nicparams: the dict to fill 1757 @rtype: dict 1758 @return: a copy of the passed in nicparams with missing keys filled 1759 from the cluster defaults 1760 1761 """ 1762 return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1763
1764 - def SimpleFillOS(self, os_name, os_params):
1765 """Fill an instance's osparams dict with cluster defaults. 1766 1767 @type os_name: string 1768 @param os_name: the OS name to use 1769 @type os_params: dict 1770 @param os_params: the dict to fill with default values 1771 @rtype: dict 1772 @return: a copy of the instance's osparams with missing keys filled from 1773 the cluster defaults 1774 1775 """ 1776 name_only = os_name.split("+", 1)[0] 1777 # base OS 1778 result = self.osparams.get(name_only, {}) 1779 # OS with variant 1780 result = FillDict(result, self.osparams.get(os_name, {})) 1781 # specified params 1782 return FillDict(result, os_params)
1783 1784 @staticmethod
1785 - def SimpleFillHvState(hv_state):
1786 """Fill an hv_state sub dict with cluster defaults. 1787 1788 """ 1789 return FillDict(constants.HVST_DEFAULTS, hv_state)
1790 1791 @staticmethod
1792 - def SimpleFillDiskState(disk_state):
1793 """Fill an disk_state sub dict with cluster defaults. 1794 1795 """ 1796 return FillDict(constants.DS_DEFAULTS, disk_state)
1797
1798 - def FillND(self, node, nodegroup):
1799 """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node} 1800 1801 @type node: L{objects.Node} 1802 @param node: A Node object to fill 1803 @type nodegroup: L{objects.NodeGroup} 1804 @param nodegroup: A Node object to fill 1805 @return a copy of the node's ndparams with defaults filled 1806 1807 """ 1808 return self.SimpleFillND(nodegroup.FillND(node))
1809
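A sketch of the three-level fill, cluster defaults overridden by node group ndparams and finally by the node's own ndparams (hypothetical parameter names, keyword constructors assumed):

  from ganeti.objects import Cluster, Node, NodeGroup

  cluster = Cluster(ndparams={"oob_program": None, "spindle_count": 1})
  group = NodeGroup(ndparams={"spindle_count": 4})
  node = Node(ndparams={"oob_program": "/usr/bin/oob"})
  filled = cluster.FillND(node, group)
  # filled == {"oob_program": "/usr/bin/oob", "spindle_count": 4}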
1810 - def SimpleFillND(self, ndparams):
1811 """Fill a given ndparams dict with defaults. 1812 1813 @type ndparams: dict 1814 @param ndparams: the dict to fill 1815 @rtype: dict 1816 @return: a copy of the passed in ndparams with missing keys filled 1817 from the cluster defaults 1818 1819 """ 1820 return FillDict(self.ndparams, ndparams)
1821
1822 - def SimpleFillIPolicy(self, ipolicy):
1823 """ Fill instance policy dict with defaults. 1824 1825 @type ipolicy: dict 1826 @param ipolicy: the dict to fill 1827 @rtype: dict 1828 @return: a copy of passed ipolicy with missing keys filled from 1829 the cluster defaults 1830 1831 """ 1832 return FillIPolicy(self.ipolicy, ipolicy)
1833
1834 1835 -class BlockDevStatus(ConfigObject):
1836 """Config object representing the status of a block device.""" 1837 __slots__ = [ 1838 "dev_path", 1839 "major", 1840 "minor", 1841 "sync_percent", 1842 "estimated_time", 1843 "is_degraded", 1844 "ldisk_status", 1845 ]
1846
1847 1848 -class ImportExportStatus(ConfigObject):
1849 """Config object representing the status of an import or export.""" 1850 __slots__ = [ 1851 "recent_output", 1852 "listen_port", 1853 "connected", 1854 "progress_mbytes", 1855 "progress_throughput", 1856 "progress_eta", 1857 "progress_percent", 1858 "exit_status", 1859 "error_message", 1860 ] + _TIMESTAMPS
1861
1862 1863 -class ImportExportOptions(ConfigObject):
1864 """Options for the import/export daemon. 1865 1866 @ivar key_name: X509 key name (None for cluster certificate) 1867 @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate) 1868 @ivar compress: Compression method (one of L{constants.IEC_ALL}) 1869 @ivar magic: Used to ensure the connection goes to the right disk 1870 @ivar ipv6: Whether to use IPv6 1871 @ivar connect_timeout: Number of seconds for establishing connection 1872 1873 """ 1874 __slots__ = [ 1875 "key_name", 1876 "ca_pem", 1877 "compress", 1878 "magic", 1879 "ipv6", 1880 "connect_timeout", 1881 ]
1882
1883 1884 -class ConfdRequest(ConfigObject):
1885 """Object holding a confd request. 1886 1887 @ivar protocol: confd protocol version 1888 @ivar type: confd query type 1889 @ivar query: query request 1890 @ivar rsalt: requested reply salt 1891 1892 """ 1893 __slots__ = [ 1894 "protocol", 1895 "type", 1896 "query", 1897 "rsalt", 1898 ]
1899
1900 1901 -class ConfdReply(ConfigObject):
1902 """Object holding a confd reply. 1903 1904 @ivar protocol: confd protocol version 1905 @ivar status: reply status code (ok, error) 1906 @ivar answer: confd query reply 1907 @ivar serial: configuration serial number 1908 1909 """ 1910 __slots__ = [ 1911 "protocol", 1912 "status", 1913 "answer", 1914 "serial", 1915 ]
1916
1917 1918 -class QueryFieldDefinition(ConfigObject):
1919 """Object holding a query field definition. 1920 1921 @ivar name: Field name 1922 @ivar title: Human-readable title 1923 @ivar kind: Field type 1924 @ivar doc: Human-readable description 1925 1926 """ 1927 __slots__ = [ 1928 "name", 1929 "title", 1930 "kind", 1931 "doc", 1932 ]
1933
1934 1935 -class _QueryResponseBase(ConfigObject):
1936 __slots__ = [ 1937 "fields", 1938 ] 1939
1940 - def ToDict(self):
1941 """Custom function for serializing. 1942 1943 """ 1944 mydict = super(_QueryResponseBase, self).ToDict() 1945 mydict["fields"] = self._ContainerToDicts(mydict["fields"]) 1946 return mydict
1947 1948 @classmethod
1949 - def FromDict(cls, val):
1950 """Custom function for de-serializing. 1951 1952 """ 1953 obj = super(_QueryResponseBase, cls).FromDict(val) 1954 obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition) 1955 return obj
1956
1957 1958 -class QueryResponse(_QueryResponseBase):
1959 """Object holding the response to a query. 1960 1961 @ivar fields: List of L{QueryFieldDefinition} objects 1962 @ivar data: Requested data 1963 1964 """ 1965 __slots__ = [ 1966 "data", 1967 ]
1968
1969 1970 -class QueryFieldsRequest(ConfigObject):
1971 """Object holding a request for querying available fields. 1972 1973 """ 1974 __slots__ = [ 1975 "what", 1976 "fields", 1977 ]
1978
1979 1980 -class QueryFieldsResponse(_QueryResponseBase):
1981 """Object holding the response to a query for fields. 1982 1983 @ivar fields: List of L{QueryFieldDefinition} objects 1984 1985 """ 1986 __slots__ = []
1987
1988 1989 -class MigrationStatus(ConfigObject):
1990 """Object holding the status of a migration. 1991 1992 """ 1993 __slots__ = [ 1994 "status", 1995 "transferred_ram", 1996 "total_ram", 1997 ]
1998
1999 2000 -class InstanceConsole(ConfigObject):
2001 """Object describing how to access the console of an instance. 2002 2003 """ 2004 __slots__ = [ 2005 "instance", 2006 "kind", 2007 "message", 2008 "host", 2009 "port", 2010 "user", 2011 "command", 2012 "display", 2013 ] 2014
2015 - def Validate(self):
2016 """Validates contents of this object. 2017 2018 """ 2019 assert self.kind in constants.CONS_ALL, "Unknown console type" 2020 assert self.instance, "Missing instance name" 2021 assert self.message or self.kind in [constants.CONS_SSH, 2022 constants.CONS_SPICE, 2023 constants.CONS_VNC] 2024 assert self.host or self.kind == constants.CONS_MESSAGE 2025 assert self.port or self.kind in [constants.CONS_MESSAGE, 2026 constants.CONS_SSH] 2027 assert self.user or self.kind in [constants.CONS_MESSAGE, 2028 constants.CONS_SPICE, 2029 constants.CONS_VNC] 2030 assert self.command or self.kind in [constants.CONS_MESSAGE, 2031 constants.CONS_SPICE, 2032 constants.CONS_VNC] 2033 assert self.display or self.kind in [constants.CONS_MESSAGE, 2034 constants.CONS_SPICE, 2035 constants.CONS_SSH] 2036 return True
2037
2038 2039 -class Network(TaggableObject):
2040 """Object representing a network definition for ganeti. 2041 2042 """ 2043 __slots__ = [ 2044 "name", 2045 "serial_no", 2046 "mac_prefix", 2047 "network", 2048 "network6", 2049 "gateway", 2050 "gateway6", 2051 "reservations", 2052 "ext_reservations", 2053 ] + _TIMESTAMPS + _UUID 2054
2055 - def HooksDict(self, prefix=""):
2056 """Export a dictionary used by hooks with a network's information. 2057 2058 @type prefix: String 2059 @param prefix: Prefix to prepend to the dict entries 2060 2061 """ 2062 result = { 2063 "%sNETWORK_NAME" % prefix: self.name, 2064 "%sNETWORK_UUID" % prefix: self.uuid, 2065 "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()), 2066 } 2067 if self.network: 2068 result["%sNETWORK_SUBNET" % prefix] = self.network 2069 if self.gateway: 2070 result["%sNETWORK_GATEWAY" % prefix] = self.gateway 2071 if self.network6: 2072 result["%sNETWORK_SUBNET6" % prefix] = self.network6 2073 if self.gateway6: 2074 result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6 2075 if self.mac_prefix: 2076 result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix 2077 2078 return result
2079 2080 @classmethod
2081 - def FromDict(cls, val):
2082 """Custom function for networks. 2083 2084 Remove deprecated network_type and family. 2085 2086 """ 2087 if "network_type" in val: 2088 del val["network_type"] 2089 if "family" in val: 2090 del val["family"] 2091 obj = super(Network, cls).FromDict(val) 2092 return obj
2093
2094 2095 -class SerializableConfigParser(ConfigParser.SafeConfigParser):
2096 """Simple wrapper over ConfigParser that allows serialization. 2097 2098 This class is basically ConfigParser.SafeConfigParser with two 2099 additional methods that allow it to serialize/unserialize to/from a 2100 buffer. 2101 2102 """
2103 - def Dumps(self):
2104 """Dump this instance and return the string representation.""" 2105 buf = StringIO() 2106 self.write(buf) 2107 return buf.getvalue()
2108 2109 @classmethod
2110 - def Loads(cls, data):
2111 """Load data from a string.""" 2112 buf = StringIO(data) 2113 cfp = cls() 2114 cfp.readfp(buf) 2115 return cfp
2116
2117 2118 -class LvmPvInfo(ConfigObject):
2119 """Information about an LVM physical volume (PV). 2120 2121 @type name: string 2122 @ivar name: name of the PV 2123 @type vg_name: string 2124 @ivar vg_name: name of the volume group containing the PV 2125 @type size: float 2126 @ivar size: size of the PV in MiB 2127 @type free: float 2128 @ivar free: free space in the PV, in MiB 2129 @type attributes: string 2130 @ivar attributes: PV attributes 2131 @type lv_list: list of strings 2132 @ivar lv_list: names of the LVs hosted on the PV 2133 """ 2134 __slots__ = [ 2135 "name", 2136 "vg_name", 2137 "size", 2138 "free", 2139 "attributes", 2140 "lv_list" 2141 ] 2142
2143 - def IsEmpty(self):
2144 """Is this PV empty? 2145 2146 """ 2147 return self.size <= (self.free + 1)
2148
2149 - def IsAllocatable(self):
2150 """Is this PV allocatable? 2151 2152 """ 2153 return ("a" in self.attributes)
2154