Package ganeti :: Module objects

Source Code for Module ganeti.objects

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc. 
   5  # 
   6  # This program is free software; you can redistribute it and/or modify 
   7  # it under the terms of the GNU General Public License as published by 
   8  # the Free Software Foundation; either version 2 of the License, or 
   9  # (at your option) any later version. 
  10  # 
  11  # This program is distributed in the hope that it will be useful, but 
  12  # WITHOUT ANY WARRANTY; without even the implied warranty of 
  13  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
  14  # General Public License for more details. 
  15  # 
  16  # You should have received a copy of the GNU General Public License 
  17  # along with this program; if not, write to the Free Software 
  18  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
  19  # 02110-1301, USA. 
  20   
  21   
  22  """Transportable objects for Ganeti. 
  23   
  24  This module provides small, mostly data-only objects which are safe to 
  25  pass to and from external parties. 
  26   
  27  """ 
  28   
  29  # pylint: disable=E0203,W0201,R0902 
  30   
  31  # E0203: Access to member %r before its definition, since we use 
  32  # objects.py which doesn't explicitly initialise its members 
  33   
  34  # W0201: Attribute '%s' defined outside __init__ 
  35   
  36  # R0902: Allow instances of these objects to have more than 20 attributes 
  37   
  38  import ConfigParser 
  39  import re 
  40  import copy 
  41  import time 
  42  from cStringIO import StringIO 
  43   
  44  from ganeti import errors 
  45  from ganeti import constants 
  46  from ganeti import netutils 
  47  from ganeti import utils 
  48   
  49  from socket import AF_INET 
  50   
  51   
  52  __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance", 
  53             "OS", "Node", "NodeGroup", "Cluster", "FillDict"] 
  54   
  55  _TIMESTAMPS = ["ctime", "mtime"] 
  56  _UUID = ["uuid"] 
57 58 59 -def FillDict(defaults_dict, custom_dict, skip_keys=None):
60 """Basic function to apply settings on top a default dict. 61 62 @type defaults_dict: dict 63 @param defaults_dict: dictionary holding the default values 64 @type custom_dict: dict 65 @param custom_dict: dictionary holding customized value 66 @type skip_keys: list 67 @param skip_keys: which keys not to fill 68 @rtype: dict 69 @return: dict with the 'full' values 70 71 """ 72 ret_dict = copy.deepcopy(defaults_dict) 73 ret_dict.update(custom_dict) 74 if skip_keys: 75 for k in skip_keys: 76 try: 77 del ret_dict[k] 78 except KeyError: 79 pass 80 return ret_dict
81
82 83 -def FillIPolicy(default_ipolicy, custom_ipolicy, skip_keys=None):
84 """Fills an instance policy with defaults. 85 86 """ 87 assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS 88 ret_dict = {} 89 for key in constants.IPOLICY_ISPECS: 90 ret_dict[key] = FillDict(default_ipolicy[key], 91 custom_ipolicy.get(key, {}), 92 skip_keys=skip_keys) 93 # list items 94 for key in [constants.IPOLICY_DTS]: 95 ret_dict[key] = list(custom_ipolicy.get(key, default_ipolicy[key])) 96 # other items which we know we can directly copy (immutables) 97 for key in constants.IPOLICY_PARAMETERS: 98 ret_dict[key] = custom_ipolicy.get(key, default_ipolicy[key]) 99 100 return ret_dict
101
102 103 -def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
104 """Fills the disk parameter defaults. 105 106 @see: L{FillDict} for parameters and return value 107 108 """ 109 assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES 110 111 return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}), 112 skip_keys=skip_keys)) 113 for dt in constants.DISK_TEMPLATES)
114
115 116 -def UpgradeGroupedParams(target, defaults):
117 """Update all groups for the target parameter. 118 119 @type target: dict of dicts 120 @param target: {group: {parameter: value}} 121 @type defaults: dict 122 @param defaults: default parameter values 123 124 """ 125 if target is None: 126 target = {constants.PP_DEFAULT: defaults} 127 else: 128 for group in target: 129 target[group] = FillDict(defaults, target[group]) 130 return target
131
132 133 -def UpgradeBeParams(target):
134 """Update the be parameters dict to the new format. 135 136 @type target: dict 137 @param target: "be" parameters dict 138 139 """ 140 if constants.BE_MEMORY in target: 141 memory = target[constants.BE_MEMORY] 142 target[constants.BE_MAXMEM] = memory 143 target[constants.BE_MINMEM] = memory 144 del target[constants.BE_MEMORY]
145
146 147 -def UpgradeDiskParams(diskparams):
148 """Upgrade the disk parameters. 149 150 @type diskparams: dict 151 @param diskparams: disk parameters to upgrade 152 @rtype: dict 153 @return: the upgraded disk parameters dict 154 155 """ 156 if not diskparams: 157 result = {} 158 else: 159 result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams) 160 161 return result
162
163 164 -def UpgradeNDParams(ndparams):
165 """Upgrade ndparams structure. 166 167 @type ndparams: dict 168 @param ndparams: disk parameters to upgrade 169 @rtype: dict 170 @return: the upgraded node parameters dict 171 172 """ 173 if ndparams is None: 174 ndparams = {} 175 176 return FillDict(constants.NDC_DEFAULTS, ndparams)
177
178 179 -def MakeEmptyIPolicy():
180 """Create empty IPolicy dictionary. 181 182 """ 183 return dict([ 184 (constants.ISPECS_MIN, {}), 185 (constants.ISPECS_MAX, {}), 186 (constants.ISPECS_STD, {}), 187 ])
188
189 190 -class ConfigObject(object):
191 """A generic config object. 192 193 It has the following properties: 194 195 - provides somewhat safe recursive unpickling and pickling for its classes 196 - unset attributes which are defined in slots are always returned 197 as None instead of raising an error 198 199 Classes derived from this must always declare __slots__ (we use many 200 config objects and the memory reduction is useful) 201 202 """ 203 __slots__ = [] 204
205 - def __init__(self, **kwargs):
206 for k, v in kwargs.iteritems(): 207 setattr(self, k, v)
208
209 - def __getattr__(self, name):
210 if name not in self._all_slots(): 211 raise AttributeError("Invalid object attribute %s.%s" % 212 (type(self).__name__, name)) 213 return None
214
215 - def __setstate__(self, state):
216 slots = self._all_slots() 217 for name in state: 218 if name in slots: 219 setattr(self, name, state[name])
220 221 @classmethod
222 - def _all_slots(cls):
223 """Compute the list of all declared slots for a class. 224 225 """ 226 slots = [] 227 for parent in cls.__mro__: 228 slots.extend(getattr(parent, "__slots__", [])) 229 return slots
230 231 #: Public getter for the defined slots 232 GetAllSlots = _all_slots 233
234 - def ToDict(self):
235 """Convert to a dict holding only standard python types. 236 237 The generic routine just dumps all of this object's attributes in 238 a dict. It does not work if the class has children who are 239 ConfigObjects themselves (e.g. the nics list in an Instance), in 240 which case the object should subclass the function in order to 241 make sure all objects returned are only standard python types. 242 243 """ 244 result = {} 245 for name in self._all_slots(): 246 value = getattr(self, name, None) 247 if value is not None: 248 result[name] = value 249 return result
250 251 __getstate__ = ToDict 252 253 @classmethod
254 - def FromDict(cls, val):
255 """Create an object from a dictionary. 256 257 This generic routine takes a dict, instantiates a new instance of 258 the given class, and sets attributes based on the dict content. 259 260 As for `ToDict`, this does not work if the class has children 261 who are ConfigObjects themselves (e.g. the nics list in an 262 Instance), in which case the object should subclass the function 263 and alter the objects. 264 265 """ 266 if not isinstance(val, dict): 267 raise errors.ConfigurationError("Invalid object passed to FromDict:" 268 " expected dict, got %s" % type(val)) 269 val_str = dict([(str(k), v) for k, v in val.iteritems()]) 270 obj = cls(**val_str) # pylint: disable=W0142 271 return obj
272 273 @staticmethod
274 - def _ContainerToDicts(container):
275 """Convert the elements of a container to standard python types. 276 277 This method converts a container with elements derived from 278 ConfigData to standard python types. If the container is a dict, 279 we don't touch the keys, only the values. 280 281 """ 282 if isinstance(container, dict): 283 ret = dict([(k, v.ToDict()) for k, v in container.iteritems()]) 284 elif isinstance(container, (list, tuple, set, frozenset)): 285 ret = [elem.ToDict() for elem in container] 286 else: 287 raise TypeError("Invalid type %s passed to _ContainerToDicts" % 288 type(container)) 289 return ret
290 291 @staticmethod
292 - def _ContainerFromDicts(source, c_type, e_type):
293 """Convert a container from standard python types. 294 295 This method converts a container with standard python types to 296 ConfigData objects. If the container is a dict, we don't touch the 297 keys, only the values. 298 299 """ 300 if not isinstance(c_type, type): 301 raise TypeError("Container type %s passed to _ContainerFromDicts is" 302 " not a type" % type(c_type)) 303 if source is None: 304 source = c_type() 305 if c_type is dict: 306 ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()]) 307 elif c_type in (list, tuple, set, frozenset): 308 ret = c_type([e_type.FromDict(elem) for elem in source]) 309 else: 310 raise TypeError("Invalid container type %s passed to" 311 " _ContainerFromDicts" % c_type) 312 return ret
313
314 - def Copy(self):
315 """Makes a deep copy of the current object and its children. 316 317 """ 318 dict_form = self.ToDict() 319 clone_obj = self.__class__.FromDict(dict_form) 320 return clone_obj
321
322 - def __repr__(self):
323 """Implement __repr__ for ConfigObjects.""" 324 return repr(self.ToDict())
325
326 - def UpgradeConfig(self):
327 """Fill defaults for missing configuration values. 328 329 This method will be called at configuration load time, and its 330 implementation will be object dependent. 331 332 """ 333 pass
334
335 336 -class TaggableObject(ConfigObject):
337 """An generic class supporting tags. 338 339 """ 340 __slots__ = ["tags"] 341 VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$") 342 343 @classmethod
344 - def ValidateTag(cls, tag):
345 """Check if a tag is valid. 346 347 If the tag is invalid, an errors.TagError will be raised. The 348 function has no return value. 349 350 """ 351 if not isinstance(tag, basestring): 352 raise errors.TagError("Invalid tag type (not a string)") 353 if len(tag) > constants.MAX_TAG_LEN: 354 raise errors.TagError("Tag too long (>%d characters)" % 355 constants.MAX_TAG_LEN) 356 if not tag: 357 raise errors.TagError("Tags cannot be empty") 358 if not cls.VALID_TAG_RE.match(tag): 359 raise errors.TagError("Tag contains invalid characters")
360
361 - def GetTags(self):
362 """Return the tags list. 363 364 """ 365 tags = getattr(self, "tags", None) 366 if tags is None: 367 tags = self.tags = set() 368 return tags
369
370 - def AddTag(self, tag):
371 """Add a new tag. 372 373 """ 374 self.ValidateTag(tag) 375 tags = self.GetTags() 376 if len(tags) >= constants.MAX_TAGS_PER_OBJ: 377 raise errors.TagError("Too many tags") 378 self.GetTags().add(tag)
379
380 - def RemoveTag(self, tag):
381 """Remove a tag. 382 383 """ 384 self.ValidateTag(tag) 385 tags = self.GetTags() 386 try: 387 tags.remove(tag) 388 except KeyError: 389 raise errors.TagError("Tag not found")
390
391 - def ToDict(self):
392 """Taggable-object-specific conversion to standard python types. 393 394 This replaces the tags set with a list. 395 396 """ 397 bo = super(TaggableObject, self).ToDict() 398 399 tags = bo.get("tags", None) 400 if isinstance(tags, set): 401 bo["tags"] = list(tags) 402 return bo
403 404 @classmethod
405 - def FromDict(cls, val):
406 """Custom function for instances. 407 408 """ 409 obj = super(TaggableObject, cls).FromDict(val) 410 if hasattr(obj, "tags") and isinstance(obj.tags, list): 411 obj.tags = set(obj.tags) 412 return obj
413
414 415 -class MasterNetworkParameters(ConfigObject):
416 """Network configuration parameters for the master 417 418 @ivar name: master name 419 @ivar ip: master IP 420 @ivar netmask: master netmask 421 @ivar netdev: master network device 422 @ivar ip_family: master IP family 423 424 """ 425 __slots__ = [ 426 "name", 427 "ip", 428 "netmask", 429 "netdev", 430 "ip_family" 431 ]
432
433 434 -class ConfigData(ConfigObject):
435 """Top-level config object.""" 436 __slots__ = [ 437 "version", 438 "cluster", 439 "nodes", 440 "nodegroups", 441 "instances", 442 "serial_no", 443 ] + _TIMESTAMPS 444
445 - def ToDict(self):
446 """Custom function for top-level config data. 447 448 This just replaces the list of instances, nodes and the cluster 449 with standard python types. 450 451 """ 452 mydict = super(ConfigData, self).ToDict() 453 mydict["cluster"] = mydict["cluster"].ToDict() 454 for key in "nodes", "instances", "nodegroups": 455 mydict[key] = self._ContainerToDicts(mydict[key]) 456 457 return mydict
458 459 @classmethod
460 - def FromDict(cls, val):
461 """Custom function for top-level config data 462 463 """ 464 obj = super(ConfigData, cls).FromDict(val) 465 obj.cluster = Cluster.FromDict(obj.cluster) 466 obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node) 467 obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance) 468 obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup) 469 return obj
470
471 - def HasAnyDiskOfType(self, dev_type):
472 """Check if in there is at disk of the given type in the configuration. 473 474 @type dev_type: L{constants.LDS_BLOCK} 475 @param dev_type: the type to look for 476 @rtype: boolean 477 @return: boolean indicating if a disk of the given type was found or not 478 479 """ 480 for instance in self.instances.values(): 481 for disk in instance.disks: 482 if disk.IsBasedOnDiskType(dev_type): 483 return True 484 return False
485
486 - def UpgradeConfig(self):
487 """Fill defaults for missing configuration values. 488 489 """ 490 self.cluster.UpgradeConfig() 491 for node in self.nodes.values(): 492 node.UpgradeConfig() 493 for instance in self.instances.values(): 494 instance.UpgradeConfig() 495 if self.nodegroups is None: 496 self.nodegroups = {} 497 for nodegroup in self.nodegroups.values(): 498 nodegroup.UpgradeConfig() 499 if self.cluster.drbd_usermode_helper is None: 500 # To decide if we set an helper let's check if at least one instance has 501 # a DRBD disk. This does not cover all the possible scenarios but it 502 # gives a good approximation. 503 if self.HasAnyDiskOfType(constants.LD_DRBD8): 504 self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
505
506 507 -class NIC(ConfigObject):
508 """Config object representing a network card.""" 509 __slots__ = ["mac", "ip", "nicparams"] 510 511 @classmethod
512 - def CheckParameterSyntax(cls, nicparams):
513 """Check the given parameters for validity. 514 515 @type nicparams: dict 516 @param nicparams: dictionary with parameter names/value 517 @raise errors.ConfigurationError: when a parameter is not valid 518 519 """ 520 if (nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES and 521 nicparams[constants.NIC_MODE] != constants.VALUE_AUTO): 522 err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE] 523 raise errors.ConfigurationError(err) 524 525 if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and 526 not nicparams[constants.NIC_LINK]): 527 err = "Missing bridged nic link" 528 raise errors.ConfigurationError(err)
529
530 531 -class Disk(ConfigObject):
532 """Config object representing a block device.""" 533 __slots__ = ["dev_type", "logical_id", "physical_id", 534 "children", "iv_name", "size", "mode", "params"] 535
536 - def CreateOnSecondary(self):
537 """Test if this device needs to be created on a secondary node.""" 538 return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
539
540 - def AssembleOnSecondary(self):
541 """Test if this device needs to be assembled on a secondary node.""" 542 return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
543
544 - def OpenOnSecondary(self):
545 """Test if this device needs to be opened on a secondary node.""" 546 return self.dev_type in (constants.LD_LV,)
547
548 - def StaticDevPath(self):
549 """Return the device path if this device type has a static one. 550 551 Some devices (LVM for example) live always at the same /dev/ path, 552 irrespective of their status. For such devices, we return this 553 path, for others we return None. 554 555 @warning: The path returned is not a normalized pathname; callers 556 should check that it is a valid path. 557 558 """ 559 if self.dev_type == constants.LD_LV: 560 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1]) 561 elif self.dev_type == constants.LD_BLOCKDEV: 562 return self.logical_id[1] 563 elif self.dev_type == constants.LD_RBD: 564 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1]) 565 return None
566
567 - def ChildrenNeeded(self):
568 """Compute the needed number of children for activation. 569 570 This method will return either -1 (all children) or a positive 571 number denoting the minimum number of children needed for 572 activation (only mirrored devices will usually return >=0). 573 574 Currently, only DRBD8 supports diskless activation (therefore we 575 return 0), for all other we keep the previous semantics and return 576 -1. 577 578 """ 579 if self.dev_type == constants.LD_DRBD8: 580 return 0 581 return -1
582
583 - def IsBasedOnDiskType(self, dev_type):
584 """Check if the disk or its children are based on the given type. 585 586 @type dev_type: L{constants.LDS_BLOCK} 587 @param dev_type: the type to look for 588 @rtype: boolean 589 @return: boolean indicating if a device of the given type was found or not 590 591 """ 592 if self.children: 593 for child in self.children: 594 if child.IsBasedOnDiskType(dev_type): 595 return True 596 return self.dev_type == dev_type
597
598 - def GetNodes(self, node):
599 """This function returns the nodes this device lives on. 600 601 Given the node on which the parent of the device lives on (or, in 602 case of a top-level device, the primary node of the devices' 603 instance), this function will return a list of nodes on which this 604 devices needs to (or can) be assembled. 605 606 """ 607 if self.dev_type in [constants.LD_LV, constants.LD_FILE, 608 constants.LD_BLOCKDEV, constants.LD_RBD]: 609 result = [node] 610 elif self.dev_type in constants.LDS_DRBD: 611 result = [self.logical_id[0], self.logical_id[1]] 612 if node not in result: 613 raise errors.ConfigurationError("DRBD device passed unknown node") 614 else: 615 raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type) 616 return result
617
618 - def ComputeNodeTree(self, parent_node):
619 """Compute the node/disk tree for this disk and its children. 620 621 This method, given the node on which the parent disk lives, will 622 return the list of all (node, disk) pairs which describe the disk 623 tree in the most compact way. For example, a drbd/lvm stack 624 will be returned as (primary_node, drbd) and (secondary_node, drbd) 625 which represents all the top-level devices on the nodes. 626 627 """ 628 my_nodes = self.GetNodes(parent_node) 629 result = [(node, self) for node in my_nodes] 630 if not self.children: 631 # leaf device 632 return result 633 for node in my_nodes: 634 for child in self.children: 635 child_result = child.ComputeNodeTree(node) 636 if len(child_result) == 1: 637 # child (and all its descendants) is simple, doesn't split 638 # over multiple hosts, so we don't need to describe it, our 639 # own entry for this node describes it completely 640 continue 641 else: 642 # check if child nodes differ from my nodes; note that 643 # subdisk can differ from the child itself, and be instead 644 # one of its descendants 645 for subnode, subdisk in child_result: 646 if subnode not in my_nodes: 647 result.append((subnode, subdisk)) 648 # otherwise child is under our own node, so we ignore this 649 # entry (but probably the other results in the list will 650 # be different) 651 return result
652
653 - def ComputeGrowth(self, amount):
654 """Compute the per-VG growth requirements. 655 656 This only works for VG-based disks. 657 658 @type amount: integer 659 @param amount: the desired increase in (user-visible) disk space 660 @rtype: dict 661 @return: a dictionary of volume-groups and the required size 662 663 """ 664 if self.dev_type == constants.LD_LV: 665 return {self.logical_id[0]: amount} 666 elif self.dev_type == constants.LD_DRBD8: 667 if self.children: 668 return self.children[0].ComputeGrowth(amount) 669 else: 670 return {} 671 else: 672 # Other disk types do not require VG space 673 return {}
674
675 - def RecordGrow(self, amount):
676 """Update the size of this disk after growth. 677 678 This method recurses over the disks's children and updates their 679 size correspondigly. The method needs to be kept in sync with the 680 actual algorithms from bdev. 681 682 """ 683 if self.dev_type in (constants.LD_LV, constants.LD_FILE, 684 constants.LD_RBD): 685 self.size += amount 686 elif self.dev_type == constants.LD_DRBD8: 687 if self.children: 688 self.children[0].RecordGrow(amount) 689 self.size += amount 690 else: 691 raise errors.ProgrammerError("Disk.RecordGrow called for unsupported" 692 " disk type %s" % self.dev_type)
693
694 - def Update(self, size=None, mode=None):
695 """Apply changes to size and mode. 696 697 """ 698 if self.dev_type == constants.LD_DRBD8: 699 if self.children: 700 self.children[0].Update(size=size, mode=mode) 701 else: 702 assert not self.children 703 704 if size is not None: 705 self.size = size 706 if mode is not None: 707 self.mode = mode
708
709 - def UnsetSize(self):
710 """Sets recursively the size to zero for the disk and its children. 711 712 """ 713 if self.children: 714 for child in self.children: 715 child.UnsetSize() 716 self.size = 0
717
718 - def SetPhysicalID(self, target_node, nodes_ip):
719 """Convert the logical ID to the physical ID. 720 721 This is used only for drbd, which needs ip/port configuration. 722 723 The routine descends down and updates its children also, because 724 this helps when the only the top device is passed to the remote 725 node. 726 727 Arguments: 728 - target_node: the node we wish to configure for 729 - nodes_ip: a mapping of node name to ip 730 731 The target_node must exist in in nodes_ip, and must be one of the 732 nodes in the logical ID for each of the DRBD devices encountered 733 in the disk tree. 734 735 """ 736 if self.children: 737 for child in self.children: 738 child.SetPhysicalID(target_node, nodes_ip) 739 740 if self.logical_id is None and self.physical_id is not None: 741 return 742 if self.dev_type in constants.LDS_DRBD: 743 pnode, snode, port, pminor, sminor, secret = self.logical_id 744 if target_node not in (pnode, snode): 745 raise errors.ConfigurationError("DRBD device not knowing node %s" % 746 target_node) 747 pnode_ip = nodes_ip.get(pnode, None) 748 snode_ip = nodes_ip.get(snode, None) 749 if pnode_ip is None or snode_ip is None: 750 raise errors.ConfigurationError("Can't find primary or secondary node" 751 " for %s" % str(self)) 752 p_data = (pnode_ip, port) 753 s_data = (snode_ip, port) 754 if pnode == target_node: 755 self.physical_id = p_data + s_data + (pminor, secret) 756 else: # it must be secondary, we tested above 757 self.physical_id = s_data + p_data + (sminor, secret) 758 else: 759 self.physical_id = self.logical_id 760 return
761
762 - def ToDict(self):
763 """Disk-specific conversion to standard python types. 764 765 This replaces the children lists of objects with lists of 766 standard python types. 767 768 """ 769 bo = super(Disk, self).ToDict() 770 771 for attr in ("children",): 772 alist = bo.get(attr, None) 773 if alist: 774 bo[attr] = self._ContainerToDicts(alist) 775 return bo
776 777 @classmethod
778 - def FromDict(cls, val):
779 """Custom function for Disks 780 781 """ 782 obj = super(Disk, cls).FromDict(val) 783 if obj.children: 784 obj.children = cls._ContainerFromDicts(obj.children, list, Disk) 785 if obj.logical_id and isinstance(obj.logical_id, list): 786 obj.logical_id = tuple(obj.logical_id) 787 if obj.physical_id and isinstance(obj.physical_id, list): 788 obj.physical_id = tuple(obj.physical_id) 789 if obj.dev_type in constants.LDS_DRBD: 790 # we need a tuple of length six here 791 if len(obj.logical_id) < 6: 792 obj.logical_id += (None,) * (6 - len(obj.logical_id)) 793 return obj
794
795 - def __str__(self):
796 """Custom str() formatter for disks. 797 798 """ 799 if self.dev_type == constants.LD_LV: 800 val = "<LogicalVolume(/dev/%s/%s" % self.logical_id 801 elif self.dev_type in constants.LDS_DRBD: 802 node_a, node_b, port, minor_a, minor_b = self.logical_id[:5] 803 val = "<DRBD8(" 804 if self.physical_id is None: 805 phy = "unconfigured" 806 else: 807 phy = ("configured as %s:%s %s:%s" % 808 (self.physical_id[0], self.physical_id[1], 809 self.physical_id[2], self.physical_id[3])) 810 811 val += ("hosts=%s/%d-%s/%d, port=%s, %s, " % 812 (node_a, minor_a, node_b, minor_b, port, phy)) 813 if self.children and self.children.count(None) == 0: 814 val += "backend=%s, metadev=%s" % (self.children[0], self.children[1]) 815 else: 816 val += "no local storage" 817 else: 818 val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" % 819 (self.dev_type, self.logical_id, self.physical_id, self.children)) 820 if self.iv_name is None: 821 val += ", not visible" 822 else: 823 val += ", visible as /dev/%s" % self.iv_name 824 if isinstance(self.size, int): 825 val += ", size=%dm)>" % self.size 826 else: 827 val += ", size='%s')>" % (self.size,) 828 return val
829
830 - def Verify(self):
831 """Checks that this disk is correctly configured. 832 833 """ 834 all_errors = [] 835 if self.mode not in constants.DISK_ACCESS_SET: 836 all_errors.append("Disk access mode '%s' is invalid" % (self.mode, )) 837 return all_errors
838
839 - def UpgradeConfig(self):
840 """Fill defaults for missing configuration values. 841 842 """ 843 if self.children: 844 for child in self.children: 845 child.UpgradeConfig() 846 847 # FIXME: Make this configurable in Ganeti 2.7 848 self.params = {}
849 # add here config upgrade for this disk 850 851 @staticmethod
852 - def ComputeLDParams(disk_template, disk_params):
853 """Computes Logical Disk parameters from Disk Template parameters. 854 855 @type disk_template: string 856 @param disk_template: disk template, one of L{constants.DISK_TEMPLATES} 857 @type disk_params: dict 858 @param disk_params: disk template parameters; 859 dict(template_name -> parameters 860 @rtype: list(dict) 861 @return: a list of dicts, one for each node of the disk hierarchy. Each dict 862 contains the LD parameters of the node. The tree is flattened in-order. 863 864 """ 865 if disk_template not in constants.DISK_TEMPLATES: 866 raise errors.ProgrammerError("Unknown disk template %s" % disk_template) 867 868 assert disk_template in disk_params 869 870 result = list() 871 dt_params = disk_params[disk_template] 872 if disk_template == constants.DT_DRBD8: 873 drbd_params = { 874 constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE], 875 constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS], 876 constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS], 877 constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG], 878 constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM], 879 constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM], 880 constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC], 881 constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD], 882 constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET], 883 constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET], 884 constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE], 885 constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE], 886 } 887 888 drbd_params = \ 889 FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], 890 drbd_params) 891 892 result.append(drbd_params) 893 894 # data LV 895 data_params = { 896 constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES], 897 } 898 data_params = \ 899 FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], 900 data_params) 901 result.append(data_params) 902 903 # metadata LV 904 meta_params = { 905 constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES], 906 } 907 meta_params = \ 908 FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], 909 meta_params) 910 result.append(meta_params) 911 912 elif (disk_template == constants.DT_FILE or 913 disk_template == constants.DT_SHARED_FILE): 914 result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE]) 915 916 elif disk_template == constants.DT_PLAIN: 917 params = { 918 constants.LDP_STRIPES: dt_params[constants.LV_STRIPES], 919 } 920 params = \ 921 FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], 922 params) 923 result.append(params) 924 925 elif disk_template == constants.DT_BLOCK: 926 result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV]) 927 928 elif disk_template == constants.DT_RBD: 929 params = { 930 constants.LDP_POOL: dt_params[constants.RBD_POOL] 931 } 932 params = \ 933 FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], 934 params) 935 result.append(params) 936 937 return result
938
939 940 -class InstancePolicy(ConfigObject):
941 """Config object representing instance policy limits dictionary. 942 943 944 Note that this object is not actually used in the config, it's just 945 used as a placeholder for a few functions. 946 947 """ 948 @classmethod
949 - def CheckParameterSyntax(cls, ipolicy, check_std):
950 """ Check the instance policy for validity. 951 952 """ 953 for param in constants.ISPECS_PARAMETERS: 954 InstancePolicy.CheckISpecSyntax(ipolicy, param, check_std) 955 if constants.IPOLICY_DTS in ipolicy: 956 InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS]) 957 for key in constants.IPOLICY_PARAMETERS: 958 if key in ipolicy: 959 InstancePolicy.CheckParameter(key, ipolicy[key]) 960 wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS 961 if wrong_keys: 962 raise errors.ConfigurationError("Invalid keys in ipolicy: %s" % 963 utils.CommaJoin(wrong_keys))
964 965 @classmethod
966 - def CheckISpecSyntax(cls, ipolicy, name, check_std):
967 """Check the instance policy for validity on a given key. 968 969 We check if the instance policy makes sense for a given key, that is 970 if ipolicy[min][name] <= ipolicy[std][name] <= ipolicy[max][name]. 971 972 @type ipolicy: dict 973 @param ipolicy: dictionary with min, max, std specs 974 @type name: string 975 @param name: what are the limits for 976 @type check_std: bool 977 @param check_std: Whether to check std value or just assume compliance 978 @raise errors.ConfigureError: when specs for given name are not valid 979 980 """ 981 min_v = ipolicy[constants.ISPECS_MIN].get(name, 0) 982 983 if check_std: 984 std_v = ipolicy[constants.ISPECS_STD].get(name, min_v) 985 std_msg = std_v 986 else: 987 std_v = min_v 988 std_msg = "-" 989 990 max_v = ipolicy[constants.ISPECS_MAX].get(name, std_v) 991 err = ("Invalid specification of min/max/std values for %s: %s/%s/%s" % 992 (name, 993 ipolicy[constants.ISPECS_MIN].get(name, "-"), 994 ipolicy[constants.ISPECS_MAX].get(name, "-"), 995 std_msg)) 996 if min_v > std_v or std_v > max_v: 997 raise errors.ConfigurationError(err)
998 999 @classmethod
1000 - def CheckDiskTemplates(cls, disk_templates):
1001 """Checks the disk templates for validity. 1002 1003 """ 1004 wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES) 1005 if wrong: 1006 raise errors.ConfigurationError("Invalid disk template(s) %s" % 1007 utils.CommaJoin(wrong))
1008 1009 @classmethod
1010 - def CheckParameter(cls, key, value):
1011 """Checks a parameter. 1012 1013 Currently we expect all parameters to be float values. 1014 1015 """ 1016 try: 1017 float(value) 1018 except (TypeError, ValueError), err: 1019 raise errors.ConfigurationError("Invalid value for key" " '%s':" 1020 " '%s', error: %s" % (key, value, err))
1021
1022 1023 -class Instance(TaggableObject):
1024 """Config object representing an instance.""" 1025 __slots__ = [ 1026 "name", 1027 "primary_node", 1028 "os", 1029 "hypervisor", 1030 "hvparams", 1031 "beparams", 1032 "osparams", 1033 "admin_state", 1034 "nics", 1035 "disks", 1036 "disk_template", 1037 "network_port", 1038 "serial_no", 1039 ] + _TIMESTAMPS + _UUID 1040
1041 - def _ComputeSecondaryNodes(self):
1042 """Compute the list of secondary nodes. 1043 1044 This is a simple wrapper over _ComputeAllNodes. 1045 1046 """ 1047 all_nodes = set(self._ComputeAllNodes()) 1048 all_nodes.discard(self.primary_node) 1049 return tuple(all_nodes)
1050 1051 secondary_nodes = property(_ComputeSecondaryNodes, None, None, 1052 "List of secondary nodes") 1053
1054 - def _ComputeAllNodes(self):
1055 """Compute the list of all nodes. 1056 1057 Since the data is already there (in the drbd disks), keeping it as 1058 a separate normal attribute is redundant and if not properly 1059 synchronised can cause problems. Thus it's better to compute it 1060 dynamically. 1061 1062 """ 1063 def _Helper(nodes, device): 1064 """Recursively computes nodes given a top device.""" 1065 if device.dev_type in constants.LDS_DRBD: 1066 nodea, nodeb = device.logical_id[:2] 1067 nodes.add(nodea) 1068 nodes.add(nodeb) 1069 if device.children: 1070 for child in device.children: 1071 _Helper(nodes, child)
1072 1073 all_nodes = set() 1074 all_nodes.add(self.primary_node) 1075 for device in self.disks: 1076 _Helper(all_nodes, device) 1077 return tuple(all_nodes)
1078 1079 all_nodes = property(_ComputeAllNodes, None, None, 1080 "List of all nodes of the instance") 1081
1082 - def MapLVsByNode(self, lvmap=None, devs=None, node=None):
1083 """Provide a mapping of nodes to LVs this instance owns. 1084 1085 This function figures out what logical volumes should belong on 1086 which nodes, recursing through a device tree. 1087 1088 @param lvmap: optional dictionary to receive the 1089 'node' : ['lv', ...] data. 1090 1091 @return: None if lvmap arg is given, otherwise, a dictionary of 1092 the form { 'nodename' : ['volume1', 'volume2', ...], ... }; 1093 volumeN is of the form "vg_name/lv_name", compatible with 1094 GetVolumeList() 1095 1096 """ 1097 if node == None: 1098 node = self.primary_node 1099 1100 if lvmap is None: 1101 lvmap = { 1102 node: [], 1103 } 1104 ret = lvmap 1105 else: 1106 if not node in lvmap: 1107 lvmap[node] = [] 1108 ret = None 1109 1110 if not devs: 1111 devs = self.disks 1112 1113 for dev in devs: 1114 if dev.dev_type == constants.LD_LV: 1115 lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1]) 1116 1117 elif dev.dev_type in constants.LDS_DRBD: 1118 if dev.children: 1119 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0]) 1120 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1]) 1121 1122 elif dev.children: 1123 self.MapLVsByNode(lvmap, dev.children, node) 1124 1125 return ret
1126
1127 - def FindDisk(self, idx):
1128 """Find a disk given having a specified index. 1129 1130 This is just a wrapper that does validation of the index. 1131 1132 @type idx: int 1133 @param idx: the disk index 1134 @rtype: L{Disk} 1135 @return: the corresponding disk 1136 @raise errors.OpPrereqError: when the given index is not valid 1137 1138 """ 1139 try: 1140 idx = int(idx) 1141 return self.disks[idx] 1142 except (TypeError, ValueError), err: 1143 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err), 1144 errors.ECODE_INVAL) 1145 except IndexError: 1146 raise errors.OpPrereqError("Invalid disk index: %d (instace has disks" 1147 " 0 to %d" % (idx, len(self.disks) - 1), 1148 errors.ECODE_INVAL)
1149
1150 - def ToDict(self):
1151 """Instance-specific conversion to standard python types. 1152 1153 This replaces the children lists of objects with lists of standard 1154 python types. 1155 1156 """ 1157 bo = super(Instance, self).ToDict() 1158 1159 for attr in "nics", "disks": 1160 alist = bo.get(attr, None) 1161 if alist: 1162 nlist = self._ContainerToDicts(alist) 1163 else: 1164 nlist = [] 1165 bo[attr] = nlist 1166 return bo
1167 1168 @classmethod
1169 - def FromDict(cls, val):
1170 """Custom function for instances. 1171 1172 """ 1173 if "admin_state" not in val: 1174 if val.get("admin_up", False): 1175 val["admin_state"] = constants.ADMINST_UP 1176 else: 1177 val["admin_state"] = constants.ADMINST_DOWN 1178 if "admin_up" in val: 1179 del val["admin_up"] 1180 obj = super(Instance, cls).FromDict(val) 1181 obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC) 1182 obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk) 1183 return obj
1184
1185 - def UpgradeConfig(self):
1186 """Fill defaults for missing configuration values. 1187 1188 """ 1189 for nic in self.nics: 1190 nic.UpgradeConfig() 1191 for disk in self.disks: 1192 disk.UpgradeConfig() 1193 if self.hvparams: 1194 for key in constants.HVC_GLOBALS: 1195 try: 1196 del self.hvparams[key] 1197 except KeyError: 1198 pass 1199 if self.osparams is None: 1200 self.osparams = {} 1201 UpgradeBeParams(self.beparams)
1202
1203 1204 -class OS(ConfigObject):
1205 """Config object representing an operating system. 1206 1207 @type supported_parameters: list 1208 @ivar supported_parameters: a list of tuples, name and description, 1209 containing the supported parameters by this OS 1210 1211 @type VARIANT_DELIM: string 1212 @cvar VARIANT_DELIM: the variant delimiter 1213 1214 """ 1215 __slots__ = [ 1216 "name", 1217 "path", 1218 "api_versions", 1219 "create_script", 1220 "export_script", 1221 "import_script", 1222 "rename_script", 1223 "verify_script", 1224 "supported_variants", 1225 "supported_parameters", 1226 ] 1227 1228 VARIANT_DELIM = "+" 1229 1230 @classmethod
1231 - def SplitNameVariant(cls, name):
1232 """Splits the name into the proper name and variant. 1233 1234 @param name: the OS (unprocessed) name 1235 @rtype: list 1236 @return: a list of two elements; if the original name didn't 1237 contain a variant, it's returned as an empty string 1238 1239 """ 1240 nv = name.split(cls.VARIANT_DELIM, 1) 1241 if len(nv) == 1: 1242 nv.append("") 1243 return nv
1244 1245 @classmethod
1246 - def GetName(cls, name):
1247 """Returns the proper name of the os (without the variant). 1248 1249 @param name: the OS (unprocessed) name 1250 1251 """ 1252 return cls.SplitNameVariant(name)[0]
1253 1254 @classmethod
1255 - def GetVariant(cls, name):
1256 """Returns the variant the os (without the base name). 1257 1258 @param name: the OS (unprocessed) name 1259 1260 """ 1261 return cls.SplitNameVariant(name)[1]
1262
1263 1264 -class NodeHvState(ConfigObject):
1265 """Hypvervisor state on a node. 1266 1267 @ivar mem_total: Total amount of memory 1268 @ivar mem_node: Memory used by, or reserved for, the node itself (not always 1269 available) 1270 @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation 1271 rounding 1272 @ivar mem_inst: Memory used by instances living on node 1273 @ivar cpu_total: Total node CPU core count 1274 @ivar cpu_node: Number of CPU cores reserved for the node itself 1275 1276 """ 1277 __slots__ = [ 1278 "mem_total", 1279 "mem_node", 1280 "mem_hv", 1281 "mem_inst", 1282 "cpu_total", 1283 "cpu_node", 1284 ] + _TIMESTAMPS
1285
1286 1287 -class NodeDiskState(ConfigObject):
1288 """Disk state on a node. 1289 1290 """ 1291 __slots__ = [ 1292 "total", 1293 "reserved", 1294 "overhead", 1295 ] + _TIMESTAMPS
1296
1297 1298 -class Node(TaggableObject):
1299 """Config object representing a node. 1300 1301 @ivar hv_state: Hypervisor state (e.g. number of CPUs) 1302 @ivar hv_state_static: Hypervisor state overriden by user 1303 @ivar disk_state: Disk state (e.g. free space) 1304 @ivar disk_state_static: Disk state overriden by user 1305 1306 """ 1307 __slots__ = [ 1308 "name", 1309 "primary_ip", 1310 "secondary_ip", 1311 "serial_no", 1312 "master_candidate", 1313 "offline", 1314 "drained", 1315 "group", 1316 "master_capable", 1317 "vm_capable", 1318 "ndparams", 1319 "powered", 1320 "hv_state", 1321 "hv_state_static", 1322 "disk_state", 1323 "disk_state_static", 1324 ] + _TIMESTAMPS + _UUID 1325
1326 - def UpgradeConfig(self):
1327 """Fill defaults for missing configuration values. 1328 1329 """ 1330 # pylint: disable=E0203 1331 # because these are "defined" via slots, not manually 1332 if self.master_capable is None: 1333 self.master_capable = True 1334 1335 if self.vm_capable is None: 1336 self.vm_capable = True 1337 1338 if self.ndparams is None: 1339 self.ndparams = {} 1340 1341 if self.powered is None: 1342 self.powered = True
1343
1344 - def ToDict(self):
1345 """Custom function for serializing. 1346 1347 """ 1348 data = super(Node, self).ToDict() 1349 1350 hv_state = data.get("hv_state", None) 1351 if hv_state is not None: 1352 data["hv_state"] = self._ContainerToDicts(hv_state) 1353 1354 disk_state = data.get("disk_state", None) 1355 if disk_state is not None: 1356 data["disk_state"] = \ 1357 dict((key, self._ContainerToDicts(value)) 1358 for (key, value) in disk_state.items()) 1359 1360 return data
1361 1362 @classmethod
1363 - def FromDict(cls, val):
1364 """Custom function for deserializing. 1365 1366 """ 1367 obj = super(Node, cls).FromDict(val) 1368 1369 if obj.hv_state is not None: 1370 obj.hv_state = cls._ContainerFromDicts(obj.hv_state, dict, NodeHvState) 1371 1372 if obj.disk_state is not None: 1373 obj.disk_state = \ 1374 dict((key, cls._ContainerFromDicts(value, dict, NodeDiskState)) 1375 for (key, value) in obj.disk_state.items()) 1376 1377 return obj
1378
1379 1380 -class NodeGroup(TaggableObject):
1381 """Config object representing a node group.""" 1382 __slots__ = [ 1383 "name", 1384 "members", 1385 "ndparams", 1386 "diskparams", 1387 "ipolicy", 1388 "serial_no", 1389 "hv_state_static", 1390 "disk_state_static", 1391 "alloc_policy", 1392 ] + _TIMESTAMPS + _UUID 1393
1394 - def ToDict(self):
1395 """Custom function for nodegroup. 1396 1397 This discards the members object, which gets recalculated and is only kept 1398 in memory. 1399 1400 """ 1401 mydict = super(NodeGroup, self).ToDict() 1402 del mydict["members"] 1403 return mydict
1404 1405 @classmethod
1406 - def FromDict(cls, val):
1407 """Custom function for nodegroup. 1408 1409 The members slot is initialized to an empty list, upon deserialization. 1410 1411 """ 1412 obj = super(NodeGroup, cls).FromDict(val) 1413 obj.members = [] 1414 return obj
1415
1416 - def UpgradeConfig(self):
1417 """Fill defaults for missing configuration values. 1418 1419 """ 1420 if self.ndparams is None: 1421 self.ndparams = {} 1422 1423 if self.serial_no is None: 1424 self.serial_no = 1 1425 1426 if self.alloc_policy is None: 1427 self.alloc_policy = constants.ALLOC_POLICY_PREFERRED 1428 1429 # We only update mtime, and not ctime, since we would not be able 1430 # to provide a correct value for creation time. 1431 if self.mtime is None: 1432 self.mtime = time.time() 1433 1434 if self.diskparams is None: 1435 self.diskparams = {} 1436 if self.ipolicy is None: 1437 self.ipolicy = MakeEmptyIPolicy()
1438
1439 - def FillND(self, node):
1440 """Return filled out ndparams for L{objects.Node} 1441 1442 @type node: L{objects.Node} 1443 @param node: A Node object to fill 1444 @return a copy of the node's ndparams with defaults filled 1445 1446 """ 1447 return self.SimpleFillND(node.ndparams)
1448
1449 - def SimpleFillND(self, ndparams):
1450 """Fill a given ndparams dict with defaults. 1451 1452 @type ndparams: dict 1453 @param ndparams: the dict to fill 1454 @rtype: dict 1455 @return: a copy of the passed in ndparams with missing keys filled 1456 from the node group defaults 1457 1458 """ 1459 return FillDict(self.ndparams, ndparams)
1460
1461 1462 -class Cluster(TaggableObject):
1463 """Config object representing the cluster.""" 1464 __slots__ = [ 1465 "serial_no", 1466 "rsahostkeypub", 1467 "highest_used_port", 1468 "tcpudp_port_pool", 1469 "mac_prefix", 1470 "volume_group_name", 1471 "reserved_lvs", 1472 "drbd_usermode_helper", 1473 "default_bridge", 1474 "default_hypervisor", 1475 "master_node", 1476 "master_ip", 1477 "master_netdev", 1478 "master_netmask", 1479 "use_external_mip_script", 1480 "cluster_name", 1481 "file_storage_dir", 1482 "shared_file_storage_dir", 1483 "enabled_hypervisors", 1484 "hvparams", 1485 "ipolicy", 1486 "os_hvp", 1487 "beparams", 1488 "osparams", 1489 "nicparams", 1490 "ndparams", 1491 "diskparams", 1492 "candidate_pool_size", 1493 "modify_etc_hosts", 1494 "modify_ssh_setup", 1495 "maintain_node_health", 1496 "uid_pool", 1497 "default_iallocator", 1498 "hidden_os", 1499 "blacklisted_os", 1500 "primary_ip_family", 1501 "prealloc_wipe_disks", 1502 "hv_state_static", 1503 "disk_state_static", 1504 ] + _TIMESTAMPS + _UUID 1505
1506 - def UpgradeConfig(self):
1507 """Fill defaults for missing configuration values. 1508 1509 """ 1510 # pylint: disable=E0203 1511 # because these are "defined" via slots, not manually 1512 if self.hvparams is None: 1513 self.hvparams = constants.HVC_DEFAULTS 1514 else: 1515 for hypervisor in self.hvparams: 1516 self.hvparams[hypervisor] = FillDict( 1517 constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor]) 1518 1519 if self.os_hvp is None: 1520 self.os_hvp = {} 1521 1522 # osparams added before 2.2 1523 if self.osparams is None: 1524 self.osparams = {} 1525 1526 self.ndparams = UpgradeNDParams(self.ndparams) 1527 1528 self.beparams = UpgradeGroupedParams(self.beparams, 1529 constants.BEC_DEFAULTS) 1530 for beparams_group in self.beparams: 1531 UpgradeBeParams(self.beparams[beparams_group]) 1532 1533 migrate_default_bridge = not self.nicparams 1534 self.nicparams = UpgradeGroupedParams(self.nicparams, 1535 constants.NICC_DEFAULTS) 1536 if migrate_default_bridge: 1537 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \ 1538 self.default_bridge 1539 1540 if self.modify_etc_hosts is None: 1541 self.modify_etc_hosts = True 1542 1543 if self.modify_ssh_setup is None: 1544 self.modify_ssh_setup = True 1545 1546 # default_bridge is no longer used in 2.1. The slot is left there to 1547 # support auto-upgrading. It can be removed once we decide to deprecate 1548 # upgrading straight from 2.0. 1549 if self.default_bridge is not None: 1550 self.default_bridge = None 1551 1552 # default_hypervisor is just the first enabled one in 2.1. This slot and 1553 # code can be removed once upgrading straight from 2.0 is deprecated. 1554 if self.default_hypervisor is not None: 1555 self.enabled_hypervisors = ([self.default_hypervisor] + 1556 [hvname for hvname in self.enabled_hypervisors 1557 if hvname != self.default_hypervisor]) 1558 self.default_hypervisor = None 1559 1560 # maintain_node_health added after 2.1.1 1561 if self.maintain_node_health is None: 1562 self.maintain_node_health = False 1563 1564 if self.uid_pool is None: 1565 self.uid_pool = [] 1566 1567 if self.default_iallocator is None: 1568 self.default_iallocator = "" 1569 1570 # reserved_lvs added before 2.2 1571 if self.reserved_lvs is None: 1572 self.reserved_lvs = [] 1573 1574 # hidden and blacklisted operating systems added before 2.2.1 1575 if self.hidden_os is None: 1576 self.hidden_os = [] 1577 1578 if self.blacklisted_os is None: 1579 self.blacklisted_os = [] 1580 1581 # primary_ip_family added before 2.3 1582 if self.primary_ip_family is None: 1583 self.primary_ip_family = AF_INET 1584 1585 if self.master_netmask is None: 1586 ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family) 1587 self.master_netmask = ipcls.iplen 1588 1589 if self.prealloc_wipe_disks is None: 1590 self.prealloc_wipe_disks = False 1591 1592 # shared_file_storage_dir added before 2.5 1593 if self.shared_file_storage_dir is None: 1594 self.shared_file_storage_dir = "" 1595 1596 if self.use_external_mip_script is None: 1597 self.use_external_mip_script = False 1598 1599 if self.diskparams: 1600 self.diskparams = UpgradeDiskParams(self.diskparams) 1601 else: 1602 self.diskparams = constants.DISK_DT_DEFAULTS.copy() 1603 1604 # instance policy added before 2.6 1605 if self.ipolicy is None: 1606 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {}) 1607 else: 1608 # we can either make sure to upgrade the ipolicy always, or only 1609 # do it in some corner cases (e.g. 
missing keys); note that this 1610 # will break any removal of keys from the ipolicy dict 1611 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1612 1613 @property
1614 - def primary_hypervisor(self):
1615 """The first hypervisor is the primary. 1616 1617 Useful, for example, for L{Node}'s hv/disk state. 1618 1619 """ 1620 return self.enabled_hypervisors[0]
1621
1622 - def ToDict(self):
1623 """Custom function for cluster. 1624 1625 """ 1626 mydict = super(Cluster, self).ToDict() 1627 mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool) 1628 return mydict
1629 1630 @classmethod
1631 - def FromDict(cls, val):
1632 """Custom function for cluster. 1633 1634 """ 1635 obj = super(Cluster, cls).FromDict(val) 1636 if not isinstance(obj.tcpudp_port_pool, set): 1637 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool) 1638 return obj
1639
1640 - def SimpleFillDP(self, diskparams):
1641 """Fill a given diskparams dict with cluster defaults. 1642 1643 @param diskparams: The diskparams 1644 @return: The defaults dict 1645 1646 """ 1647 return FillDiskParams(self.diskparams, diskparams)
1648
1649 - def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1650 """Get the default hypervisor parameters for the cluster. 1651 1652 @param hypervisor: the hypervisor name 1653 @param os_name: if specified, we'll also update the defaults for this OS 1654 @param skip_keys: if passed, list of keys not to use 1655 @return: the defaults dict 1656 1657 """ 1658 if skip_keys is None: 1659 skip_keys = [] 1660 1661 fill_stack = [self.hvparams.get(hypervisor, {})] 1662 if os_name is not None: 1663 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {}) 1664 fill_stack.append(os_hvp) 1665 1666 ret_dict = {} 1667 for o_dict in fill_stack: 1668 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys) 1669 1670 return ret_dict
1671
1672 - def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1673 """Fill a given hvparams dict with cluster defaults. 1674 1675 @type hv_name: string 1676 @param hv_name: the hypervisor to use 1677 @type os_name: string 1678 @param os_name: the OS to use for overriding the hypervisor defaults 1679 @type skip_globals: boolean 1680 @param skip_globals: if True, the global hypervisor parameters will 1681 not be filled 1682 @rtype: dict 1683 @return: a copy of the given hvparams with missing keys filled from 1684 the cluster defaults 1685 1686 """ 1687 if skip_globals: 1688 skip_keys = constants.HVC_GLOBALS 1689 else: 1690 skip_keys = [] 1691 1692 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys) 1693 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1694
1695 - def FillHV(self, instance, skip_globals=False):
1696 """Fill an instance's hvparams dict with cluster defaults. 1697 1698 @type instance: L{objects.Instance} 1699 @param instance: the instance parameter to fill 1700 @type skip_globals: boolean 1701 @param skip_globals: if True, the global hypervisor parameters will 1702 not be filled 1703 @rtype: dict 1704 @return: a copy of the instance's hvparams with missing keys filled from 1705 the cluster defaults 1706 1707 """ 1708 return self.SimpleFillHV(instance.hypervisor, instance.os, 1709 instance.hvparams, skip_globals)
1710
1711 - def SimpleFillBE(self, beparams):
1712 """Fill a given beparams dict with cluster defaults. 1713 1714 @type beparams: dict 1715 @param beparams: the dict to fill 1716 @rtype: dict 1717 @return: a copy of the passed in beparams with missing keys filled 1718 from the cluster defaults 1719 1720 """ 1721 return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1722
1723 - def FillBE(self, instance):
1724 """Fill an instance's beparams dict with cluster defaults. 1725 1726 @type instance: L{objects.Instance} 1727 @param instance: the instance parameter to fill 1728 @rtype: dict 1729 @return: a copy of the instance's beparams with missing keys filled from 1730 the cluster defaults 1731 1732 """ 1733 return self.SimpleFillBE(instance.beparams)
1734
1735 - def SimpleFillNIC(self, nicparams):
1736 """Fill a given nicparams dict with cluster defaults. 1737 1738 @type nicparams: dict 1739 @param nicparams: the dict to fill 1740 @rtype: dict 1741 @return: a copy of the passed in nicparams with missing keys filled 1742 from the cluster defaults 1743 1744 """ 1745 return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1746
1747 - def SimpleFillOS(self, os_name, os_params):
1748 """Fill an instance's osparams dict with cluster defaults. 1749 1750 @type os_name: string 1751 @param os_name: the OS name to use 1752 @type os_params: dict 1753 @param os_params: the dict to fill with default values 1754 @rtype: dict 1755 @return: a copy of the instance's osparams with missing keys filled from 1756 the cluster defaults 1757 1758 """ 1759 name_only = os_name.split("+", 1)[0] 1760 # base OS 1761 result = self.osparams.get(name_only, {}) 1762 # OS with variant 1763 result = FillDict(result, self.osparams.get(os_name, {})) 1764 # specified params 1765 return FillDict(result, os_params)
1766 1767 @staticmethod
1768 - def SimpleFillHvState(hv_state):
1769 """Fill an hv_state sub dict with cluster defaults. 1770 1771 """ 1772 return FillDict(constants.HVST_DEFAULTS, hv_state)
1773 1774 @staticmethod
1775 - def SimpleFillDiskState(disk_state):
1776 """Fill an disk_state sub dict with cluster defaults. 1777 1778 """ 1779 return FillDict(constants.DS_DEFAULTS, disk_state)
1780
1781 - def FillND(self, node, nodegroup):
1782 """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node} 1783 1784 @type node: L{objects.Node} 1785 @param node: A Node object to fill 1786 @type nodegroup: L{objects.NodeGroup} 1787 @param nodegroup: A Node object to fill 1788 @return a copy of the node's ndparams with defaults filled 1789 1790 """ 1791 return self.SimpleFillND(nodegroup.FillND(node))
1792
1793 - def SimpleFillND(self, ndparams):
1794 """Fill a given ndparams dict with defaults. 1795 1796 @type ndparams: dict 1797 @param ndparams: the dict to fill 1798 @rtype: dict 1799 @return: a copy of the passed in ndparams with missing keys filled 1800 from the cluster defaults 1801 1802 """ 1803 return FillDict(self.ndparams, ndparams)
1804
1805 - def SimpleFillIPolicy(self, ipolicy):
1806 """ Fill instance policy dict with defaults. 1807 1808 @type ipolicy: dict 1809 @param ipolicy: the dict to fill 1810 @rtype: dict 1811 @return: a copy of passed ipolicy with missing keys filled from 1812 the cluster defaults 1813 1814 """ 1815 return FillIPolicy(self.ipolicy, ipolicy)
1816
1817 1818 -class BlockDevStatus(ConfigObject):
1819 """Config object representing the status of a block device.""" 1820 __slots__ = [ 1821 "dev_path", 1822 "major", 1823 "minor", 1824 "sync_percent", 1825 "estimated_time", 1826 "is_degraded", 1827 "ldisk_status", 1828 ]
1829
1830 1831 -class ImportExportStatus(ConfigObject):
1832 """Config object representing the status of an import or export.""" 1833 __slots__ = [ 1834 "recent_output", 1835 "listen_port", 1836 "connected", 1837 "progress_mbytes", 1838 "progress_throughput", 1839 "progress_eta", 1840 "progress_percent", 1841 "exit_status", 1842 "error_message", 1843 ] + _TIMESTAMPS
1844
1845 1846 -class ImportExportOptions(ConfigObject):
1847 """Options for import/export daemon 1848 1849 @ivar key_name: X509 key name (None for cluster certificate) 1850 @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate) 1851 @ivar compress: Compression method (one of L{constants.IEC_ALL}) 1852 @ivar magic: Used to ensure the connection goes to the right disk 1853 @ivar ipv6: Whether to use IPv6 1854 @ivar connect_timeout: Number of seconds for establishing connection 1855 1856 """ 1857 __slots__ = [ 1858 "key_name", 1859 "ca_pem", 1860 "compress", 1861 "magic", 1862 "ipv6", 1863 "connect_timeout", 1864 ]
1865
1866 1867 -class ConfdRequest(ConfigObject):
1868 """Object holding a confd request. 1869 1870 @ivar protocol: confd protocol version 1871 @ivar type: confd query type 1872 @ivar query: query request 1873 @ivar rsalt: requested reply salt 1874 1875 """ 1876 __slots__ = [ 1877 "protocol", 1878 "type", 1879 "query", 1880 "rsalt", 1881 ]
1882
1883 1884 -class ConfdReply(ConfigObject):
1885 """Object holding a confd reply. 1886 1887 @ivar protocol: confd protocol version 1888 @ivar status: reply status code (ok, error) 1889 @ivar answer: confd query reply 1890 @ivar serial: configuration serial number 1891 1892 """ 1893 __slots__ = [ 1894 "protocol", 1895 "status", 1896 "answer", 1897 "serial", 1898 ]
1899
1900 1901 -class QueryFieldDefinition(ConfigObject):
1902 """Object holding a query field definition. 1903 1904 @ivar name: Field name 1905 @ivar title: Human-readable title 1906 @ivar kind: Field type 1907 @ivar doc: Human-readable description 1908 1909 """ 1910 __slots__ = [ 1911 "name", 1912 "title", 1913 "kind", 1914 "doc", 1915 ]
1916
1917 1918 -class _QueryResponseBase(ConfigObject):
1919 __slots__ = [ 1920 "fields", 1921 ] 1922
1923 - def ToDict(self):
1924 """Custom function for serializing. 1925 1926 """ 1927 mydict = super(_QueryResponseBase, self).ToDict() 1928 mydict["fields"] = self._ContainerToDicts(mydict["fields"]) 1929 return mydict
1930 1931 @classmethod
1932 - def FromDict(cls, val):
1933 """Custom function for de-serializing. 1934 1935 """ 1936 obj = super(_QueryResponseBase, cls).FromDict(val) 1937 obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition) 1938 return obj
1939
1940 1941 -class QueryResponse(_QueryResponseBase):
1942 """Object holding the response to a query. 1943 1944 @ivar fields: List of L{QueryFieldDefinition} objects 1945 @ivar data: Requested data 1946 1947 """ 1948 __slots__ = [ 1949 "data", 1950 ]
1951
1952 1953 -class QueryFieldsRequest(ConfigObject):
1954 """Object holding a request for querying available fields. 1955 1956 """ 1957 __slots__ = [ 1958 "what", 1959 "fields", 1960 ]
1961
1962 1963 -class QueryFieldsResponse(_QueryResponseBase):
1964 """Object holding the response to a query for fields. 1965 1966 @ivar fields: List of L{QueryFieldDefinition} objects 1967 1968 """ 1969 __slots__ = [ 1970 ]
1971
1972 1973 -class MigrationStatus(ConfigObject):
1974 """Object holding the status of a migration. 1975 1976 """ 1977 __slots__ = [ 1978 "status", 1979 "transferred_ram", 1980 "total_ram", 1981 ]
1982
1983 1984 -class InstanceConsole(ConfigObject):
1985 """Object describing how to access the console of an instance. 1986 1987 """ 1988 __slots__ = [ 1989 "instance", 1990 "kind", 1991 "message", 1992 "host", 1993 "port", 1994 "user", 1995 "command", 1996 "display", 1997 ] 1998
1999 - def Validate(self):
2000 """Validates contents of this object. 2001 2002 """ 2003 assert self.kind in constants.CONS_ALL, "Unknown console type" 2004 assert self.instance, "Missing instance name" 2005 assert self.message or self.kind in [constants.CONS_SSH, 2006 constants.CONS_SPICE, 2007 constants.CONS_VNC] 2008 assert self.host or self.kind == constants.CONS_MESSAGE 2009 assert self.port or self.kind in [constants.CONS_MESSAGE, 2010 constants.CONS_SSH] 2011 assert self.user or self.kind in [constants.CONS_MESSAGE, 2012 constants.CONS_SPICE, 2013 constants.CONS_VNC] 2014 assert self.command or self.kind in [constants.CONS_MESSAGE, 2015 constants.CONS_SPICE, 2016 constants.CONS_VNC] 2017 assert self.display or self.kind in [constants.CONS_MESSAGE, 2018 constants.CONS_SPICE, 2019 constants.CONS_SSH] 2020 return True
2021
2022 2023 -class SerializableConfigParser(ConfigParser.SafeConfigParser):
2024 """Simple wrapper over ConfigParse that allows serialization. 2025 2026 This class is basically ConfigParser.SafeConfigParser with two 2027 additional methods that allow it to serialize/unserialize to/from a 2028 buffer. 2029 2030 """
2031 - def Dumps(self):
2032 """Dump this instance and return the string representation.""" 2033 buf = StringIO() 2034 self.write(buf) 2035 return buf.getvalue()
2036 2037 @classmethod
2038 - def Loads(cls, data):
2039 """Load data from a string.""" 2040 buf = StringIO(data) 2041 cfp = cls() 2042 cfp.readfp(buf) 2043 return cfp
2044
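A short usage sketch for the serialization wrapper (section and option names are arbitrary):

  from ganeti import objects

  cfg = objects.SerializableConfigParser()
  cfg.add_section("instance")
  cfg.set("instance", "name", "inst1.example.com")
  data = cfg.Dumps()    # INI-style text, e.g. "[instance]\nname = inst1..."
  restored = objects.SerializableConfigParser.Loads(data)
  print restored.get("instance", "name")    # inst1.example.com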