Package ganeti :: Module objects

Source Code for Module ganeti.objects

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc. 
   5  # 
   6  # This program is free software; you can redistribute it and/or modify 
   7  # it under the terms of the GNU General Public License as published by 
   8  # the Free Software Foundation; either version 2 of the License, or 
   9  # (at your option) any later version. 
  10  # 
  11  # This program is distributed in the hope that it will be useful, but 
  12  # WITHOUT ANY WARRANTY; without even the implied warranty of 
  13  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
  14  # General Public License for more details. 
  15  # 
  16  # You should have received a copy of the GNU General Public License 
  17  # along with this program; if not, write to the Free Software 
  18  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
  19  # 02110-1301, USA. 
  20   
  21   
  22  """Transportable objects for Ganeti. 
  23   
  24  This module provides small, mostly data-only objects which are safe to 
  25  pass to and from external parties. 
  26   
  27  """ 
  28   
  29  # pylint: disable-msg=E0203,W0201 
  30   
  31  # E0203: Access to member %r before its definition, since we use 
   32  # objects.py which doesn't explicitly initialise its members 
  33   
  34  # W0201: Attribute '%s' defined outside __init__ 
  35   
  36  import ConfigParser 
  37  import re 
  38  import copy 
  39  import time 
  40  from cStringIO import StringIO 
  41   
  42  from ganeti import errors 
  43  from ganeti import constants 
  44   
  45  from socket import AF_INET 
  46   
  47   
  48  __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance", 
  49             "OS", "Node", "NodeGroup", "Cluster", "FillDict"] 
  50   
  51  _TIMESTAMPS = ["ctime", "mtime"] 
  52  _UUID = ["uuid"] 
53 54 55 -def FillDict(defaults_dict, custom_dict, skip_keys=None):
56 """Basic function to apply settings on top a default dict. 57 58 @type defaults_dict: dict 59 @param defaults_dict: dictionary holding the default values 60 @type custom_dict: dict 61 @param custom_dict: dictionary holding customized value 62 @type skip_keys: list 63 @param skip_keys: which keys not to fill 64 @rtype: dict 65 @return: dict with the 'full' values 66 67 """ 68 ret_dict = copy.deepcopy(defaults_dict) 69 ret_dict.update(custom_dict) 70 if skip_keys: 71 for k in skip_keys: 72 try: 73 del ret_dict[k] 74 except KeyError: 75 pass 76 return ret_dict
77
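Illustrative usage (not part of the module): FillDict layers the customized values on top of a deep copy of the defaults and then drops any keys listed in skip_keys. The parameter names below are made up for the example.

    defaults = {"memory": 128, "vcpus": 1, "auto_balance": True}
    custom = {"memory": 512}
    # custom values win; "auto_balance" is removed via skip_keys
    assert FillDict(defaults, custom, skip_keys=["auto_balance"]) == \
        {"memory": 512, "vcpus": 1}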
78 79 -def UpgradeGroupedParams(target, defaults):
80 """Update all groups for the target parameter. 81 82 @type target: dict of dicts 83 @param target: {group: {parameter: value}} 84 @type defaults: dict 85 @param defaults: default parameter values 86 87 """ 88 if target is None: 89 target = {constants.PP_DEFAULT: defaults} 90 else: 91 for group in target: 92 target[group] = FillDict(defaults, target[group]) 93 return target
94
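Illustrative usage (not part of the module): a None target is replaced by a single default group keyed by constants.PP_DEFAULT, while existing groups are completed from the defaults. The group and parameter names below are made up.

    defaults = {"memory": 128, "vcpus": 1}
    target = {"group1": {"memory": 512}}
    upgraded = UpgradeGroupedParams(target, defaults)
    # missing parameters are filled in from the defaults
    assert upgraded["group1"] == {"memory": 512, "vcpus": 1}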
95 96 -class ConfigObject(object):
97 """A generic config object. 98 99 It has the following properties: 100 101 - provides somewhat safe recursive unpickling and pickling for its classes 102 - unset attributes which are defined in slots are always returned 103 as None instead of raising an error 104 105 Classes derived from this must always declare __slots__ (we use many 106 config objects and the memory reduction is useful) 107 108 """ 109 __slots__ = [] 110
111 - def __init__(self, **kwargs):
112 for k, v in kwargs.iteritems(): 113 setattr(self, k, v)
114
115 - def __getattr__(self, name):
116 if name not in self._all_slots(): 117 raise AttributeError("Invalid object attribute %s.%s" % 118 (type(self).__name__, name)) 119 return None
120
121 - def __setstate__(self, state):
122 slots = self._all_slots() 123 for name in state: 124 if name in slots: 125 setattr(self, name, state[name])
126 127 @classmethod
128 - def _all_slots(cls):
129 """Compute the list of all declared slots for a class. 130 131 """ 132 slots = [] 133 for parent in cls.__mro__: 134 slots.extend(getattr(parent, "__slots__", [])) 135 return slots
136
137 - def ToDict(self):
138 """Convert to a dict holding only standard python types. 139 140 The generic routine just dumps all of this object's attributes in 141 a dict. It does not work if the class has children who are 142 ConfigObjects themselves (e.g. the nics list in an Instance), in 143 which case the object should subclass the function in order to 144 make sure all objects returned are only standard python types. 145 146 """ 147 result = {} 148 for name in self._all_slots(): 149 value = getattr(self, name, None) 150 if value is not None: 151 result[name] = value 152 return result
153 154 __getstate__ = ToDict 155 156 @classmethod
157 - def FromDict(cls, val):
158 """Create an object from a dictionary. 159 160 This generic routine takes a dict, instantiates a new instance of 161 the given class, and sets attributes based on the dict content. 162 163 As for `ToDict`, this does not work if the class has children 164 who are ConfigObjects themselves (e.g. the nics list in an 165 Instance), in which case the object should subclass the function 166 and alter the objects. 167 168 """ 169 if not isinstance(val, dict): 170 raise errors.ConfigurationError("Invalid object passed to FromDict:" 171 " expected dict, got %s" % type(val)) 172 val_str = dict([(str(k), v) for k, v in val.iteritems()]) 173 obj = cls(**val_str) # pylint: disable-msg=W0142 174 return obj
175 176 @staticmethod
177 - def _ContainerToDicts(container):
178 """Convert the elements of a container to standard python types. 179 180 This method converts a container with elements derived from 181 ConfigData to standard python types. If the container is a dict, 182 we don't touch the keys, only the values. 183 184 """ 185 if isinstance(container, dict): 186 ret = dict([(k, v.ToDict()) for k, v in container.iteritems()]) 187 elif isinstance(container, (list, tuple, set, frozenset)): 188 ret = [elem.ToDict() for elem in container] 189 else: 190 raise TypeError("Invalid type %s passed to _ContainerToDicts" % 191 type(container)) 192 return ret
193 194 @staticmethod
195 - def _ContainerFromDicts(source, c_type, e_type):
196 """Convert a container from standard python types. 197 198 This method converts a container with standard python types to 199 ConfigData objects. If the container is a dict, we don't touch the 200 keys, only the values. 201 202 """ 203 if not isinstance(c_type, type): 204 raise TypeError("Container type %s passed to _ContainerFromDicts is" 205 " not a type" % type(c_type)) 206 if source is None: 207 source = c_type() 208 if c_type is dict: 209 ret = dict([(k, e_type.FromDict(v)) for k, v in source.iteritems()]) 210 elif c_type in (list, tuple, set, frozenset): 211 ret = c_type([e_type.FromDict(elem) for elem in source]) 212 else: 213 raise TypeError("Invalid container type %s passed to" 214 " _ContainerFromDicts" % c_type) 215 return ret
216
217 - def Copy(self):
218 """Makes a deep copy of the current object and its children. 219 220 """ 221 dict_form = self.ToDict() 222 clone_obj = self.__class__.FromDict(dict_form) 223 return clone_obj
224
225 - def __repr__(self):
226 """Implement __repr__ for ConfigObjects.""" 227 return repr(self.ToDict())
228
229 - def UpgradeConfig(self):
230 """Fill defaults for missing configuration values. 231 232 This method will be called at configuration load time, and its 233 implementation will be object dependent. 234 235 """ 236 pass
237
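Illustrative sketch of the ConfigObject contract (the _Example subclass below is hypothetical and not part of the module): attributes are restricted to the declared slots, unset slots read as None, and ToDict/FromDict round-trip the object through plain python types.

    class _Example(ConfigObject):
      __slots__ = ["name", "size"]

    obj = _Example(name="disk0")
    assert obj.size is None            # unset slot attributes read as None
    obj.size = 128
    clone = _Example.FromDict(obj.ToDict())
    assert (clone.name, clone.size) == ("disk0", 128)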
238 239 -class TaggableObject(ConfigObject):
240 """An generic class supporting tags. 241 242 """ 243 __slots__ = ["tags"] 244 VALID_TAG_RE = re.compile("^[\w.+*/:@-]+$") 245 246 @classmethod
247 - def ValidateTag(cls, tag):
248 """Check if a tag is valid. 249 250 If the tag is invalid, an errors.TagError will be raised. The 251 function has no return value. 252 253 """ 254 if not isinstance(tag, basestring): 255 raise errors.TagError("Invalid tag type (not a string)") 256 if len(tag) > constants.MAX_TAG_LEN: 257 raise errors.TagError("Tag too long (>%d characters)" % 258 constants.MAX_TAG_LEN) 259 if not tag: 260 raise errors.TagError("Tags cannot be empty") 261 if not cls.VALID_TAG_RE.match(tag): 262 raise errors.TagError("Tag contains invalid characters")
263
264 - def GetTags(self):
265 """Return the tags list. 266 267 """ 268 tags = getattr(self, "tags", None) 269 if tags is None: 270 tags = self.tags = set() 271 return tags
272
273 - def AddTag(self, tag):
274 """Add a new tag. 275 276 """ 277 self.ValidateTag(tag) 278 tags = self.GetTags() 279 if len(tags) >= constants.MAX_TAGS_PER_OBJ: 280 raise errors.TagError("Too many tags") 281 self.GetTags().add(tag)
282
283 - def RemoveTag(self, tag):
284 """Remove a tag. 285 286 """ 287 self.ValidateTag(tag) 288 tags = self.GetTags() 289 try: 290 tags.remove(tag) 291 except KeyError: 292 raise errors.TagError("Tag not found")
293
294 - def ToDict(self):
295 """Taggable-object-specific conversion to standard python types. 296 297 This replaces the tags set with a list. 298 299 """ 300 bo = super(TaggableObject, self).ToDict() 301 302 tags = bo.get("tags", None) 303 if isinstance(tags, set): 304 bo["tags"] = list(tags) 305 return bo
306 307 @classmethod
308 - def FromDict(cls, val):
309 """Custom function for instances. 310 311 """ 312 obj = super(TaggableObject, cls).FromDict(val) 313 if hasattr(obj, "tags") and isinstance(obj.tags, list): 314 obj.tags = set(obj.tags) 315 return obj
316
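Illustrative sketch (the _Tagged subclass below is hypothetical): tags are validated on addition, stored as a set, and serialized as a list by ToDict.

    class _Tagged(TaggableObject):
      __slots__ = []

    obj = _Tagged()
    obj.AddTag("env:prod")
    assert obj.GetTags() == set(["env:prod"])
    assert obj.ToDict()["tags"] == ["env:prod"]   # the set becomes a list
    obj.RemoveTag("env:prod")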
317 318 -class ConfigData(ConfigObject):
319 """Top-level config object.""" 320 __slots__ = [ 321 "version", 322 "cluster", 323 "nodes", 324 "nodegroups", 325 "instances", 326 "serial_no", 327 ] + _TIMESTAMPS 328
329 - def ToDict(self):
330 """Custom function for top-level config data. 331 332 This just replaces the list of instances, nodes and the cluster 333 with standard python types. 334 335 """ 336 mydict = super(ConfigData, self).ToDict() 337 mydict["cluster"] = mydict["cluster"].ToDict() 338 for key in "nodes", "instances", "nodegroups": 339 mydict[key] = self._ContainerToDicts(mydict[key]) 340 341 return mydict
342 343 @classmethod
344 - def FromDict(cls, val):
345 """Custom function for top-level config data 346 347 """ 348 obj = super(ConfigData, cls).FromDict(val) 349 obj.cluster = Cluster.FromDict(obj.cluster) 350 obj.nodes = cls._ContainerFromDicts(obj.nodes, dict, Node) 351 obj.instances = cls._ContainerFromDicts(obj.instances, dict, Instance) 352 obj.nodegroups = cls._ContainerFromDicts(obj.nodegroups, dict, NodeGroup) 353 return obj
354
355 - def HasAnyDiskOfType(self, dev_type):
356 """Check if in there is at disk of the given type in the configuration. 357 358 @type dev_type: L{constants.LDS_BLOCK} 359 @param dev_type: the type to look for 360 @rtype: boolean 361 @return: boolean indicating if a disk of the given type was found or not 362 363 """ 364 for instance in self.instances.values(): 365 for disk in instance.disks: 366 if disk.IsBasedOnDiskType(dev_type): 367 return True 368 return False
369
370 - def UpgradeConfig(self):
371 """Fill defaults for missing configuration values. 372 373 """ 374 self.cluster.UpgradeConfig() 375 for node in self.nodes.values(): 376 node.UpgradeConfig() 377 for instance in self.instances.values(): 378 instance.UpgradeConfig() 379 if self.nodegroups is None: 380 self.nodegroups = {} 381 for nodegroup in self.nodegroups.values(): 382 nodegroup.UpgradeConfig() 383 if self.cluster.drbd_usermode_helper is None: 384 # To decide if we set an helper let's check if at least one instance has 385 # a DRBD disk. This does not cover all the possible scenarios but it 386 # gives a good approximation. 387 if self.HasAnyDiskOfType(constants.LD_DRBD8): 388 self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
389
390 391 -class NIC(ConfigObject):
392 """Config object representing a network card.""" 393 __slots__ = ["mac", "ip", "nicparams"] 394 395 @classmethod
396 - def CheckParameterSyntax(cls, nicparams):
397 """Check the given parameters for validity. 398 399 @type nicparams: dict 400 @param nicparams: dictionary with parameter names/value 401 @raise errors.ConfigurationError: when a parameter is not valid 402 403 """ 404 if nicparams[constants.NIC_MODE] not in constants.NIC_VALID_MODES: 405 err = "Invalid nic mode: %s" % nicparams[constants.NIC_MODE] 406 raise errors.ConfigurationError(err) 407 408 if (nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED and 409 not nicparams[constants.NIC_LINK]): 410 err = "Missing bridged nic link" 411 raise errors.ConfigurationError(err)
412
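Illustrative usage (not part of the module): CheckParameterSyntax rejects an unknown NIC mode and a bridged NIC without a link. The dict below only fills the two parameters the check inspects.

    params = {constants.NIC_MODE: constants.NIC_MODE_BRIDGED,
              constants.NIC_LINK: ""}
    try:
      NIC.CheckParameterSyntax(params)
    except errors.ConfigurationError:
      pass   # expected: bridged mode requires a link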
413 414 -class Disk(ConfigObject):
415 """Config object representing a block device.""" 416 __slots__ = ["dev_type", "logical_id", "physical_id", 417 "children", "iv_name", "size", "mode"] 418
419 - def CreateOnSecondary(self):
420 """Test if this device needs to be created on a secondary node.""" 421 return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
422
423 - def AssembleOnSecondary(self):
424 """Test if this device needs to be assembled on a secondary node.""" 425 return self.dev_type in (constants.LD_DRBD8, constants.LD_LV)
426
427 - def OpenOnSecondary(self):
428 """Test if this device needs to be opened on a secondary node.""" 429 return self.dev_type in (constants.LD_LV,)
430
431 - def StaticDevPath(self):
432 """Return the device path if this device type has a static one. 433 434 Some devices (LVM for example) live always at the same /dev/ path, 435 irrespective of their status. For such devices, we return this 436 path, for others we return None. 437 438 @warning: The path returned is not a normalized pathname; callers 439 should check that it is a valid path. 440 441 """ 442 if self.dev_type == constants.LD_LV: 443 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1]) 444 return None
445
446 - def ChildrenNeeded(self):
447 """Compute the needed number of children for activation. 448 449 This method will return either -1 (all children) or a positive 450 number denoting the minimum number of children needed for 451 activation (only mirrored devices will usually return >=0). 452 453 Currently, only DRBD8 supports diskless activation (therefore we 454 return 0), for all other we keep the previous semantics and return 455 -1. 456 457 """ 458 if self.dev_type == constants.LD_DRBD8: 459 return 0 460 return -1
461
462 - def IsBasedOnDiskType(self, dev_type):
463 """Check if the disk or its children are based on the given type. 464 465 @type dev_type: L{constants.LDS_BLOCK} 466 @param dev_type: the type to look for 467 @rtype: boolean 468 @return: boolean indicating if a device of the given type was found or not 469 470 """ 471 if self.children: 472 for child in self.children: 473 if child.IsBasedOnDiskType(dev_type): 474 return True 475 return self.dev_type == dev_type
476
477 - def GetNodes(self, node):
478 """This function returns the nodes this device lives on. 479 480 Given the node on which the parent of the device lives on (or, in 481 case of a top-level device, the primary node of the devices' 482 instance), this function will return a list of nodes on which this 483 devices needs to (or can) be assembled. 484 485 """ 486 if self.dev_type in [constants.LD_LV, constants.LD_FILE]: 487 result = [node] 488 elif self.dev_type in constants.LDS_DRBD: 489 result = [self.logical_id[0], self.logical_id[1]] 490 if node not in result: 491 raise errors.ConfigurationError("DRBD device passed unknown node") 492 else: 493 raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type) 494 return result
495
496 - def ComputeNodeTree(self, parent_node):
497 """Compute the node/disk tree for this disk and its children. 498 499 This method, given the node on which the parent disk lives, will 500 return the list of all (node, disk) pairs which describe the disk 501 tree in the most compact way. For example, a drbd/lvm stack 502 will be returned as (primary_node, drbd) and (secondary_node, drbd) 503 which represents all the top-level devices on the nodes. 504 505 """ 506 my_nodes = self.GetNodes(parent_node) 507 result = [(node, self) for node in my_nodes] 508 if not self.children: 509 # leaf device 510 return result 511 for node in my_nodes: 512 for child in self.children: 513 child_result = child.ComputeNodeTree(node) 514 if len(child_result) == 1: 515 # child (and all its descendants) is simple, doesn't split 516 # over multiple hosts, so we don't need to describe it, our 517 # own entry for this node describes it completely 518 continue 519 else: 520 # check if child nodes differ from my nodes; note that 521 # subdisk can differ from the child itself, and be instead 522 # one of its descendants 523 for subnode, subdisk in child_result: 524 if subnode not in my_nodes: 525 result.append((subnode, subdisk)) 526 # otherwise child is under our own node, so we ignore this 527 # entry (but probably the other results in the list will 528 # be different) 529 return result
530
531 - def ComputeGrowth(self, amount):
532 """Compute the per-VG growth requirements. 533 534 This only works for VG-based disks. 535 536 @type amount: integer 537 @param amount: the desired increase in (user-visible) disk space 538 @rtype: dict 539 @return: a dictionary of volume-groups and the required size 540 541 """ 542 if self.dev_type == constants.LD_LV: 543 return {self.logical_id[0]: amount} 544 elif self.dev_type == constants.LD_DRBD8: 545 if self.children: 546 return self.children[0].ComputeGrowth(amount) 547 else: 548 return {} 549 else: 550 # Other disk types do not require VG space 551 return {}
552
553 - def RecordGrow(self, amount):
554 """Update the size of this disk after growth. 555 556 This method recurses over the disks's children and updates their 557 size correspondigly. The method needs to be kept in sync with the 558 actual algorithms from bdev. 559 560 """ 561 if self.dev_type == constants.LD_LV or self.dev_type == constants.LD_FILE: 562 self.size += amount 563 elif self.dev_type == constants.LD_DRBD8: 564 if self.children: 565 self.children[0].RecordGrow(amount) 566 self.size += amount 567 else: 568 raise errors.ProgrammerError("Disk.RecordGrow called for unsupported" 569 " disk type %s" % self.dev_type)
570
571 - def UnsetSize(self):
572 """Sets recursively the size to zero for the disk and its children. 573 574 """ 575 if self.children: 576 for child in self.children: 577 child.UnsetSize() 578 self.size = 0
579
580 - def SetPhysicalID(self, target_node, nodes_ip):
581 """Convert the logical ID to the physical ID. 582 583 This is used only for drbd, which needs ip/port configuration. 584 585 The routine descends down and updates its children also, because 586 this helps when the only the top device is passed to the remote 587 node. 588 589 Arguments: 590 - target_node: the node we wish to configure for 591 - nodes_ip: a mapping of node name to ip 592 593 The target_node must exist in in nodes_ip, and must be one of the 594 nodes in the logical ID for each of the DRBD devices encountered 595 in the disk tree. 596 597 """ 598 if self.children: 599 for child in self.children: 600 child.SetPhysicalID(target_node, nodes_ip) 601 602 if self.logical_id is None and self.physical_id is not None: 603 return 604 if self.dev_type in constants.LDS_DRBD: 605 pnode, snode, port, pminor, sminor, secret = self.logical_id 606 if target_node not in (pnode, snode): 607 raise errors.ConfigurationError("DRBD device not knowing node %s" % 608 target_node) 609 pnode_ip = nodes_ip.get(pnode, None) 610 snode_ip = nodes_ip.get(snode, None) 611 if pnode_ip is None or snode_ip is None: 612 raise errors.ConfigurationError("Can't find primary or secondary node" 613 " for %s" % str(self)) 614 p_data = (pnode_ip, port) 615 s_data = (snode_ip, port) 616 if pnode == target_node: 617 self.physical_id = p_data + s_data + (pminor, secret) 618 else: # it must be secondary, we tested above 619 self.physical_id = s_data + p_data + (sminor, secret) 620 else: 621 self.physical_id = self.logical_id 622 return
623
624 - def ToDict(self):
625 """Disk-specific conversion to standard python types. 626 627 This replaces the children lists of objects with lists of 628 standard python types. 629 630 """ 631 bo = super(Disk, self).ToDict() 632 633 for attr in ("children",): 634 alist = bo.get(attr, None) 635 if alist: 636 bo[attr] = self._ContainerToDicts(alist) 637 return bo
638 639 @classmethod
640 - def FromDict(cls, val):
641 """Custom function for Disks 642 643 """ 644 obj = super(Disk, cls).FromDict(val) 645 if obj.children: 646 obj.children = cls._ContainerFromDicts(obj.children, list, Disk) 647 if obj.logical_id and isinstance(obj.logical_id, list): 648 obj.logical_id = tuple(obj.logical_id) 649 if obj.physical_id and isinstance(obj.physical_id, list): 650 obj.physical_id = tuple(obj.physical_id) 651 if obj.dev_type in constants.LDS_DRBD: 652 # we need a tuple of length six here 653 if len(obj.logical_id) < 6: 654 obj.logical_id += (None,) * (6 - len(obj.logical_id)) 655 return obj
656
657 - def __str__(self):
658 """Custom str() formatter for disks. 659 660 """ 661 if self.dev_type == constants.LD_LV: 662 val = "<LogicalVolume(/dev/%s/%s" % self.logical_id 663 elif self.dev_type in constants.LDS_DRBD: 664 node_a, node_b, port, minor_a, minor_b = self.logical_id[:5] 665 val = "<DRBD8(" 666 if self.physical_id is None: 667 phy = "unconfigured" 668 else: 669 phy = ("configured as %s:%s %s:%s" % 670 (self.physical_id[0], self.physical_id[1], 671 self.physical_id[2], self.physical_id[3])) 672 673 val += ("hosts=%s/%d-%s/%d, port=%s, %s, " % 674 (node_a, minor_a, node_b, minor_b, port, phy)) 675 if self.children and self.children.count(None) == 0: 676 val += "backend=%s, metadev=%s" % (self.children[0], self.children[1]) 677 else: 678 val += "no local storage" 679 else: 680 val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" % 681 (self.dev_type, self.logical_id, self.physical_id, self.children)) 682 if self.iv_name is None: 683 val += ", not visible" 684 else: 685 val += ", visible as /dev/%s" % self.iv_name 686 if isinstance(self.size, int): 687 val += ", size=%dm)>" % self.size 688 else: 689 val += ", size='%s')>" % (self.size,) 690 return val
691
692 - def Verify(self):
693 """Checks that this disk is correctly configured. 694 695 """ 696 all_errors = [] 697 if self.mode not in constants.DISK_ACCESS_SET: 698 all_errors.append("Disk access mode '%s' is invalid" % (self.mode, )) 699 return all_errors
700
701 - def UpgradeConfig(self):
702 """Fill defaults for missing configuration values. 703 704 """ 705 if self.children: 706 for child in self.children: 707 child.UpgradeConfig()
708 # add here config upgrade for this disk
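Illustrative sketch of the Disk helpers above, using a plain LVM-backed disk (the volume group and volume names are made up; LD_LV is the logical-volume disk type referenced throughout this module).

    disk = Disk(dev_type=constants.LD_LV, size=1024,
                logical_id=("xenvg", "disk0"), children=[], iv_name="sda")
    assert disk.IsBasedOnDiskType(constants.LD_LV)
    assert disk.StaticDevPath() == "/dev/xenvg/disk0"
    disk.RecordGrow(512)
    assert disk.size == 1536
    assert disk.ComputeGrowth(256) == {"xenvg": 256}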
709 710 711 -class Instance(TaggableObject):
712 """Config object representing an instance.""" 713 __slots__ = [ 714 "name", 715 "primary_node", 716 "os", 717 "hypervisor", 718 "hvparams", 719 "beparams", 720 "osparams", 721 "admin_up", 722 "nics", 723 "disks", 724 "disk_template", 725 "network_port", 726 "serial_no", 727 ] + _TIMESTAMPS + _UUID 728
729 - def _ComputeSecondaryNodes(self):
730 """Compute the list of secondary nodes. 731 732 This is a simple wrapper over _ComputeAllNodes. 733 734 """ 735 all_nodes = set(self._ComputeAllNodes()) 736 all_nodes.discard(self.primary_node) 737 return tuple(all_nodes)
738 739 secondary_nodes = property(_ComputeSecondaryNodes, None, None, 740 "List of secondary nodes") 741
742 - def _ComputeAllNodes(self):
743 """Compute the list of all nodes. 744 745 Since the data is already there (in the drbd disks), keeping it as 746 a separate normal attribute is redundant and if not properly 747 synchronised can cause problems. Thus it's better to compute it 748 dynamically. 749 750 """ 751 def _Helper(nodes, device): 752 """Recursively computes nodes given a top device.""" 753 if device.dev_type in constants.LDS_DRBD: 754 nodea, nodeb = device.logical_id[:2] 755 nodes.add(nodea) 756 nodes.add(nodeb) 757 if device.children: 758 for child in device.children: 759 _Helper(nodes, child)
760 761 all_nodes = set() 762 all_nodes.add(self.primary_node) 763 for device in self.disks: 764 _Helper(all_nodes, device) 765 return tuple(all_nodes)
766 767 all_nodes = property(_ComputeAllNodes, None, None, 768 "List of all nodes of the instance") 769
770 - def MapLVsByNode(self, lvmap=None, devs=None, node=None):
771 """Provide a mapping of nodes to LVs this instance owns. 772 773 This function figures out what logical volumes should belong on 774 which nodes, recursing through a device tree. 775 776 @param lvmap: optional dictionary to receive the 777 'node' : ['lv', ...] data. 778 779 @return: None if lvmap arg is given, otherwise, a dictionary of 780 the form { 'nodename' : ['volume1', 'volume2', ...], ... }; 781 volumeN is of the form "vg_name/lv_name", compatible with 782 GetVolumeList() 783 784 """ 785 if node == None: 786 node = self.primary_node 787 788 if lvmap is None: 789 lvmap = { node : [] } 790 ret = lvmap 791 else: 792 if not node in lvmap: 793 lvmap[node] = [] 794 ret = None 795 796 if not devs: 797 devs = self.disks 798 799 for dev in devs: 800 if dev.dev_type == constants.LD_LV: 801 lvmap[node].append(dev.logical_id[0]+"/"+dev.logical_id[1]) 802 803 elif dev.dev_type in constants.LDS_DRBD: 804 if dev.children: 805 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0]) 806 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1]) 807 808 elif dev.children: 809 self.MapLVsByNode(lvmap, dev.children, node) 810 811 return ret
812
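Illustrative usage (node, volume group and instance names are made up): for a plain LVM instance, MapLVsByNode reports all volumes on the primary node in "vg_name/lv_name" form.

    inst = Instance(name="inst1.example.com", primary_node="node1",
                    disks=[Disk(dev_type=constants.LD_LV, size=1024,
                                logical_id=("xenvg", "inst1-disk0"))])
    assert inst.MapLVsByNode() == {"node1": ["xenvg/inst1-disk0"]}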
813 - def FindDisk(self, idx):
814 """Find a disk given having a specified index. 815 816 This is just a wrapper that does validation of the index. 817 818 @type idx: int 819 @param idx: the disk index 820 @rtype: L{Disk} 821 @return: the corresponding disk 822 @raise errors.OpPrereqError: when the given index is not valid 823 824 """ 825 try: 826 idx = int(idx) 827 return self.disks[idx] 828 except (TypeError, ValueError), err: 829 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err), 830 errors.ECODE_INVAL) 831 except IndexError: 832 raise errors.OpPrereqError("Invalid disk index: %d (instace has disks" 833 " 0 to %d" % (idx, len(self.disks) - 1), 834 errors.ECODE_INVAL)
835
836 - def ToDict(self):
837 """Instance-specific conversion to standard python types. 838 839 This replaces the children lists of objects with lists of standard 840 python types. 841 842 """ 843 bo = super(Instance, self).ToDict() 844 845 for attr in "nics", "disks": 846 alist = bo.get(attr, None) 847 if alist: 848 nlist = self._ContainerToDicts(alist) 849 else: 850 nlist = [] 851 bo[attr] = nlist 852 return bo
853 854 @classmethod
855 - def FromDict(cls, val):
856 """Custom function for instances. 857 858 """ 859 obj = super(Instance, cls).FromDict(val) 860 obj.nics = cls._ContainerFromDicts(obj.nics, list, NIC) 861 obj.disks = cls._ContainerFromDicts(obj.disks, list, Disk) 862 return obj
863
864 - def UpgradeConfig(self):
865 """Fill defaults for missing configuration values. 866 867 """ 868 for nic in self.nics: 869 nic.UpgradeConfig() 870 for disk in self.disks: 871 disk.UpgradeConfig() 872 if self.hvparams: 873 for key in constants.HVC_GLOBALS: 874 try: 875 del self.hvparams[key] 876 except KeyError: 877 pass 878 if self.osparams is None: 879 self.osparams = {}
880
881 882 -class OS(ConfigObject):
883 """Config object representing an operating system. 884 885 @type supported_parameters: list 886 @ivar supported_parameters: a list of tuples, name and description, 887 containing the supported parameters by this OS 888 889 @type VARIANT_DELIM: string 890 @cvar VARIANT_DELIM: the variant delimiter 891 892 """ 893 __slots__ = [ 894 "name", 895 "path", 896 "api_versions", 897 "create_script", 898 "export_script", 899 "import_script", 900 "rename_script", 901 "verify_script", 902 "supported_variants", 903 "supported_parameters", 904 ] 905 906 VARIANT_DELIM = "+" 907 908 @classmethod
909 - def SplitNameVariant(cls, name):
910 """Splits the name into the proper name and variant. 911 912 @param name: the OS (unprocessed) name 913 @rtype: list 914 @return: a list of two elements; if the original name didn't 915 contain a variant, it's returned as an empty string 916 917 """ 918 nv = name.split(cls.VARIANT_DELIM, 1) 919 if len(nv) == 1: 920 nv.append("") 921 return nv
922 923 @classmethod
924 - def GetName(cls, name):
925 """Returns the proper name of the os (without the variant). 926 927 @param name: the OS (unprocessed) name 928 929 """ 930 return cls.SplitNameVariant(name)[0]
931 932 @classmethod
933 - def GetVariant(cls, name):
934 """Returns the variant the os (without the base name). 935 936 @param name: the OS (unprocessed) name 937 938 """ 939 return cls.SplitNameVariant(name)[1]
940
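Illustrative usage (the OS names are made up): the "+" delimiter splits an OS name into its base name and variant; a name without a variant yields an empty variant.

    assert OS.SplitNameVariant("debootstrap+squeeze") == ["debootstrap", "squeeze"]
    assert OS.GetName("debootstrap+squeeze") == "debootstrap"
    assert OS.GetVariant("debootstrap") == ""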
941 942 -class Node(TaggableObject):
943 """Config object representing a node.""" 944 __slots__ = [ 945 "name", 946 "primary_ip", 947 "secondary_ip", 948 "serial_no", 949 "master_candidate", 950 "offline", 951 "drained", 952 "group", 953 "master_capable", 954 "vm_capable", 955 "ndparams", 956 "powered", 957 ] + _TIMESTAMPS + _UUID 958
959 - def UpgradeConfig(self):
960 """Fill defaults for missing configuration values. 961 962 """ 963 # pylint: disable-msg=E0203 964 # because these are "defined" via slots, not manually 965 if self.master_capable is None: 966 self.master_capable = True 967 968 if self.vm_capable is None: 969 self.vm_capable = True 970 971 if self.ndparams is None: 972 self.ndparams = {} 973 974 if self.powered is None: 975 self.powered = True
976
977 978 -class NodeGroup(ConfigObject):
979 """Config object representing a node group.""" 980 __slots__ = [ 981 "name", 982 "members", 983 "ndparams", 984 "serial_no", 985 "alloc_policy", 986 ] + _TIMESTAMPS + _UUID 987
988 - def ToDict(self):
989 """Custom function for nodegroup. 990 991 This discards the members object, which gets recalculated and is only kept 992 in memory. 993 994 """ 995 mydict = super(NodeGroup, self).ToDict() 996 del mydict["members"] 997 return mydict
998 999 @classmethod
1000 - def FromDict(cls, val):
1001 """Custom function for nodegroup. 1002 1003 The members slot is initialized to an empty list, upon deserialization. 1004 1005 """ 1006 obj = super(NodeGroup, cls).FromDict(val) 1007 obj.members = [] 1008 return obj
1009
1010 - def UpgradeConfig(self):
1011 """Fill defaults for missing configuration values. 1012 1013 """ 1014 if self.ndparams is None: 1015 self.ndparams = {} 1016 1017 if self.serial_no is None: 1018 self.serial_no = 1 1019 1020 if self.alloc_policy is None: 1021 self.alloc_policy = constants.ALLOC_POLICY_PREFERRED 1022 1023 # We only update mtime, and not ctime, since we would not be able to provide 1024 # a correct value for creation time. 1025 if self.mtime is None: 1026 self.mtime = time.time()
1027
1028 - def FillND(self, node):
1029 """Return filled out ndparams for L{object.Node} 1030 1031 @type node: L{objects.Node} 1032 @param node: A Node object to fill 1033 @return a copy of the node's ndparams with defaults filled 1034 1035 """ 1036 return self.SimpleFillND(node.ndparams)
1037
1038 - def SimpleFillND(self, ndparams):
1039 """Fill a given ndparams dict with defaults. 1040 1041 @type ndparams: dict 1042 @param ndparams: the dict to fill 1043 @rtype: dict 1044 @return: a copy of the passed in ndparams with missing keys filled 1045 from the node group defaults 1046 1047 """ 1048 return FillDict(self.ndparams, ndparams)
1049
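Illustrative usage (the parameter names and values are made up): node-level ndparams override the group defaults, and missing keys are filled in from the group.

    group = NodeGroup(ndparams={"oob_program": "/bin/true", "spindle_count": 1})
    node = Node(name="node1", ndparams={"spindle_count": 4})
    assert group.FillND(node) == {"oob_program": "/bin/true", "spindle_count": 4}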
1050 1051 -class Cluster(TaggableObject):
1052 """Config object representing the cluster.""" 1053 __slots__ = [ 1054 "serial_no", 1055 "rsahostkeypub", 1056 "highest_used_port", 1057 "tcpudp_port_pool", 1058 "mac_prefix", 1059 "volume_group_name", 1060 "reserved_lvs", 1061 "drbd_usermode_helper", 1062 "default_bridge", 1063 "default_hypervisor", 1064 "master_node", 1065 "master_ip", 1066 "master_netdev", 1067 "cluster_name", 1068 "file_storage_dir", 1069 "enabled_hypervisors", 1070 "hvparams", 1071 "os_hvp", 1072 "beparams", 1073 "osparams", 1074 "nicparams", 1075 "ndparams", 1076 "candidate_pool_size", 1077 "modify_etc_hosts", 1078 "modify_ssh_setup", 1079 "maintain_node_health", 1080 "uid_pool", 1081 "default_iallocator", 1082 "hidden_os", 1083 "blacklisted_os", 1084 "primary_ip_family", 1085 "prealloc_wipe_disks", 1086 ] + _TIMESTAMPS + _UUID 1087
1088 - def UpgradeConfig(self):
1089 """Fill defaults for missing configuration values. 1090 1091 """ 1092 # pylint: disable-msg=E0203 1093 # because these are "defined" via slots, not manually 1094 if self.hvparams is None: 1095 self.hvparams = constants.HVC_DEFAULTS 1096 else: 1097 for hypervisor in self.hvparams: 1098 self.hvparams[hypervisor] = FillDict( 1099 constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor]) 1100 1101 if self.os_hvp is None: 1102 self.os_hvp = {} 1103 1104 # osparams added before 2.2 1105 if self.osparams is None: 1106 self.osparams = {} 1107 1108 if self.ndparams is None: 1109 self.ndparams = constants.NDC_DEFAULTS 1110 1111 self.beparams = UpgradeGroupedParams(self.beparams, 1112 constants.BEC_DEFAULTS) 1113 migrate_default_bridge = not self.nicparams 1114 self.nicparams = UpgradeGroupedParams(self.nicparams, 1115 constants.NICC_DEFAULTS) 1116 if migrate_default_bridge: 1117 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \ 1118 self.default_bridge 1119 1120 if self.modify_etc_hosts is None: 1121 self.modify_etc_hosts = True 1122 1123 if self.modify_ssh_setup is None: 1124 self.modify_ssh_setup = True 1125 1126 # default_bridge is no longer used in 2.1. The slot is left there to 1127 # support auto-upgrading. It can be removed once we decide to deprecate 1128 # upgrading straight from 2.0. 1129 if self.default_bridge is not None: 1130 self.default_bridge = None 1131 1132 # default_hypervisor is just the first enabled one in 2.1. This slot and 1133 # code can be removed once upgrading straight from 2.0 is deprecated. 1134 if self.default_hypervisor is not None: 1135 self.enabled_hypervisors = ([self.default_hypervisor] + 1136 [hvname for hvname in self.enabled_hypervisors 1137 if hvname != self.default_hypervisor]) 1138 self.default_hypervisor = None 1139 1140 # maintain_node_health added after 2.1.1 1141 if self.maintain_node_health is None: 1142 self.maintain_node_health = False 1143 1144 if self.uid_pool is None: 1145 self.uid_pool = [] 1146 1147 if self.default_iallocator is None: 1148 self.default_iallocator = "" 1149 1150 # reserved_lvs added before 2.2 1151 if self.reserved_lvs is None: 1152 self.reserved_lvs = [] 1153 1154 # hidden and blacklisted operating systems added before 2.2.1 1155 if self.hidden_os is None: 1156 self.hidden_os = [] 1157 1158 if self.blacklisted_os is None: 1159 self.blacklisted_os = [] 1160 1161 # primary_ip_family added before 2.3 1162 if self.primary_ip_family is None: 1163 self.primary_ip_family = AF_INET 1164 1165 if self.prealloc_wipe_disks is None: 1166 self.prealloc_wipe_disks = False
1167
1168 - def ToDict(self):
1169 """Custom function for cluster. 1170 1171 """ 1172 mydict = super(Cluster, self).ToDict() 1173 mydict["tcpudp_port_pool"] = list(self.tcpudp_port_pool) 1174 return mydict
1175 1176 @classmethod
1177 - def FromDict(cls, val):
1178 """Custom function for cluster. 1179 1180 """ 1181 obj = super(Cluster, cls).FromDict(val) 1182 if not isinstance(obj.tcpudp_port_pool, set): 1183 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool) 1184 return obj
1185
1186 - def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1187 """Get the default hypervisor parameters for the cluster. 1188 1189 @param hypervisor: the hypervisor name 1190 @param os_name: if specified, we'll also update the defaults for this OS 1191 @param skip_keys: if passed, list of keys not to use 1192 @return: the defaults dict 1193 1194 """ 1195 if skip_keys is None: 1196 skip_keys = [] 1197 1198 fill_stack = [self.hvparams.get(hypervisor, {})] 1199 if os_name is not None: 1200 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {}) 1201 fill_stack.append(os_hvp) 1202 1203 ret_dict = {} 1204 for o_dict in fill_stack: 1205 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys) 1206 1207 return ret_dict
1208
1209 - def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1210 """Fill a given hvparams dict with cluster defaults. 1211 1212 @type hv_name: string 1213 @param hv_name: the hypervisor to use 1214 @type os_name: string 1215 @param os_name: the OS to use for overriding the hypervisor defaults 1216 @type skip_globals: boolean 1217 @param skip_globals: if True, the global hypervisor parameters will 1218 not be filled 1219 @rtype: dict 1220 @return: a copy of the given hvparams with missing keys filled from 1221 the cluster defaults 1222 1223 """ 1224 if skip_globals: 1225 skip_keys = constants.HVC_GLOBALS 1226 else: 1227 skip_keys = [] 1228 1229 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys) 1230 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1231
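Illustrative sketch of the fill order (hypervisor, OS and parameter names are made up): cluster-wide hvparams are overridden by per-OS hvparams (os_hvp), which in turn are overridden by the hvparams passed in.

    cluster = Cluster(hvparams={"xen-pvm": {"kernel_path": "/boot/vmlinuz",
                                            "root_path": "/dev/sda1"}},
                      os_hvp={"debian": {"xen-pvm": {"root_path": "/dev/xvda1"}}})
    filled = cluster.SimpleFillHV("xen-pvm", "debian",
                                  {"kernel_path": "/boot/vmlinuz-custom"})
    assert filled == {"kernel_path": "/boot/vmlinuz-custom",
                      "root_path": "/dev/xvda1"}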
1232 - def FillHV(self, instance, skip_globals=False):
1233 """Fill an instance's hvparams dict with cluster defaults. 1234 1235 @type instance: L{objects.Instance} 1236 @param instance: the instance parameter to fill 1237 @type skip_globals: boolean 1238 @param skip_globals: if True, the global hypervisor parameters will 1239 not be filled 1240 @rtype: dict 1241 @return: a copy of the instance's hvparams with missing keys filled from 1242 the cluster defaults 1243 1244 """ 1245 return self.SimpleFillHV(instance.hypervisor, instance.os, 1246 instance.hvparams, skip_globals)
1247
1248 - def SimpleFillBE(self, beparams):
1249 """Fill a given beparams dict with cluster defaults. 1250 1251 @type beparams: dict 1252 @param beparams: the dict to fill 1253 @rtype: dict 1254 @return: a copy of the passed in beparams with missing keys filled 1255 from the cluster defaults 1256 1257 """ 1258 return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1259
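Illustrative usage (the backend parameter names are made up; PP_DEFAULT is the default parameter group key used throughout this module):

    cluster = Cluster(beparams={constants.PP_DEFAULT: {"memory": 128, "vcpus": 1}})
    assert cluster.SimpleFillBE({"memory": 512}) == {"memory": 512, "vcpus": 1}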
1260 - def FillBE(self, instance):
1261 """Fill an instance's beparams dict with cluster defaults. 1262 1263 @type instance: L{objects.Instance} 1264 @param instance: the instance parameter to fill 1265 @rtype: dict 1266 @return: a copy of the instance's beparams with missing keys filled from 1267 the cluster defaults 1268 1269 """ 1270 return self.SimpleFillBE(instance.beparams)
1271
1272 - def SimpleFillNIC(self, nicparams):
1273 """Fill a given nicparams dict with cluster defaults. 1274 1275 @type nicparams: dict 1276 @param nicparams: the dict to fill 1277 @rtype: dict 1278 @return: a copy of the passed in nicparams with missing keys filled 1279 from the cluster defaults 1280 1281 """ 1282 return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1283
1284 - def SimpleFillOS(self, os_name, os_params):
1285 """Fill an instance's osparams dict with cluster defaults. 1286 1287 @type os_name: string 1288 @param os_name: the OS name to use 1289 @type os_params: dict 1290 @param os_params: the dict to fill with default values 1291 @rtype: dict 1292 @return: a copy of the instance's osparams with missing keys filled from 1293 the cluster defaults 1294 1295 """ 1296 name_only = os_name.split("+", 1)[0] 1297 # base OS 1298 result = self.osparams.get(name_only, {}) 1299 # OS with variant 1300 result = FillDict(result, self.osparams.get(os_name, {})) 1301 # specified params 1302 return FillDict(result, os_params)
1303
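Illustrative usage (OS and parameter names are made up): base-OS defaults are overridden by variant-specific defaults and finally by the supplied parameters.

    cluster = Cluster(osparams={"debootstrap": {"dhcp": "yes", "mirror": "a"},
                                "debootstrap+squeeze": {"mirror": "b"}})
    assert cluster.SimpleFillOS("debootstrap+squeeze", {"dhcp": "no"}) == \
        {"dhcp": "no", "mirror": "b"}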
1304 - def FillND(self, node, nodegroup):
1305 """Return filled out ndparams for L{objects.NodeGroup} and L{object.Node} 1306 1307 @type node: L{objects.Node} 1308 @param node: A Node object to fill 1309 @type nodegroup: L{objects.NodeGroup} 1310 @param nodegroup: A Node object to fill 1311 @return a copy of the node's ndparams with defaults filled 1312 1313 """ 1314 return self.SimpleFillND(nodegroup.FillND(node))
1315
1316 - def SimpleFillND(self, ndparams):
1317 """Fill a given ndparams dict with defaults. 1318 1319 @type ndparams: dict 1320 @param ndparams: the dict to fill 1321 @rtype: dict 1322 @return: a copy of the passed in ndparams with missing keys filled 1323 from the cluster defaults 1324 1325 """ 1326 return FillDict(self.ndparams, ndparams)
1327
1328 1329 -class BlockDevStatus(ConfigObject):
1330 """Config object representing the status of a block device.""" 1331 __slots__ = [ 1332 "dev_path", 1333 "major", 1334 "minor", 1335 "sync_percent", 1336 "estimated_time", 1337 "is_degraded", 1338 "ldisk_status", 1339 ]
1340
1341 1342 -class ImportExportStatus(ConfigObject):
1343 """Config object representing the status of an import or export.""" 1344 __slots__ = [ 1345 "recent_output", 1346 "listen_port", 1347 "connected", 1348 "progress_mbytes", 1349 "progress_throughput", 1350 "progress_eta", 1351 "progress_percent", 1352 "exit_status", 1353 "error_message", 1354 ] + _TIMESTAMPS
1355
1356 1357 -class ImportExportOptions(ConfigObject):
1358 """Options for import/export daemon 1359 1360 @ivar key_name: X509 key name (None for cluster certificate) 1361 @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate) 1362 @ivar compress: Compression method (one of L{constants.IEC_ALL}) 1363 @ivar magic: Used to ensure the connection goes to the right disk 1364 @ivar ipv6: Whether to use IPv6 1365 @ivar connect_timeout: Number of seconds for establishing connection 1366 1367 """ 1368 __slots__ = [ 1369 "key_name", 1370 "ca_pem", 1371 "compress", 1372 "magic", 1373 "ipv6", 1374 "connect_timeout", 1375 ]
1376
1377 1378 -class ConfdRequest(ConfigObject):
1379 """Object holding a confd request. 1380 1381 @ivar protocol: confd protocol version 1382 @ivar type: confd query type 1383 @ivar query: query request 1384 @ivar rsalt: requested reply salt 1385 1386 """ 1387 __slots__ = [ 1388 "protocol", 1389 "type", 1390 "query", 1391 "rsalt", 1392 ]
1393
1394 1395 -class ConfdReply(ConfigObject):
1396 """Object holding a confd reply. 1397 1398 @ivar protocol: confd protocol version 1399 @ivar status: reply status code (ok, error) 1400 @ivar answer: confd query reply 1401 @ivar serial: configuration serial number 1402 1403 """ 1404 __slots__ = [ 1405 "protocol", 1406 "status", 1407 "answer", 1408 "serial", 1409 ]
1410
1411 1412 -class QueryFieldDefinition(ConfigObject):
1413 """Object holding a query field definition. 1414 1415 @ivar name: Field name 1416 @ivar title: Human-readable title 1417 @ivar kind: Field type 1418 1419 """ 1420 __slots__ = [ 1421 "name", 1422 "title", 1423 "kind", 1424 ]
1425
1426 1427 -class _QueryResponseBase(ConfigObject):
1428 __slots__ = [ 1429 "fields", 1430 ] 1431
1432 - def ToDict(self):
1433 """Custom function for serializing. 1434 1435 """ 1436 mydict = super(_QueryResponseBase, self).ToDict() 1437 mydict["fields"] = self._ContainerToDicts(mydict["fields"]) 1438 return mydict
1439 1440 @classmethod
1441 - def FromDict(cls, val):
1442 """Custom function for de-serializing. 1443 1444 """ 1445 obj = super(_QueryResponseBase, cls).FromDict(val) 1446 obj.fields = cls._ContainerFromDicts(obj.fields, list, QueryFieldDefinition) 1447 return obj
1448
1449 1450 -class QueryRequest(ConfigObject):
1451 """Object holding a query request. 1452 1453 """ 1454 __slots__ = [ 1455 "what", 1456 "fields", 1457 "filter", 1458 ]
1459
1460 1461 -class QueryResponse(_QueryResponseBase):
1462 """Object holding the response to a query. 1463 1464 @ivar fields: List of L{QueryFieldDefinition} objects 1465 @ivar data: Requested data 1466 1467 """ 1468 __slots__ = [ 1469 "data", 1470 ]
1471
1472 1473 -class QueryFieldsRequest(ConfigObject):
1474 """Object holding a request for querying available fields. 1475 1476 """ 1477 __slots__ = [ 1478 "what", 1479 "fields", 1480 ]
1481
1482 1483 -class QueryFieldsResponse(_QueryResponseBase):
1484 """Object holding the response to a query for fields. 1485 1486 @ivar fields: List of L{QueryFieldDefinition} objects 1487 1488 """ 1489 __slots__ = [ 1490 ]
1491
1492 1493 -class InstanceConsole(ConfigObject):
1494 """Object describing how to access the console of an instance. 1495 1496 """ 1497 __slots__ = [ 1498 "instance", 1499 "kind", 1500 "message", 1501 "host", 1502 "port", 1503 "user", 1504 "command", 1505 "display", 1506 ] 1507
1508 - def Validate(self):
1509 """Validates contents of this object. 1510 1511 """ 1512 assert self.kind in constants.CONS_ALL, "Unknown console type" 1513 assert self.instance, "Missing instance name" 1514 assert self.message or self.kind in [constants.CONS_SSH, constants.CONS_VNC] 1515 assert self.host or self.kind == constants.CONS_MESSAGE 1516 assert self.port or self.kind in [constants.CONS_MESSAGE, 1517 constants.CONS_SSH] 1518 assert self.user or self.kind in [constants.CONS_MESSAGE, 1519 constants.CONS_VNC] 1520 assert self.command or self.kind in [constants.CONS_MESSAGE, 1521 constants.CONS_VNC] 1522 assert self.display or self.kind in [constants.CONS_MESSAGE, 1523 constants.CONS_SSH] 1524 return True
1525
1526 1527 -class SerializableConfigParser(ConfigParser.SafeConfigParser):
1528 """Simple wrapper over ConfigParse that allows serialization. 1529 1530 This class is basically ConfigParser.SafeConfigParser with two 1531 additional methods that allow it to serialize/unserialize to/from a 1532 buffer. 1533 1534 """
1535 - def Dumps(self):
1536 """Dump this instance and return the string representation.""" 1537 buf = StringIO() 1538 self.write(buf) 1539 return buf.getvalue()
1540 1541 @classmethod
1542 - def Loads(cls, data):
1543 """Load data from a string.""" 1544 buf = StringIO(data) 1545 cfp = cls() 1546 cfp.readfp(buf) 1547 return cfp
1548
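Illustrative usage (section and option names are made up): Dumps serializes the parser to a string and Loads rebuilds an equivalent parser from it.

    cfp = SerializableConfigParser()
    cfp.add_section("instance")
    cfp.set("instance", "name", "inst1.example.com")
    data = cfp.Dumps()
    loaded = SerializableConfigParser.Loads(data)
    assert loaded.get("instance", "name") == "inst1.example.com"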