Package ganeti :: Module objects
[hide private]
[frames] | [no frames]

Source Code for Module ganeti.objects

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30   
  31  """Transportable objects for Ganeti. 
  32   
  33  This module provides small, mostly data-only objects which are safe to 
  34  pass to and from external parties. 
  35   
  36  """ 
  37   
  38  # pylint: disable=E0203,W0201,R0902 
  39   
  40  # E0203: Access to member %r before its definition, since we use 
  41  # objects.py which doesn't explicitly initialise its members 
  42   
  43  # W0201: Attribute '%s' defined outside __init__ 
  44   
  45  # R0902: Allow instances of these objects to have more than 20 attributes 
  46   
  47  import ConfigParser 
  48  import re 
  49  import copy 
  50  import logging 
  51  import time 
  52  from cStringIO import StringIO 
  53   
  54  from ganeti import errors 
  55  from ganeti import constants 
  56  from ganeti import netutils 
  57  from ganeti import outils 
  58  from ganeti import utils 
  59   
  60  from socket import AF_INET 
  61   
  62   
  63  __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance", 
  64             "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"] 
  65   
  66  _TIMESTAMPS = ["ctime", "mtime"] 
  67  _UUID = ["uuid"] 
def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Layer custom settings on top of a dict of defaults.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized value
  @type skip_keys: list
  @param skip_keys: keys that must not appear in the result
  @rtype: dict
  @return: dict with the 'full' values

  """
  # deep-copy so the caller's defaults are never aliased by the result
  filled = copy.deepcopy(defaults_dict)
  filled.update(custom_dict)
  for key in (skip_keys or []):
    filled.pop(key, None)
  return filled
90
def FillIPolicy(default_ipolicy, custom_ipolicy):
  """Fills an instance policy with defaults.

  @type default_ipolicy: dict
  @param default_ipolicy: complete policy holding the default values
  @type custom_ipolicy: dict
  @param custom_ipolicy: partial policy with the customized values
  @rtype: dict
  @return: the custom policy completed with defaults

  """
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
  filled = copy.deepcopy(custom_ipolicy)
  for (key, default_value) in default_ipolicy.items():
    if key not in filled:
      filled[key] = copy.deepcopy(default_value)
    elif key == constants.ISPECS_STD:
      # the std specs are themselves a dict that is filled key-by-key
      filled[key] = FillDict(default_value, filled[key])
  return filled
104
def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
  """Fills the disk parameter defaults.

  @see: L{FillDict} for parameters and return value

  """
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES

  filled = {}
  for template in constants.DISK_TEMPLATES:
    # templates absent from the custom dict simply keep their defaults
    filled[template] = FillDict(default_dparams[template],
                                custom_dparams.get(template, {}),
                                skip_keys=skip_keys)
  return filled
117
def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values
  @rtype: dict of dicts
  @return: the updated target (a fresh dict if target was None)

  """
  if target is None:
    # no per-group settings yet: start with a single default group
    return {constants.PP_DEFAULT: defaults}
  for group_name in target:
    target[group_name] = FillDict(defaults, target[group_name])
  return target
134
def UpgradeBeParams(target):
  """Update the be parameters dict to the new format.

  The legacy single "memory" setting is split into the newer
  maxmem/minmem pair (both set to the old value), in place.

  @type target: dict
  @param target: "be" parameters dict

  """
  if constants.BE_MEMORY not in target:
    return
  memory = target.pop(constants.BE_MEMORY)
  target[constants.BE_MAXMEM] = memory
  target[constants.BE_MINMEM] = memory
148
def UpgradeDiskParams(diskparams):
  """Upgrade the disk parameters.

  @type diskparams: dict
  @param diskparams: disk parameters to upgrade
  @rtype: dict
  @return: the upgraded disk parameters dict

  """
  if diskparams:
    return FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
  # None or empty input upgrades to an empty dict
  return {}
165
def UpgradeNDParams(ndparams):
  """Upgrade ndparams structure.

  @type ndparams: dict
  @param ndparams: node parameters to upgrade
  @rtype: dict
  @return: the upgraded node parameters dict

  """
  if ndparams is None:
    ndparams = {}

  if ndparams.get(constants.ND_OOB_PROGRAM, False) is None:
    # an explicit None is dropped so the default below takes over
    del ndparams[constants.ND_OOB_PROGRAM]
  return FillDict(constants.NDC_DEFAULTS, ndparams)
184
def MakeEmptyIPolicy():
  """Create an empty IPolicy dictionary.

  @rtype: dict
  @return: a new policy dict with no keys set

  """
  return dict()
191
class ConfigObject(outils.ValidatedSlots):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __getattr__(self, name):
    # Only reached for attributes not found the normal way: declared but
    # unset slots read as None, anything else is a genuine error.
    if name not in self.GetAllSlots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    # Restore from an unpickled dict; unknown keys are silently ignored
    # so that configs written by other versions can still be loaded.
    slots = self.GetAllSlots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  def Validate(self):
    """Validates the slots.

    This method returns L{None} if the validation succeeds, or raises
    an exception otherwise.

    This method must be implemented by the child classes.

    @rtype: NoneType
    @return: L{None}, if the validation succeeds

    @raise Exception: validation fails

    """

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self.GetAllSlots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    # keys may arrive as unicode from JSON; keyword args need str keys
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str) # pylint: disable=W0142
    return obj

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def __eq__(self, other):
    """Implement __eq__ for ConfigObjects."""
    return isinstance(other, self.__class__) and self.ToDict() == other.ToDict()

  def __ne__(self, other):
    """Implement __ne__ for ConfigObjects.

    Python 2 does not derive C{!=} from C{__eq__}; without this method
    C{!=} would fall back to identity comparison and disagree with C{==}.

    """
    return not self == other

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
298
class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      # initialise lazily so restored/new objects always get a set
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    @raise errors.TagError: if the tag is invalid or the object already
      carries the maximum number of tags

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    # reuse the set already bound above instead of a second GetTags() call
    tags.add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    @raise errors.TagError: if the tag is invalid or not present

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
377
class MasterNetworkParameters(ConfigObject):
  """Network configuration parameters for the master.

  Plain data holder used when starting/stopping the master IP.

  @ivar uuid: master node's UUID
  @ivar ip: master IP
  @ivar netmask: master netmask
  @ivar netdev: master network device
  @ivar ip_family: master IP family

  """
  __slots__ = [
    "uuid",
    "ip",
    "netmask",
    "netdev",
    "ip_family",
    ]
396
397 398 -class ConfigData(ConfigObject):
399 """Top-level config object.""" 400 __slots__ = [ 401 "version", 402 "cluster", 403 "nodes", 404 "nodegroups", 405 "instances", 406 "networks", 407 "serial_no", 408 ] + _TIMESTAMPS 409
410 - def ToDict(self):
411 """Custom function for top-level config data. 412 413 This just replaces the list of instances, nodes and the cluster 414 with standard python types. 415 416 """ 417 mydict = super(ConfigData, self).ToDict() 418 mydict["cluster"] = mydict["cluster"].ToDict() 419 for key in "nodes", "instances", "nodegroups", "networks": 420 mydict[key] = outils.ContainerToDicts(mydict[key]) 421 422 return mydict
423 424 @classmethod
425 - def FromDict(cls, val):
426 """Custom function for top-level config data 427 428 """ 429 obj = super(ConfigData, cls).FromDict(val) 430 obj.cluster = Cluster.FromDict(obj.cluster) 431 obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node) 432 obj.instances = \ 433 outils.ContainerFromDicts(obj.instances, dict, Instance) 434 obj.nodegroups = \ 435 outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup) 436 obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network) 437 return obj
438
439 - def HasAnyDiskOfType(self, dev_type):
440 """Check if in there is at disk of the given type in the configuration. 441 442 @type dev_type: L{constants.DTS_BLOCK} 443 @param dev_type: the type to look for 444 @rtype: boolean 445 @return: boolean indicating if a disk of the given type was found or not 446 447 """ 448 for instance in self.instances.values(): 449 for disk in instance.disks: 450 if disk.IsBasedOnDiskType(dev_type): 451 return True 452 return False
453
  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    Called at configuration load time; recursively upgrades the
    cluster, node, instance, nodegroup and network objects, and fills
    in cluster-level values derived from them.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    # must run after instances are upgraded, as it inspects their disks
    self._UpgradeEnabledDiskTemplates()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
      # keep each group's ipolicy consistent with the cluster-wide
      # enabled disk templates computed above
      InstancePolicy.UpgradeDiskTemplates(
        nodegroup.ipolicy, self.cluster.enabled_disk_templates)
    if self.cluster.drbd_usermode_helper is None:
      # only default the helper when DRBD is actually usable
      if self.cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
    if self.networks is None:
      self.networks = {}
    for network in self.networks.values():
      network.UpgradeConfig()
477
479 """Upgrade the cluster's enabled disk templates by inspecting the currently 480 enabled and/or used disk templates. 481 482 """ 483 if not self.cluster.enabled_disk_templates: 484 template_set = \ 485 set([inst.disk_template for inst in self.instances.values()]) 486 # Add drbd and plain, if lvm is enabled (by specifying a volume group) 487 if self.cluster.volume_group_name: 488 template_set.add(constants.DT_DRBD8) 489 template_set.add(constants.DT_PLAIN) 490 # Set enabled_disk_templates to the inferred disk templates. Order them 491 # according to a preference list that is based on Ganeti's history of 492 # supported disk templates. 493 self.cluster.enabled_disk_templates = [] 494 for preferred_template in constants.DISK_TEMPLATE_PREFERENCE: 495 if preferred_template in template_set: 496 self.cluster.enabled_disk_templates.append(preferred_template) 497 template_set.remove(preferred_template) 498 self.cluster.enabled_disk_templates.extend(list(template_set)) 499 InstancePolicy.UpgradeDiskTemplates( 500 self.cluster.ipolicy, self.cluster.enabled_disk_templates)
501
class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["name", "mac", "ip", "network",
               "nicparams", "netinfo", "pci"] + _UUID

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    # the mode may also be the magic "auto" value besides the fixed set
    mode = nicparams[constants.NIC_MODE]
    if (mode not in constants.NIC_VALID_MODES and
        mode != constants.VALUE_AUTO):
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)

    # a bridged NIC is unusable without a link (the bridge to attach to)
    if (mode == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      raise errors.ConfigurationError("Missing bridged NIC link")
525
526 527 -class Disk(ConfigObject):
528 """Config object representing a block device.""" 529 __slots__ = (["name", "dev_type", "logical_id", "children", "iv_name", 530 "size", "mode", "params", "spindles", "pci"] + _UUID + 531 # dynamic_params is special. It depends on the node this instance 532 # is sent to, and should not be persisted. 533 ["dynamic_params"]) 534
535 - def CreateOnSecondary(self):
536 """Test if this device needs to be created on a secondary node.""" 537 return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
538
539 - def AssembleOnSecondary(self):
540 """Test if this device needs to be assembled on a secondary node.""" 541 return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
542
543 - def OpenOnSecondary(self):
544 """Test if this device needs to be opened on a secondary node.""" 545 return self.dev_type in (constants.DT_PLAIN,)
546
547 - def StaticDevPath(self):
548 """Return the device path if this device type has a static one. 549 550 Some devices (LVM for example) live always at the same /dev/ path, 551 irrespective of their status. For such devices, we return this 552 path, for others we return None. 553 554 @warning: The path returned is not a normalized pathname; callers 555 should check that it is a valid path. 556 557 """ 558 if self.dev_type == constants.DT_PLAIN: 559 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1]) 560 elif self.dev_type == constants.DT_BLOCK: 561 return self.logical_id[1] 562 elif self.dev_type == constants.DT_RBD: 563 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1]) 564 return None
565
566 - def ChildrenNeeded(self):
567 """Compute the needed number of children for activation. 568 569 This method will return either -1 (all children) or a positive 570 number denoting the minimum number of children needed for 571 activation (only mirrored devices will usually return >=0). 572 573 Currently, only DRBD8 supports diskless activation (therefore we 574 return 0), for all other we keep the previous semantics and return 575 -1. 576 577 """ 578 if self.dev_type == constants.DT_DRBD8: 579 return 0 580 return -1
581
582 - def IsBasedOnDiskType(self, dev_type):
583 """Check if the disk or its children are based on the given type. 584 585 @type dev_type: L{constants.DTS_BLOCK} 586 @param dev_type: the type to look for 587 @rtype: boolean 588 @return: boolean indicating if a device of the given type was found or not 589 590 """ 591 if self.children: 592 for child in self.children: 593 if child.IsBasedOnDiskType(dev_type): 594 return True 595 return self.dev_type == dev_type
596
597 - def GetNodes(self, node_uuid):
598 """This function returns the nodes this device lives on. 599 600 Given the node on which the parent of the device lives on (or, in 601 case of a top-level device, the primary node of the devices' 602 instance), this function will return a list of nodes on which this 603 devices needs to (or can) be assembled. 604 605 """ 606 if self.dev_type in [constants.DT_PLAIN, constants.DT_FILE, 607 constants.DT_BLOCK, constants.DT_RBD, 608 constants.DT_EXT, constants.DT_SHARED_FILE, 609 constants.DT_GLUSTER]: 610 result = [node_uuid] 611 elif self.dev_type in constants.DTS_DRBD: 612 result = [self.logical_id[0], self.logical_id[1]] 613 if node_uuid not in result: 614 raise errors.ConfigurationError("DRBD device passed unknown node") 615 else: 616 raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type) 617 return result
618
  def ComputeNodeTree(self, parent_node_uuid):
    """Compute the node/disk tree for this disk and its children.

    This method, given the node on which the parent disk lives, will
    return the list of all (node UUID, disk) pairs which describe the disk
    tree in the most compact way. For example, a drbd/lvm stack
    will be returned as (primary_node, drbd) and (secondary_node, drbd)
    which represents all the top-level devices on the nodes.

    @type parent_node_uuid: string
    @param parent_node_uuid: UUID of the node the parent disk lives on
    @rtype: list of (node UUID, disk) tuples

    """
    my_nodes = self.GetNodes(parent_node_uuid)
    result = [(node, self) for node in my_nodes]
    if not self.children:
      # leaf device
      return result
    for node in my_nodes:
      for child in self.children:
        child_result = child.ComputeNodeTree(node)
        if len(child_result) == 1:
          # child (and all its descendants) is simple, doesn't split
          # over multiple hosts, so we don't need to describe it, our
          # own entry for this node describes it completely
          continue
        else:
          # check if child nodes differ from my nodes; note that
          # subdisk can differ from the child itself, and be instead
          # one of its descendants
          for subnode, subdisk in child_result:
            if subnode not in my_nodes:
              result.append((subnode, subdisk))
          # otherwise child is under our own node, so we ignore this
          # entry (but probably the other results in the list will
          # be different)
    return result
653
654 - def ComputeGrowth(self, amount):
655 """Compute the per-VG growth requirements. 656 657 This only works for VG-based disks. 658 659 @type amount: integer 660 @param amount: the desired increase in (user-visible) disk space 661 @rtype: dict 662 @return: a dictionary of volume-groups and the required size 663 664 """ 665 if self.dev_type == constants.DT_PLAIN: 666 return {self.logical_id[0]: amount} 667 elif self.dev_type == constants.DT_DRBD8: 668 if self.children: 669 return self.children[0].ComputeGrowth(amount) 670 else: 671 return {} 672 else: 673 # Other disk types do not require VG space 674 return {}
675
676 - def RecordGrow(self, amount):
677 """Update the size of this disk after growth. 678 679 This method recurses over the disks's children and updates their 680 size correspondigly. The method needs to be kept in sync with the 681 actual algorithms from bdev. 682 683 """ 684 if self.dev_type in (constants.DT_PLAIN, constants.DT_FILE, 685 constants.DT_RBD, constants.DT_EXT, 686 constants.DT_SHARED_FILE, constants.DT_GLUSTER): 687 self.size += amount 688 elif self.dev_type == constants.DT_DRBD8: 689 if self.children: 690 self.children[0].RecordGrow(amount) 691 self.size += amount 692 else: 693 raise errors.ProgrammerError("Disk.RecordGrow called for unsupported" 694 " disk type %s" % self.dev_type)
695
696 - def Update(self, size=None, mode=None, spindles=None):
697 """Apply changes to size, spindles and mode. 698 699 """ 700 if self.dev_type == constants.DT_DRBD8: 701 if self.children: 702 self.children[0].Update(size=size, mode=mode) 703 else: 704 assert not self.children 705 706 if size is not None: 707 self.size = size 708 if mode is not None: 709 self.mode = mode 710 if spindles is not None: 711 self.spindles = spindles
712
713 - def UnsetSize(self):
714 """Sets recursively the size to zero for the disk and its children. 715 716 """ 717 if self.children: 718 for child in self.children: 719 child.UnsetSize() 720 self.size = 0
721
  def UpdateDynamicDiskParams(self, target_node_uuid, nodes_ip):
    """Updates the dynamic disk params for the given node.

    This is mainly used for drbd, which needs ip/port configuration.

    Arguments:
      - target_node_uuid: the node UUID we wish to configure for
      - nodes_ip: a mapping of node name to ip

    The target_node must exist in nodes_ip, and should be one of the
    nodes in the logical ID if this device is a DRBD device.

    """
    if self.children:
      for child in self.children:
        child.UpdateDynamicDiskParams(target_node_uuid, nodes_ip)

    dyn_disk_params = {}
    if self.logical_id is not None and self.dev_type in constants.DTS_DRBD:
      pnode_uuid, snode_uuid, _, pminor, sminor, _ = self.logical_id
      if target_node_uuid not in (pnode_uuid, snode_uuid):
        # disk object is being sent to neither the primary nor the secondary
        # node. reset the dynamic parameters, the target node is not
        # supposed to use them.
        self.dynamic_params = dyn_disk_params
        return

      pnode_ip = nodes_ip.get(pnode_uuid, None)
      snode_ip = nodes_ip.get(snode_uuid, None)
      if pnode_ip is None or snode_ip is None:
        raise errors.ConfigurationError("Can't find primary or secondary node"
                                        " for %s" % str(self))
      # "local" and "remote" are from the point of view of the target node
      if pnode_uuid == target_node_uuid:
        dyn_disk_params[constants.DDP_LOCAL_IP] = pnode_ip
        dyn_disk_params[constants.DDP_REMOTE_IP] = snode_ip
        dyn_disk_params[constants.DDP_LOCAL_MINOR] = pminor
        dyn_disk_params[constants.DDP_REMOTE_MINOR] = sminor
      else: # it must be secondary, we tested above
        dyn_disk_params[constants.DDP_LOCAL_IP] = snode_ip
        dyn_disk_params[constants.DDP_REMOTE_IP] = pnode_ip
        dyn_disk_params[constants.DDP_LOCAL_MINOR] = sminor
        dyn_disk_params[constants.DDP_REMOTE_MINOR] = pminor

    self.dynamic_params = dyn_disk_params
766 767 # pylint: disable=W0221
768 - def ToDict(self, include_dynamic_params=False):
769 """Disk-specific conversion to standard python types. 770 771 This replaces the children lists of objects with lists of 772 standard python types. 773 774 """ 775 bo = super(Disk, self).ToDict() 776 if not include_dynamic_params and "dynamic_params" in bo: 777 del bo["dynamic_params"] 778 779 for attr in ("children",): 780 alist = bo.get(attr, None) 781 if alist: 782 bo[attr] = outils.ContainerToDicts(alist) 783 return bo
  @classmethod
  def FromDict(cls, val):
    """Custom function for Disks

    Rebuilds the children as Disk objects and restores logical_id to a
    tuple (serialisation turns tuples into lists).

    """
    obj = super(Disk, cls).FromDict(val)
    if obj.children:
      obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
    if obj.logical_id and isinstance(obj.logical_id, list):
      obj.logical_id = tuple(obj.logical_id)
    if obj.dev_type in constants.DTS_DRBD:
      # we need a tuple of length six here; pad older/shorter IDs with None
      if len(obj.logical_id) < 6:
        obj.logical_id += (None,) * (6 - len(obj.logical_id))
    return obj
800
  def __str__(self):
    """Custom str() formatter for disks.

    """
    # each branch leaves an unclosed "(..." that the common size suffix
    # below terminates with ")>"
    if self.dev_type == constants.DT_PLAIN:
      val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
    elif self.dev_type in constants.DTS_DRBD:
      node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
      val = "<DRBD8("

      val += ("hosts=%s/%d-%s/%d, port=%s, " %
              (node_a, minor_a, node_b, minor_b, port))
      if self.children and self.children.count(None) == 0:
        # both backing devices (data and metadata) are known
        val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
      else:
        val += "no local storage"
    else:
      val = ("<Disk(type=%s, logical_id=%s, children=%s" %
             (self.dev_type, self.logical_id, self.children))
    if self.iv_name is None:
      val += ", not visible"
    else:
      val += ", visible as /dev/%s" % self.iv_name
    if self.spindles is not None:
      val += ", spindles=%s" % self.spindles
    # size is normally an int (MiB); fall back to repr-style for others
    if isinstance(self.size, int):
      val += ", size=%dm)>" % self.size
    else:
      val += ", size='%s')>" % (self.size,)
    return val
831
832 - def Verify(self):
833 """Checks that this disk is correctly configured. 834 835 """ 836 all_errors = [] 837 if self.mode not in constants.DISK_ACCESS_SET: 838 all_errors.append("Disk access mode '%s' is invalid" % (self.mode, )) 839 return all_errors
840
  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    Recursively upgrades children, normalises the params dict and
    translates legacy device-type names.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()

    # FIXME: Make this configurable in Ganeti 2.7
    # Params should be an empty dict that gets filled any time needed
    # In case of ext template we allow arbitrary params that should not
    # be overridden during a config reload/upgrade.
    if not self.params or not isinstance(self.params, dict):
      self.params = {}

    # add here config upgrade for this disk

    # map of legacy device types (mapping differing LD constants to new
    # DT constants)
    LEG_DEV_TYPE_MAP = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
    if self.dev_type in LEG_DEV_TYPE_MAP:
      self.dev_type = LEG_DEV_TYPE_MAP[self.dev_type]
  @staticmethod
  def ComputeLDParams(disk_template, disk_params):
    """Computes Logical Disk parameters from Disk Template parameters.

    @type disk_template: string
    @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
    @type disk_params: dict
    @param disk_params: disk template parameters;
                        dict(template_name -> parameters
    @rtype: list(dict)
    @return: a list of dicts, one for each node of the disk hierarchy. Each dict
      contains the LD parameters of the node. The tree is flattened in-order.

    """
    if disk_template not in constants.DISK_TEMPLATES:
      raise errors.ProgrammerError("Unknown disk template %s" % disk_template)

    assert disk_template in disk_params

    result = list()
    dt_params = disk_params[disk_template]

    if disk_template == constants.DT_DRBD8:
      # DRBD produces a three-level hierarchy: the DRBD device itself
      # followed by its two plain backing LVs (data, then metadata)
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_DRBD8], {
        constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
        constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
        constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
        constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
        constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
        constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
        constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
        constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
        constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
        constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
        constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
        constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
        constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
        }))

      # data LV
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
        constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
        }))

      # metadata LV
      result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
        constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
        }))

    else:
      # non-DRBD templates map one-to-one onto a single LD level
      defaults = constants.DISK_LD_DEFAULTS[disk_template]
      values = {}
      for field in defaults:
        values[field] = dt_params[field]
      result.append(FillDict(defaults, values))

    return result
921
922 923 -class InstancePolicy(ConfigObject):
924 """Config object representing instance policy limits dictionary. 925 926 Note that this object is not actually used in the config, it's just 927 used as a placeholder for a few functions. 928 929 """ 930 @classmethod
931 - def UpgradeDiskTemplates(cls, ipolicy, enabled_disk_templates):
932 """Upgrades the ipolicy configuration.""" 933 if constants.IPOLICY_DTS in ipolicy: 934 if not set(ipolicy[constants.IPOLICY_DTS]).issubset( 935 set(enabled_disk_templates)): 936 ipolicy[constants.IPOLICY_DTS] = list( 937 set(ipolicy[constants.IPOLICY_DTS]) & set(enabled_disk_templates))
938 939 @classmethod
  def CheckParameterSyntax(cls, ipolicy, check_std):
    """ Check the instance policy for validity.

    @type ipolicy: dict
    @param ipolicy: dictionary with min/max/std specs and policies
    @type check_std: bool
    @param check_std: Whether to check std value or just assume compliance
    @raise errors.ConfigurationError: when the policy is not legal

    """
    InstancePolicy.CheckISpecSyntax(ipolicy, check_std)
    if constants.IPOLICY_DTS in ipolicy:
      InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS])
    for key in constants.IPOLICY_PARAMETERS:
      if key in ipolicy:
        InstancePolicy.CheckParameter(key, ipolicy[key])
    # anything outside the known key set is rejected outright
    wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
    if wrong_keys:
      raise errors.ConfigurationError("Invalid keys in ipolicy: %s" %
                                      utils.CommaJoin(wrong_keys))
960 961 @classmethod
962 - def _CheckIncompleteSpec(cls, spec, keyname):
963 missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys()) 964 if missing_params: 965 msg = ("Missing instance specs parameters for %s: %s" % 966 (keyname, utils.CommaJoin(missing_params))) 967 raise errors.ConfigurationError(msg)
968 969 @classmethod
970 - def CheckISpecSyntax(cls, ipolicy, check_std):
971 """Check the instance policy specs for validity. 972 973 @type ipolicy: dict 974 @param ipolicy: dictionary with min/max/std specs 975 @type check_std: bool 976 @param check_std: Whether to check std value or just assume compliance 977 @raise errors.ConfigurationError: when specs are not valid 978 979 """ 980 if constants.ISPECS_MINMAX not in ipolicy: 981 # Nothing to check 982 return 983 984 if check_std and constants.ISPECS_STD not in ipolicy: 985 msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD 986 raise errors.ConfigurationError(msg) 987 stdspec = ipolicy.get(constants.ISPECS_STD) 988 if check_std: 989 InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD) 990 991 if not ipolicy[constants.ISPECS_MINMAX]: 992 raise errors.ConfigurationError("Empty minmax specifications") 993 std_is_good = False 994 for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]: 995 missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys()) 996 if missing: 997 msg = "Missing instance specification: %s" % utils.CommaJoin(missing) 998 raise errors.ConfigurationError(msg) 999 for (key, spec) in minmaxspecs.items(): 1000 InstancePolicy._CheckIncompleteSpec(spec, key) 1001 1002 spec_std_ok = True 1003 for param in constants.ISPECS_PARAMETERS: 1004 par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec, 1005 param, check_std) 1006 spec_std_ok = spec_std_ok and par_std_ok 1007 std_is_good = std_is_good or spec_std_ok 1008 if not std_is_good: 1009 raise errors.ConfigurationError("Invalid std specifications")
1010 1011 @classmethod
1012 - def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
1013 """Check the instance policy specs for validity on a given key. 1014 1015 We check if the instance specs makes sense for a given key, that is 1016 if minmaxspecs[min][name] <= stdspec[name] <= minmaxspec[max][name]. 1017 1018 @type minmaxspecs: dict 1019 @param minmaxspecs: dictionary with min and max instance spec 1020 @type stdspec: dict 1021 @param stdspec: dictionary with standard instance spec 1022 @type name: string 1023 @param name: what are the limits for 1024 @type check_std: bool 1025 @param check_std: Whether to check std value or just assume compliance 1026 @rtype: bool 1027 @return: C{True} when specs are valid, C{False} when standard spec for the 1028 given name is not valid 1029 @raise errors.ConfigurationError: when min/max specs for the given name 1030 are not valid 1031 1032 """ 1033 minspec = minmaxspecs[constants.ISPECS_MIN] 1034 maxspec = minmaxspecs[constants.ISPECS_MAX] 1035 min_v = minspec[name] 1036 max_v = maxspec[name] 1037 1038 if min_v > max_v: 1039 err = ("Invalid specification of min/max values for %s: %s/%s" % 1040 (name, min_v, max_v)) 1041 raise errors.ConfigurationError(err) 1042 elif check_std: 1043 std_v = stdspec.get(name, min_v) 1044 return std_v >= min_v and std_v <= max_v 1045 else: 1046 return True
1047 1048 @classmethod
1049 - def CheckDiskTemplates(cls, disk_templates):
1050 """Checks the disk templates for validity. 1051 1052 """ 1053 if not disk_templates: 1054 raise errors.ConfigurationError("Instance policy must contain" + 1055 " at least one disk template") 1056 wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES) 1057 if wrong: 1058 raise errors.ConfigurationError("Invalid disk template(s) %s" % 1059 utils.CommaJoin(wrong))
1060 1061 @classmethod
1062 - def CheckParameter(cls, key, value):
1063 """Checks a parameter. 1064 1065 Currently we expect all parameters to be float values. 1066 1067 """ 1068 try: 1069 float(value) 1070 except (TypeError, ValueError), err: 1071 raise errors.ConfigurationError("Invalid value for key" " '%s':" 1072 " '%s', error: %s" % (key, value, err))
1073
1074 1075 -class Instance(TaggableObject):
1076 """Config object representing an instance.""" 1077 __slots__ = [ 1078 "name", 1079 "primary_node", 1080 "os", 1081 "hypervisor", 1082 "hvparams", 1083 "beparams", 1084 "osparams", 1085 "admin_state", 1086 "admin_state_source", 1087 "nics", 1088 "disks", 1089 "disk_template", 1090 "disks_active", 1091 "network_port", 1092 "serial_no", 1093 ] + _TIMESTAMPS + _UUID 1094
1095 - def _ComputeSecondaryNodes(self):
1096 """Compute the list of secondary nodes. 1097 1098 This is a simple wrapper over _ComputeAllNodes. 1099 1100 """ 1101 all_nodes = set(self._ComputeAllNodes()) 1102 all_nodes.discard(self.primary_node) 1103 return tuple(all_nodes)
1104 1105 secondary_nodes = property(_ComputeSecondaryNodes, None, None, 1106 "List of names of secondary nodes") 1107
1108 - def _ComputeAllNodes(self):
1109 """Compute the list of all nodes. 1110 1111 Since the data is already there (in the drbd disks), keeping it as 1112 a separate normal attribute is redundant and if not properly 1113 synchronised can cause problems. Thus it's better to compute it 1114 dynamically. 1115 1116 """ 1117 def _Helper(nodes, device): 1118 """Recursively computes nodes given a top device.""" 1119 if device.dev_type in constants.DTS_DRBD: 1120 nodea, nodeb = device.logical_id[:2] 1121 nodes.add(nodea) 1122 nodes.add(nodeb) 1123 if device.children: 1124 for child in device.children: 1125 _Helper(nodes, child)
1126 1127 all_nodes = set() 1128 for device in self.disks: 1129 _Helper(all_nodes, device) 1130 # ensure that the primary node is always the first 1131 all_nodes.discard(self.primary_node) 1132 return (self.primary_node, ) + tuple(all_nodes)
1133 1134 all_nodes = property(_ComputeAllNodes, None, None, 1135 "List of names of all the nodes of the instance") 1136
1137 - def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
1138 """Provide a mapping of nodes to LVs this instance owns. 1139 1140 This function figures out what logical volumes should belong on 1141 which nodes, recursing through a device tree. 1142 1143 @type lvmap: dict 1144 @param lvmap: optional dictionary to receive the 1145 'node' : ['lv', ...] data. 1146 @type devs: list of L{Disk} 1147 @param devs: disks to get the LV name for. If None, all disk of this 1148 instance are used. 1149 @type node_uuid: string 1150 @param node_uuid: UUID of the node to get the LV names for. If None, the 1151 primary node of this instance is used. 1152 @return: None if lvmap arg is given, otherwise, a dictionary of 1153 the form { 'node_uuid' : ['volume1', 'volume2', ...], ... }; 1154 volumeN is of the form "vg_name/lv_name", compatible with 1155 GetVolumeList() 1156 1157 """ 1158 if node_uuid is None: 1159 node_uuid = self.primary_node 1160 1161 if lvmap is None: 1162 lvmap = { 1163 node_uuid: [], 1164 } 1165 ret = lvmap 1166 else: 1167 if not node_uuid in lvmap: 1168 lvmap[node_uuid] = [] 1169 ret = None 1170 1171 if not devs: 1172 devs = self.disks 1173 1174 for dev in devs: 1175 if dev.dev_type == constants.DT_PLAIN: 1176 lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1]) 1177 1178 elif dev.dev_type in constants.DTS_DRBD: 1179 if dev.children: 1180 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0]) 1181 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1]) 1182 1183 elif dev.children: 1184 self.MapLVsByNode(lvmap, dev.children, node_uuid) 1185 1186 return ret
1187
1188 - def FindDisk(self, idx):
1189 """Find a disk given having a specified index. 1190 1191 This is just a wrapper that does validation of the index. 1192 1193 @type idx: int 1194 @param idx: the disk index 1195 @rtype: L{Disk} 1196 @return: the corresponding disk 1197 @raise errors.OpPrereqError: when the given index is not valid 1198 1199 """ 1200 try: 1201 idx = int(idx) 1202 return self.disks[idx] 1203 except (TypeError, ValueError), err: 1204 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err), 1205 errors.ECODE_INVAL) 1206 except IndexError: 1207 raise errors.OpPrereqError("Invalid disk index: %d (instace has disks" 1208 " 0 to %d" % (idx, len(self.disks) - 1), 1209 errors.ECODE_INVAL)
1210
1211 - def ToDict(self):
1212 """Instance-specific conversion to standard python types. 1213 1214 This replaces the children lists of objects with lists of standard 1215 python types. 1216 1217 """ 1218 bo = super(Instance, self).ToDict() 1219 1220 for attr in "nics", "disks": 1221 alist = bo.get(attr, None) 1222 if alist: 1223 nlist = outils.ContainerToDicts(alist) 1224 else: 1225 nlist = [] 1226 bo[attr] = nlist 1227 return bo
1228 1229 @classmethod
1230 - def FromDict(cls, val):
1231 """Custom function for instances. 1232 1233 """ 1234 if "admin_state" not in val: 1235 if val.get("admin_up", False): 1236 val["admin_state"] = constants.ADMINST_UP 1237 else: 1238 val["admin_state"] = constants.ADMINST_DOWN 1239 if "admin_up" in val: 1240 del val["admin_up"] 1241 obj = super(Instance, cls).FromDict(val) 1242 obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC) 1243 obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk) 1244 return obj
1245
1246 - def UpgradeConfig(self):
1247 """Fill defaults for missing configuration values. 1248 1249 """ 1250 if self.admin_state_source is None: 1251 self.admin_state_source = constants.ADMIN_SOURCE 1252 for nic in self.nics: 1253 nic.UpgradeConfig() 1254 for disk in self.disks: 1255 disk.UpgradeConfig() 1256 if self.hvparams: 1257 for key in constants.HVC_GLOBALS: 1258 try: 1259 del self.hvparams[key] 1260 except KeyError: 1261 pass 1262 if self.osparams is None: 1263 self.osparams = {} 1264 UpgradeBeParams(self.beparams) 1265 if self.disks_active is None: 1266 self.disks_active = self.admin_state == constants.ADMINST_UP
1267
class OS(ConfigObject):
  """Config object representing an operating system.

  @type supported_parameters: list
  @ivar supported_parameters: a list of tuples, name and description,
      containing the supported parameters by this OS

  @type VARIANT_DELIM: string
  @cvar VARIANT_DELIM: the variant delimiter

  """
  __slots__ = [
    "name",
    "path",
    "api_versions",
    "create_script",
    "export_script",
    "import_script",
    "rename_script",
    "verify_script",
    "supported_variants",
    "supported_parameters",
    ]

  VARIANT_DELIM = "+"

  @classmethod
  def SplitNameVariant(cls, name):
    """Splits the name into the proper name and variant.

    @param name: the OS (unprocessed) name
    @rtype: list
    @return: a list of two elements; if the original name didn't
        contain a variant, it's returned as an empty string

    """
    # partition() always yields three parts; the variant is empty when
    # the delimiter is absent, matching split(..., 1) plus padding
    base, _, variant = name.partition(cls.VARIANT_DELIM)
    return [base, variant]

  @classmethod
  def GetName(cls, name):
    """Returns the proper name of the os (without the variant).

    @param name: the OS (unprocessed) name

    """
    (base, _) = cls.SplitNameVariant(name)
    return base

  @classmethod
  def GetVariant(cls, name):
    """Returns the variant the os (without the base name).

    @param name: the OS (unprocessed) name

    """
    (_, variant) = cls.SplitNameVariant(name)
    return variant
1327
class ExtStorage(ConfigObject):
  """Config object representing an External Storage Provider.

  """
  __slots__ = [
    "name",
    "path",
    # scripts implementing the provider's operations
    "create_script",
    "remove_script",
    "grow_script",
    "attach_script",
    "detach_script",
    "setinfo_script",
    "verify_script",
    "supported_parameters",
    ]
1345
class NodeHvState(ConfigObject):
  """Hypervisor state on a node.

  @ivar mem_total: Total amount of memory
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
    available)
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
    rounding
  @ivar mem_inst: Memory used by instances living on node
  @ivar cpu_total: Total node CPU core count
  @ivar cpu_node: Number of CPU cores reserved for the node itself

  """
  # timestamps (ctime/mtime) are appended via _TIMESTAMPS
  __slots__ = [
    "mem_total",
    "mem_node",
    "mem_hv",
    "mem_inst",
    "cpu_total",
    "cpu_node",
    ] + _TIMESTAMPS
1368
class NodeDiskState(ConfigObject):
  """Disk state on a node.

  """
  # NOTE(review): slot names suggest total/reserved/overhead capacity
  # figures; units are not visible here — verify against callers
  __slots__ = [
    "total",
    "reserved",
    "overhead",
    ] + _TIMESTAMPS
1379
class Node(TaggableObject):
  """Config object representing a node.

  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
  @ivar hv_state_static: Hypervisor state overridden by user
  @ivar disk_state: Disk state (e.g. free space)
  @ivar disk_state_static: Disk state overridden by user

  """
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    "master_capable",
    "vm_capable",
    "ndparams",
    "powered",
    "hv_state",
    "hv_state_static",
    "disk_state",
    "disk_state_static",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.master_capable is None:
      self.master_capable = True

    if self.vm_capable is None:
      self.vm_capable = True

    if self.ndparams is None:
      self.ndparams = {}
    # And remove any global parameter
    for key in constants.NDC_GLOBALS:
      if key in self.ndparams:
        logging.warning("Ignoring %s node parameter for node %s",
                        key, self.name)
        del self.ndparams[key]

    if self.powered is None:
      self.powered = True

  def ToDict(self):
    """Custom function for serializing.

    hv_state and disk_state hold ConfigObject instances, so they are
    converted to plain dicts here.

    """
    data = super(Node, self).ToDict()

    hv_state = data.get("hv_state", None)
    if hv_state is not None:
      data["hv_state"] = outils.ContainerToDicts(hv_state)

    disk_state = data.get("disk_state", None)
    if disk_state is not None:
      data["disk_state"] = \
        dict((key, outils.ContainerToDicts(value))
             for (key, value) in disk_state.items())

    return data

  @classmethod
  def FromDict(cls, val):
    """Custom function for deserializing.

    Rebuilds the L{NodeHvState} and L{NodeDiskState} objects from their
    dict form.

    """
    obj = super(Node, cls).FromDict(val)

    if obj.hv_state is not None:
      obj.hv_state = \
        outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)

    if obj.disk_state is not None:
      obj.disk_state = \
        dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
             for (key, value) in obj.disk_state.items())

    return obj
1468
class NodeGroup(TaggableObject):
  """Config object representing a node group."""
  __slots__ = [
    "name",
    "members",
    "ndparams",
    "diskparams",
    "ipolicy",
    "serial_no",
    "hv_state_static",
    "disk_state_static",
    "alloc_policy",
    "networks",
    ] + _TIMESTAMPS + _UUID

  def ToDict(self):
    """Custom function for nodegroup.

    This discards the members object, which gets recalculated and is only kept
    in memory.

    """
    mydict = super(NodeGroup, self).ToDict()
    del mydict["members"]
    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for nodegroup.

    The members slot is initialized to an empty list, upon deserialization.

    """
    obj = super(NodeGroup, cls).FromDict(val)
    obj.members = []
    return obj

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.ndparams is None:
      self.ndparams = {}

    if self.serial_no is None:
      self.serial_no = 1

    if self.alloc_policy is None:
      self.alloc_policy = constants.ALLOC_POLICY_PREFERRED

    # We only update mtime, and not ctime, since we would not be able
    # to provide a correct value for creation time.
    if self.mtime is None:
      self.mtime = time.time()

    if self.diskparams is None:
      self.diskparams = {}
    if self.ipolicy is None:
      self.ipolicy = MakeEmptyIPolicy()

    if self.networks is None:
      self.networks = {}

    # fill default NIC parameters for every connected network
    for network, netparams in self.networks.items():
      self.networks[network] = FillDict(constants.NICC_DEFAULTS, netparams)

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(node.ndparams)

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the node group defaults

    """
    return FillDict(self.ndparams, ndparams)
1557
class Cluster(TaggableObject):
  """Config object representing the cluster."""
  __slots__ = [
    "serial_no",
    "rsahostkeypub",
    "dsahostkeypub",
    "highest_used_port",
    "tcpudp_port_pool",
    "mac_prefix",
    "volume_group_name",
    "reserved_lvs",
    "drbd_usermode_helper",
    "default_bridge",
    "default_hypervisor",
    "master_node",
    "master_ip",
    "master_netdev",
    "master_netmask",
    "use_external_mip_script",
    "cluster_name",
    "file_storage_dir",
    "shared_file_storage_dir",
    "gluster_storage_dir",
    "enabled_hypervisors",
    "hvparams",
    "ipolicy",
    "os_hvp",
    "beparams",
    "osparams",
    "nicparams",
    "ndparams",
    "diskparams",
    "candidate_pool_size",
    "modify_etc_hosts",
    "modify_ssh_setup",
    "maintain_node_health",
    "uid_pool",
    "default_iallocator",
    "default_iallocator_params",
    "hidden_os",
    "blacklisted_os",
    "primary_ip_family",
    "prealloc_wipe_disks",
    "hv_state_static",
    "disk_state_static",
    "enabled_disk_templates",
    "candidate_certs",
    "max_running_jobs",
    "enabled_user_shutdown",
    ] + _TIMESTAMPS + _UUID

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    Note: the order of the statements below matters; e.g. the nicparams
    migration reads C{default_bridge} before it is cleared further down.

    """
    # pylint: disable=E0203
    # because these are "defined" via slots, not manually
    if self.hvparams is None:
      self.hvparams = constants.HVC_DEFAULTS
    else:
      for hypervisor in constants.HYPER_TYPES:
        try:
          existing_params = self.hvparams[hypervisor]
        except KeyError:
          existing_params = {}
        self.hvparams[hypervisor] = FillDict(
          constants.HVC_DEFAULTS[hypervisor], existing_params)

    if self.os_hvp is None:
      self.os_hvp = {}

    # osparams added before 2.2
    if self.osparams is None:
      self.osparams = {}

    self.ndparams = UpgradeNDParams(self.ndparams)

    self.beparams = UpgradeGroupedParams(self.beparams,
                                         constants.BEC_DEFAULTS)
    for beparams_group in self.beparams:
      UpgradeBeParams(self.beparams[beparams_group])

    # remember whether to migrate default_bridge before nicparams exist
    migrate_default_bridge = not self.nicparams
    self.nicparams = UpgradeGroupedParams(self.nicparams,
                                          constants.NICC_DEFAULTS)
    if migrate_default_bridge:
      self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
        self.default_bridge

    if self.modify_etc_hosts is None:
      self.modify_etc_hosts = True

    if self.modify_ssh_setup is None:
      self.modify_ssh_setup = True

    # default_bridge is no longer used in 2.1. The slot is left there to
    # support auto-upgrading. It can be removed once we decide to deprecate
    # upgrading straight from 2.0.
    if self.default_bridge is not None:
      self.default_bridge = None

    # default_hypervisor is just the first enabled one in 2.1. This slot and
    # code can be removed once upgrading straight from 2.0 is deprecated.
    if self.default_hypervisor is not None:
      self.enabled_hypervisors = ([self.default_hypervisor] +
                                  [hvname for hvname in self.enabled_hypervisors
                                   if hvname != self.default_hypervisor])
      self.default_hypervisor = None

    # maintain_node_health added after 2.1.1
    if self.maintain_node_health is None:
      self.maintain_node_health = False

    if self.uid_pool is None:
      self.uid_pool = []

    if self.default_iallocator is None:
      self.default_iallocator = ""

    if self.default_iallocator_params is None:
      self.default_iallocator_params = {}

    # reserved_lvs added before 2.2
    if self.reserved_lvs is None:
      self.reserved_lvs = []

    # hidden and blacklisted operating systems added before 2.2.1
    if self.hidden_os is None:
      self.hidden_os = []

    if self.blacklisted_os is None:
      self.blacklisted_os = []

    # primary_ip_family added before 2.3
    if self.primary_ip_family is None:
      self.primary_ip_family = AF_INET

    if self.master_netmask is None:
      ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
      self.master_netmask = ipcls.iplen

    if self.prealloc_wipe_disks is None:
      self.prealloc_wipe_disks = False

    # shared_file_storage_dir added before 2.5
    if self.shared_file_storage_dir is None:
      self.shared_file_storage_dir = ""

    # gluster_storage_dir added in 2.11
    if self.gluster_storage_dir is None:
      self.gluster_storage_dir = ""

    if self.use_external_mip_script is None:
      self.use_external_mip_script = False

    if self.diskparams:
      self.diskparams = UpgradeDiskParams(self.diskparams)
    else:
      self.diskparams = constants.DISK_DT_DEFAULTS.copy()

    # instance policy added before 2.6
    if self.ipolicy is None:
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
    else:
      # we can either make sure to upgrade the ipolicy always, or only
      # do it in some corner cases (e.g. missing keys); note that this
      # will break any removal of keys from the ipolicy dict
      wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
      if wrongkeys:
        # These keys would be silently removed by FillIPolicy()
        msg = ("Cluster instance policy contains spurious keys: %s" %
               utils.CommaJoin(wrongkeys))
        raise errors.ConfigurationError(msg)
      self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)

    # hv_state_static added in 2.7
    if self.hv_state_static is None:
      self.hv_state_static = {}
    if self.disk_state_static is None:
      self.disk_state_static = {}

    if self.candidate_certs is None:
      self.candidate_certs = {}

    if self.max_running_jobs is None:
      self.max_running_jobs = constants.LUXID_MAXIMAL_RUNNING_JOBS_DEFAULT

    if self.enabled_user_shutdown is None:
      self.enabled_user_shutdown = False

  @property
  def primary_hypervisor(self):
    """The first hypervisor is the primary.

    Useful, for example, for L{Node}'s hv/disk state.

    """
    return self.enabled_hypervisors[0]

  def ToDict(self):
    """Custom function for cluster.

    Serializes the tcpudp_port_pool set as a list.

    """
    mydict = super(Cluster, self).ToDict()

    if self.tcpudp_port_pool is None:
      tcpudp_port_pool = []
    else:
      tcpudp_port_pool = list(self.tcpudp_port_pool)

    mydict["tcpudp_port_pool"] = tcpudp_port_pool

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for cluster.

    Restores tcpudp_port_pool as a set.

    """
    obj = super(Cluster, cls).FromDict(val)

    if obj.tcpudp_port_pool is None:
      obj.tcpudp_port_pool = set()
    elif not isinstance(obj.tcpudp_port_pool, set):
      obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)

    return obj

  def SimpleFillDP(self, diskparams):
    """Fill a given diskparams dict with cluster defaults.

    @param diskparams: The diskparams
    @return: The defaults dict

    """
    return FillDiskParams(self.diskparams, diskparams)

  def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
    """Get the default hypervisor parameters for the cluster.

    @param hypervisor: the hypervisor name
    @param os_name: if specified, we'll also update the defaults for this OS
    @param skip_keys: if passed, list of keys not to use
    @return: the defaults dict

    """
    if skip_keys is None:
      skip_keys = []

    # OS-specific parameters override the plain hypervisor defaults
    fill_stack = [self.hvparams.get(hypervisor, {})]
    if os_name is not None:
      os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
      fill_stack.append(os_hvp)

    ret_dict = {}
    for o_dict in fill_stack:
      ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)

    return ret_dict

  def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
    """Fill a given hvparams dict with cluster defaults.

    @type hv_name: string
    @param hv_name: the hypervisor to use
    @type os_name: string
    @param os_name: the OS to use for overriding the hypervisor defaults
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the given hvparams with missing keys filled from
        the cluster defaults

    """
    if skip_globals:
      skip_keys = constants.HVC_GLOBALS
    else:
      skip_keys = []

    def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
    return FillDict(def_dict, hvparams, skip_keys=skip_keys)

  def FillHV(self, instance, skip_globals=False):
    """Fill an instance's hvparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @type skip_globals: boolean
    @param skip_globals: if True, the global hypervisor parameters will
        not be filled
    @rtype: dict
    @return: a copy of the instance's hvparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillHV(instance.hypervisor, instance.os,
                             instance.hvparams, skip_globals)

  def SimpleFillBE(self, beparams):
    """Fill a given beparams dict with cluster defaults.

    @type beparams: dict
    @param beparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in beparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)

  def FillBE(self, instance):
    """Fill an instance's beparams dict with cluster defaults.

    @type instance: L{objects.Instance}
    @param instance: the instance parameter to fill
    @rtype: dict
    @return: a copy of the instance's beparams with missing keys filled from
        the cluster defaults

    """
    return self.SimpleFillBE(instance.beparams)

  def SimpleFillNIC(self, nicparams):
    """Fill a given nicparams dict with cluster defaults.

    @type nicparams: dict
    @param nicparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in nicparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)

  def SimpleFillOS(self, os_name, os_params):
    """Fill an instance's osparams dict with cluster defaults.

    @type os_name: string
    @param os_name: the OS name to use
    @type os_params: dict
    @param os_params: the dict to fill with default values
    @rtype: dict
    @return: a copy of the instance's osparams with missing keys filled from
        the cluster defaults

    """
    name_only = os_name.split("+", 1)[0]
    # base OS
    result = self.osparams.get(name_only, {})
    # OS with variant
    result = FillDict(result, self.osparams.get(os_name, {}))
    # specified params
    return FillDict(result, os_params)

  @staticmethod
  def SimpleFillHvState(hv_state):
    """Fill an hv_state sub dict with cluster defaults.

    """
    return FillDict(constants.HVST_DEFAULTS, hv_state)

  @staticmethod
  def SimpleFillDiskState(disk_state):
    """Fill an disk_state sub dict with cluster defaults.

    """
    return FillDict(constants.DS_DEFAULTS, disk_state)

  def FillND(self, node, nodegroup):
    """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.FillND(node))

  def FillNDGroup(self, nodegroup):
    """Return filled out ndparams for just L{objects.NodeGroup}

    @type nodegroup: L{objects.NodeGroup}
    @param nodegroup: A NodeGroup object to fill
    @return: a copy of the node group's ndparams with defaults filled

    """
    return self.SimpleFillND(nodegroup.SimpleFillND({}))

  def SimpleFillND(self, ndparams):
    """Fill a given ndparams dict with defaults.

    @type ndparams: dict
    @param ndparams: the dict to fill
    @rtype: dict
    @return: a copy of the passed in ndparams with missing keys filled
        from the cluster defaults

    """
    return FillDict(self.ndparams, ndparams)

  def SimpleFillIPolicy(self, ipolicy):
    """ Fill instance policy dict with defaults.

    @type ipolicy: dict
    @param ipolicy: the dict to fill
    @rtype: dict
    @return: a copy of passed ipolicy with missing keys filled from
        the cluster defaults

    """
    return FillIPolicy(self.ipolicy, ipolicy)

  def IsDiskTemplateEnabled(self, disk_template):
    """Checks if a particular disk template is enabled.

    """
    return utils.storage.IsDiskTemplateEnabled(
      disk_template, self.enabled_disk_templates)

  def IsFileStorageEnabled(self):
    """Checks if file storage is enabled.

    """
    return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)

  def IsSharedFileStorageEnabled(self):
    """Checks if shared file storage is enabled.

    """
    return utils.storage.IsSharedFileStorageEnabled(
      self.enabled_disk_templates)
1993
class BlockDevStatus(ConfigObject):
  """Config object representing the status of a block device."""
  __slots__ = [
    "dev_path",
    "major",
    "minor",
    # synchronization progress fields
    "sync_percent",
    "estimated_time",
    "is_degraded",
    "ldisk_status",
    ]
2006
class ImportExportStatus(ConfigObject):
  """Config object representing the status of an import or export."""
  # timestamps (ctime/mtime) are appended via _TIMESTAMPS
  __slots__ = [
    "recent_output",
    "listen_port",
    "connected",
    # transfer progress fields
    "progress_mbytes",
    "progress_throughput",
    "progress_eta",
    "progress_percent",
    "exit_status",
    "error_message",
    ] + _TIMESTAMPS
2021
class ImportExportOptions(ConfigObject):
  """Options for import/export daemon

  @ivar key_name: X509 key name (None for cluster certificate)
  @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
  @ivar compress: Compression method (one of L{constants.IEC_ALL})
  @ivar magic: Used to ensure the connection goes to the right disk
  @ivar ipv6: Whether to use IPv6
  @ivar connect_timeout: Number of seconds for establishing connection

  """
  __slots__ = [
    "key_name",
    "ca_pem",
    "compress",
    "magic",
    "ipv6",
    "connect_timeout",
    ]
2042
class ConfdRequest(ConfigObject):
  """Object holding a confd request.

  Pure data container; serialized and sent to the confd daemon.

  @ivar protocol: confd protocol version
  @ivar type: confd query type
  @ivar query: query request
  @ivar rsalt: requested reply salt

  """
  __slots__ = [
    "protocol",
    "type",
    "query",
    "rsalt",
    ]
2059
class ConfdReply(ConfigObject):
  """Object holding a confd reply.

  Pure data container; the counterpart of L{ConfdRequest}.

  @ivar protocol: confd protocol version
  @ivar status: reply status code (ok, error)
  @ivar answer: confd query reply
  @ivar serial: configuration serial number

  """
  __slots__ = [
    "protocol",
    "status",
    "answer",
    "serial",
    ]
2076
class QueryFieldDefinition(ConfigObject):
  """Object holding a query field definition.

  Pure data container describing a single queryable field.

  @ivar name: Field name
  @ivar title: Human-readable title
  @ivar kind: Field type
  @ivar doc: Human-readable description

  """
  __slots__ = [
    "name",
    "title",
    "kind",
    "doc",
    ]
2093
class _QueryResponseBase(ConfigObject):
  """Base class for query responses carrying field definitions.

  Handles the (de)serialization of the C{fields} attribute, which
  contains L{QueryFieldDefinition} objects.

  """
  __slots__ = [
    "fields",
    ]

  def ToDict(self):
    """Custom function for serializing.

    """
    data = super(_QueryResponseBase, self).ToDict()
    # Field definitions are ConfigObjects themselves and need explicit
    # conversion into plain dicts
    data["fields"] = outils.ContainerToDicts(data["fields"])
    return data

  @classmethod
  def FromDict(cls, val):
    """Custom function for de-serializing.

    """
    obj = super(_QueryResponseBase, cls).FromDict(val)
    obj.fields = outils.ContainerFromDicts(obj.fields, list,
                                           QueryFieldDefinition)
    return obj
2117
class QueryResponse(_QueryResponseBase):
  """Object holding the response to a query.

  Inherits the C{fields} slot (and its custom (de)serialization) from
  L{_QueryResponseBase}.

  @ivar fields: List of L{QueryFieldDefinition} objects
  @ivar data: Requested data

  """
  __slots__ = [
    "data",
    ]
2129
class QueryFieldsRequest(ConfigObject):
  """Object holding a request for querying available fields.

  @ivar what: the kind of object being queried
      (NOTE(review): presumably one of the query resource constants --
      confirm against the query implementation)
  @ivar fields: names of the fields being requested

  """
  __slots__ = [
    "what",
    "fields",
    ]
2139
class QueryFieldsResponse(_QueryResponseBase):
  """Object holding the response to a query for fields.

  Adds no slots of its own; the inherited C{fields} slot carries the
  whole payload.

  @ivar fields: List of L{QueryFieldDefinition} objects

  """
  __slots__ = []
2148
class MigrationStatus(ConfigObject):
  """Object holding the status of a migration.

  Pure data container.

  @ivar status: state of the migration
  @ivar transferred_ram: amount of RAM transferred so far
      (NOTE(review): unit not visible here -- confirm against the
      hypervisor backends filling this in)
  @ivar total_ram: total amount of RAM to transfer

  """
  __slots__ = [
    "status",
    "transferred_ram",
    "total_ram",
    ]
2159
class InstanceConsole(ConfigObject):
  """Object describing how to access the console of an instance.

  """
  __slots__ = [
    "instance",
    "kind",
    "message",
    "host",
    "port",
    "user",
    "command",
    "display",
    ]

  def Validate(self):
    """Validates contents of this object.

    Asserts that every attribute required by this console's kind is
    set; attributes are optional only for the kinds listed per assert.

    """
    assert self.kind in constants.CONS_ALL, "Unknown console type"
    assert self.instance, "Missing instance name"
    # A message is mandatory unless another access method is available
    assert self.message or self.kind in (constants.CONS_SSH,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC)
    # Everything except a plain message needs a host to connect to
    assert self.host or self.kind == constants.CONS_MESSAGE
    assert self.port or self.kind in (constants.CONS_MESSAGE,
                                      constants.CONS_SSH)
    assert self.user or self.kind in (constants.CONS_MESSAGE,
                                      constants.CONS_SPICE,
                                      constants.CONS_VNC)
    assert self.command or self.kind in (constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_VNC)
    assert self.display or self.kind in (constants.CONS_MESSAGE,
                                         constants.CONS_SPICE,
                                         constants.CONS_SSH)
2197
class Network(TaggableObject):
  """Object representing a network definition for ganeti.

  """
  __slots__ = [
    "name",
    "serial_no",
    "mac_prefix",
    "network",
    "network6",
    "gateway",
    "gateway6",
    "reservations",
    "ext_reservations",
    ] + _TIMESTAMPS + _UUID

  def HooksDict(self, prefix=""):
    """Export a dictionary used by hooks with a network's information.

    @type prefix: String
    @param prefix: Prefix to prepend to the dict entries

    """
    hooks = {
      "%sNETWORK_NAME" % prefix: self.name,
      "%sNETWORK_UUID" % prefix: self.uuid,
      "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
      }
    # Optional attributes only show up in the result when actually set
    optional = [
      ("NETWORK_SUBNET", self.network),
      ("NETWORK_GATEWAY", self.gateway),
      ("NETWORK_SUBNET6", self.network6),
      ("NETWORK_GATEWAY6", self.gateway6),
      ("NETWORK_MAC_PREFIX", self.mac_prefix),
      ]
    for suffix, value in optional:
      if value:
        hooks["%s%s" % (prefix, suffix)] = value

    return hooks

  @classmethod
  def FromDict(cls, val):
    """Custom function for networks.

    Remove deprecated network_type and family.

    """
    for legacy_key in ("network_type", "family"):
      val.pop(legacy_key, None)
    return super(Network, cls).FromDict(val)
2253
# need to inherit object in order to use super()
class SerializableConfigParser(ConfigParser.SafeConfigParser, object):
  """Simple wrapper over ConfigParse that allows serialization.

  This class is basically ConfigParser.SafeConfigParser with two
  additional methods that allow it to serialize/unserialize to/from a
  buffer.

  """
  def Dumps(self):
    """Dump this instance and return the string representation."""
    buf = StringIO()
    self.write(buf)
    return buf.getvalue()

  @classmethod
  def Loads(cls, data):
    """Load data from a string."""
    cfp = cls()
    cfp.readfp(StringIO(data))
    return cfp

  def get(self, section, option, **kwargs):
    """Wrapper over ConfigParser's get.

    Maps the special "none" value to None and tolerates missing
    disk/NIC name, network and vlan options.

    """
    try:
      value = super(SerializableConfigParser, self).get(section, option,
                                                        **kwargs)
    except ConfigParser.NoOptionError:
      # Missing {disk,nic}N_name and nicN_{network,vlan} options are
      # acceptable and simply yield None
      if re.match(r"(disk|nic)\d+_name|nic\d+_(network|vlan)", option):
        return None
      raise

    if value.lower() == constants.VALUE_NONE:
      value = None
    return value
2294
class LvmPvInfo(ConfigObject):
  """Information about an LVM physical volume (PV).

  @type name: string
  @ivar name: name of the PV
  @type vg_name: string
  @ivar vg_name: name of the volume group containing the PV
  @type size: float
  @ivar size: size of the PV in MiB
  @type free: float
  @ivar free: free space in the PV, in MiB
  @type attributes: string
  @ivar attributes: PV attributes
  @type lv_list: list of strings
  @ivar lv_list: names of the LVs hosted on the PV
  """
  __slots__ = [
    "name",
    "vg_name",
    "size",
    "free",
    "attributes",
    "lv_list"
    ]

  def IsEmpty(self):
    """Is this PV empty?

    """
    # Allow one MiB of slack between reported size and free space
    return self.free + 1 >= self.size

  def IsAllocatable(self):
    """Is this PV allocatable?

    """
    return "a" in self.attributes
2332