Package ganeti :: Module objects

Source Code for Module ganeti.objects

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30   
  31  """Transportable objects for Ganeti. 
  32   
  33  This module provides small, mostly data-only objects which are safe to 
  34  pass to and from external parties. 
  35   
  36  """ 
  37   
  38  # pylint: disable=E0203,W0201,R0902 
  39   
  40  # E0203: Access to member %r before its definition, since we use 
  41  # objects.py which doesn't explicitly initialise its members 
  42   
  43  # W0201: Attribute '%s' defined outside __init__ 
  44   
  45  # R0902: Allow instances of these objects to have more than 20 attributes 
  46   
  47  import ConfigParser 
  48  import re 
  49  import copy 
  50  import logging 
  51  import time 
  52  from cStringIO import StringIO 
  53   
  54  from ganeti import errors 
  55  from ganeti import constants 
  56  from ganeti import netutils 
  57  from ganeti import outils 
  58  from ganeti import utils 
  59   
  60  from socket import AF_INET 
  61   
  62   
  63  __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance", 
  64             "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"] 
  65   
  66  _TIMESTAMPS = ["ctime", "mtime"] 
  67  _UUID = ["uuid"] 

def FillDict(defaults_dict, custom_dict, skip_keys=None):
  """Basic function to apply settings on top of a default dict.

  @type defaults_dict: dict
  @param defaults_dict: dictionary holding the default values
  @type custom_dict: dict
  @param custom_dict: dictionary holding customized values
  @type skip_keys: list
  @param skip_keys: which keys not to fill
  @rtype: dict
  @return: dict with the 'full' values

  """
  ret_dict = copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  if skip_keys:
    for k in skip_keys:
      try:
        del ret_dict[k]
      except KeyError:
        pass
  return ret_dict
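
# Example (illustrative sketch, not part of ganeti.objects): FillDict merges a
# customization dict on top of a defaults dict and optionally drops some keys.
# The helper below inlines the same deepcopy-and-update logic with toy data;
# the parameter names are made up for illustration.
import copy as _copy

def _fill_dict_example(defaults_dict, custom_dict, skip_keys=None):
  # Defaults first, then customizations win, then skipped keys are removed.
  ret_dict = _copy.deepcopy(defaults_dict)
  ret_dict.update(custom_dict)
  for k in (skip_keys or []):
    ret_dict.pop(k, None)
  return ret_dict

_defaults = {"vcpus": 1, "maxmem": 128, "auto_balance": True}
_custom = {"maxmem": 512}
print(_fill_dict_example(_defaults, _custom))
# -> {'vcpus': 1, 'maxmem': 512, 'auto_balance': True} (key order may vary)
print(_fill_dict_example(_defaults, _custom, skip_keys=["auto_balance"]))
# -> {'vcpus': 1, 'maxmem': 512} (key order may vary)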

def FillIPolicy(default_ipolicy, custom_ipolicy):
  """Fills an instance policy with defaults.

  """
  assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
  ret_dict = copy.deepcopy(custom_ipolicy)
  for key in default_ipolicy:
    if key not in ret_dict:
      ret_dict[key] = copy.deepcopy(default_ipolicy[key])
    elif key == constants.ISPECS_STD:
      ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
  return ret_dict

def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
  """Fills the disk parameter defaults.

  @see: L{FillDict} for parameters and return value

  """
  assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES

  return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
                            skip_keys=skip_keys))
              for dt in constants.DISK_TEMPLATES)

def UpgradeGroupedParams(target, defaults):
  """Update all groups for the target parameter.

  @type target: dict of dicts
  @param target: {group: {parameter: value}}
  @type defaults: dict
  @param defaults: default parameter values

  """
  if target is None:
    target = {constants.PP_DEFAULT: defaults}
  else:
    for group in target:
      target[group] = FillDict(defaults, target[group])
  return target

def UpgradeBeParams(target):
  """Update the be parameters dict to the new format.

  @type target: dict
  @param target: "be" parameters dict

  """
  if constants.BE_MEMORY in target:
    memory = target[constants.BE_MEMORY]
    target[constants.BE_MAXMEM] = memory
    target[constants.BE_MINMEM] = memory
    del target[constants.BE_MEMORY]
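
# Example (illustrative sketch, not part of ganeti.objects): the upgrade above
# splits the legacy single "memory" backend parameter into separate maximum and
# minimum values. The literal key names used here ("memory", "maxmem",
# "minmem") are an assumption about what the BE_* constants expand to.
BE_MEMORY_EX, BE_MAXMEM_EX, BE_MINMEM_EX = "memory", "maxmem", "minmem"

def _upgrade_be_params_example(target):
  # Copy the legacy value to both new keys, then drop the legacy key;
  # the dict is modified in place, like UpgradeBeParams above.
  if BE_MEMORY_EX in target:
    memory = target[BE_MEMORY_EX]
    target[BE_MAXMEM_EX] = memory
    target[BE_MINMEM_EX] = memory
    del target[BE_MEMORY_EX]

_beparams = {"memory": 256, "vcpus": 2}
_upgrade_be_params_example(_beparams)
print(_beparams)
# -> {'vcpus': 2, 'maxmem': 256, 'minmem': 256} (key order may vary)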

def UpgradeDiskParams(diskparams):
  """Upgrade the disk parameters.

  @type diskparams: dict
  @param diskparams: disk parameters to upgrade
  @rtype: dict
  @return: the upgraded disk parameters dict

  """
  if not diskparams:
    result = {}
  else:
    result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)

  return result

def UpgradeNDParams(ndparams):
  """Upgrade ndparams structure.

  @type ndparams: dict
  @param ndparams: node parameters to upgrade
  @rtype: dict
  @return: the upgraded node parameters dict

  """
  if ndparams is None:
    ndparams = {}

  if (constants.ND_OOB_PROGRAM in ndparams and
      ndparams[constants.ND_OOB_PROGRAM] is None):
    # will be reset by the line below
    del ndparams[constants.ND_OOB_PROGRAM]
  return FillDict(constants.NDC_DEFAULTS, ndparams)

def MakeEmptyIPolicy():
  """Create empty IPolicy dictionary.

  """
  return {}

class ConfigObject(outils.ValidatedSlots):
  """A generic config object.

  It has the following properties:

    - provides somewhat safe recursive unpickling and pickling for its classes
    - unset attributes which are defined in slots are always returned
      as None instead of raising an error

  Classes derived from this must always declare __slots__ (we use many
  config objects and the memory reduction is useful)

  """
  __slots__ = []

  def __getattr__(self, name):
    if name not in self.GetAllSlots():
      raise AttributeError("Invalid object attribute %s.%s" %
                           (type(self).__name__, name))
    return None

  def __setstate__(self, state):
    slots = self.GetAllSlots()
    for name in state:
      if name in slots:
        setattr(self, name, state[name])

  def Validate(self):
    """Validates the slots.

    """

  def ToDict(self):
    """Convert to a dict holding only standard python types.

    The generic routine just dumps all of this object's attributes in
    a dict. It does not work if the class has children who are
    ConfigObjects themselves (e.g. the nics list in an Instance), in
    which case the object should subclass the function in order to
    make sure all objects returned are only standard python types.

    """
    result = {}
    for name in self.GetAllSlots():
      value = getattr(self, name, None)
      if value is not None:
        result[name] = value
    return result

  __getstate__ = ToDict

  @classmethod
  def FromDict(cls, val):
    """Create an object from a dictionary.

    This generic routine takes a dict, instantiates a new instance of
    the given class, and sets attributes based on the dict content.

    As for `ToDict`, this does not work if the class has children
    who are ConfigObjects themselves (e.g. the nics list in an
    Instance), in which case the object should subclass the function
    and alter the objects.

    """
    if not isinstance(val, dict):
      raise errors.ConfigurationError("Invalid object passed to FromDict:"
                                      " expected dict, got %s" % type(val))
    val_str = dict([(str(k), v) for k, v in val.iteritems()])
    obj = cls(**val_str)  # pylint: disable=W0142
    return obj

  def Copy(self):
    """Makes a deep copy of the current object and its children.

    """
    dict_form = self.ToDict()
    clone_obj = self.__class__.FromDict(dict_form)
    return clone_obj

  def __repr__(self):
    """Implement __repr__ for ConfigObjects."""
    return repr(self.ToDict())

  def __eq__(self, other):
    """Implement __eq__ for ConfigObjects."""
    return isinstance(other, self.__class__) and self.ToDict() == other.ToDict()

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    This method will be called at configuration load time, and its
    implementation will be object dependent.

    """
    pass
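
# Example (illustrative sketch, not part of ganeti.objects): the ToDict/FromDict
# pair above is what makes config objects serializable; Copy() is just a
# round trip through both. The simplified stand-in below mimics that contract
# without the ganeti package (no slots validation, no nested containers);
# ExampleObject and its attributes are made up for illustration.
class ExampleObject(object):
  __slots__ = ["name", "mac", "ip"]

  def ToDict(self):
    # Dump only the attributes that are actually set, like ConfigObject.ToDict.
    return dict((n, getattr(self, n)) for n in self.__slots__
                if getattr(self, n, None) is not None)

  @classmethod
  def FromDict(cls, val):
    # Recreate an instance from plain dict data, like ConfigObject.FromDict.
    obj = cls()
    for k, v in val.items():
      setattr(obj, k, v)
    return obj

_nic = ExampleObject.FromDict({"name": "eth0", "mac": "aa:00:00:35:d0:7d"})
_data = _nic.ToDict()                   # plain dict, safe to serialize
_clone = ExampleObject.FromDict(_data)  # the Copy() pattern: ToDict then FromDict
print(_clone.ToDict() == _data)         # -> True; the unset 'ip' never appears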

class TaggableObject(ConfigObject):
  """A generic class supporting tags.

  """
  __slots__ = ["tags"]
  VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")

  @classmethod
  def ValidateTag(cls, tag):
    """Check if a tag is valid.

    If the tag is invalid, an errors.TagError will be raised. The
    function has no return value.

    """
    if not isinstance(tag, basestring):
      raise errors.TagError("Invalid tag type (not a string)")
    if len(tag) > constants.MAX_TAG_LEN:
      raise errors.TagError("Tag too long (>%d characters)" %
                            constants.MAX_TAG_LEN)
    if not tag:
      raise errors.TagError("Tags cannot be empty")
    if not cls.VALID_TAG_RE.match(tag):
      raise errors.TagError("Tag contains invalid characters")

  def GetTags(self):
    """Return the tags list.

    """
    tags = getattr(self, "tags", None)
    if tags is None:
      tags = self.tags = set()
    return tags

  def AddTag(self, tag):
    """Add a new tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    if len(tags) >= constants.MAX_TAGS_PER_OBJ:
      raise errors.TagError("Too many tags")
    self.GetTags().add(tag)

  def RemoveTag(self, tag):
    """Remove a tag.

    """
    self.ValidateTag(tag)
    tags = self.GetTags()
    try:
      tags.remove(tag)
    except KeyError:
      raise errors.TagError("Tag not found")

  def ToDict(self):
    """Taggable-object-specific conversion to standard python types.

    This replaces the tags set with a list.

    """
    bo = super(TaggableObject, self).ToDict()

    tags = bo.get("tags", None)
    if isinstance(tags, set):
      bo["tags"] = list(tags)
    return bo

  @classmethod
  def FromDict(cls, val):
    """Custom function for instances.

    """
    obj = super(TaggableObject, cls).FromDict(val)
    if hasattr(obj, "tags") and isinstance(obj.tags, list):
      obj.tags = set(obj.tags)
    return obj
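
# Example (illustrative sketch, not part of ganeti.objects): tags must be
# non-empty strings, at most MAX_TAG_LEN characters, and match VALID_TAG_RE.
# The standalone check below repeats those rules; the length limit of 128 is
# an assumption standing in for constants.MAX_TAG_LEN.
import re as _re

_VALID_TAG_RE = _re.compile(r"^[\w.+*/:@-]+$")  # same pattern as above
_MAX_TAG_LEN = 128                              # assumed value for illustration

def _validate_tag_example(tag):
  # Mirrors TaggableObject.ValidateTag: raise on the first violated rule.
  if not isinstance(tag, str):
    raise ValueError("Invalid tag type (not a string)")
  if len(tag) > _MAX_TAG_LEN:
    raise ValueError("Tag too long (>%d characters)" % _MAX_TAG_LEN)
  if not tag:
    raise ValueError("Tags cannot be empty")
  if not _VALID_TAG_RE.match(tag):
    raise ValueError("Tag contains invalid characters")

_validate_tag_example("webserver")          # fine
_validate_tag_example("owner:team-infra")   # ':' and '-' are allowed
try:
  _validate_tag_example("bad tag")          # spaces are rejected
except ValueError as err:
  print(err)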

class MasterNetworkParameters(ConfigObject):
  """Network configuration parameters for the master

  @ivar uuid: master node's UUID
  @ivar ip: master IP
  @ivar netmask: master netmask
  @ivar netdev: master network device
  @ivar ip_family: master IP family

  """
  __slots__ = [
    "uuid",
    "ip",
    "netmask",
    "netdev",
    "ip_family",
    ]

class ConfigData(ConfigObject):
  """Top-level config object."""
  __slots__ = [
    "version",
    "cluster",
    "nodes",
    "nodegroups",
    "instances",
    "networks",
    "serial_no",
    ] + _TIMESTAMPS

  def ToDict(self):
    """Custom function for top-level config data.

    This just replaces the list of instances, nodes and the cluster
    with standard python types.

    """
    mydict = super(ConfigData, self).ToDict()
    mydict["cluster"] = mydict["cluster"].ToDict()
    for key in "nodes", "instances", "nodegroups", "networks":
      mydict[key] = outils.ContainerToDicts(mydict[key])

    return mydict

  @classmethod
  def FromDict(cls, val):
    """Custom function for top-level config data

    """
    obj = super(ConfigData, cls).FromDict(val)
    obj.cluster = Cluster.FromDict(obj.cluster)
    obj.nodes = outils.ContainerFromDicts(obj.nodes, dict, Node)
    obj.instances = \
      outils.ContainerFromDicts(obj.instances, dict, Instance)
    obj.nodegroups = \
      outils.ContainerFromDicts(obj.nodegroups, dict, NodeGroup)
    obj.networks = outils.ContainerFromDicts(obj.networks, dict, Network)
    return obj

  def HasAnyDiskOfType(self, dev_type):
    """Check if there is at least one disk of the given type in the configuration.

    @type dev_type: L{constants.DTS_BLOCK}
    @param dev_type: the type to look for
    @rtype: boolean
    @return: boolean indicating if a disk of the given type was found or not

    """
    for instance in self.instances.values():
      for disk in instance.disks:
        if disk.IsBasedOnDiskType(dev_type):
          return True
    return False

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    self.cluster.UpgradeConfig()
    for node in self.nodes.values():
      node.UpgradeConfig()
    for instance in self.instances.values():
      instance.UpgradeConfig()
    self._UpgradeEnabledDiskTemplates()
    if self.nodegroups is None:
      self.nodegroups = {}
    for nodegroup in self.nodegroups.values():
      nodegroup.UpgradeConfig()
      InstancePolicy.UpgradeDiskTemplates(
        nodegroup.ipolicy, self.cluster.enabled_disk_templates)
    if self.cluster.drbd_usermode_helper is None:
      if self.cluster.IsDiskTemplateEnabled(constants.DT_DRBD8):
        self.cluster.drbd_usermode_helper = constants.DEFAULT_DRBD_HELPER
    if self.networks is None:
      self.networks = {}
    for network in self.networks.values():
      network.UpgradeConfig()

  def _UpgradeEnabledDiskTemplates(self):
    """Upgrade the cluster's enabled disk templates by inspecting the currently
    enabled and/or used disk templates.

    """
    if not self.cluster.enabled_disk_templates:
      template_set = \
        set([inst.disk_template for inst in self.instances.values()])
      # Add drbd and plain, if lvm is enabled (by specifying a volume group)
      if self.cluster.volume_group_name:
        template_set.add(constants.DT_DRBD8)
        template_set.add(constants.DT_PLAIN)
      # Set enabled_disk_templates to the inferred disk templates. Order them
      # according to a preference list that is based on Ganeti's history of
      # supported disk templates.
      self.cluster.enabled_disk_templates = []
      for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
        if preferred_template in template_set:
          self.cluster.enabled_disk_templates.append(preferred_template)
          template_set.remove(preferred_template)
      self.cluster.enabled_disk_templates.extend(list(template_set))
    InstancePolicy.UpgradeDiskTemplates(
      self.cluster.ipolicy, self.cluster.enabled_disk_templates)
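
# Example (illustrative sketch, not part of ganeti.objects): the inference step
# above collects the disk templates actually used by instances, adds drbd/plain
# when a volume group is configured, and then orders the result by a fixed
# preference list. The preference list shown here is an assumption standing in
# for constants.DISK_TEMPLATE_PREFERENCE.
_DISK_TEMPLATE_PREFERENCE = ["drbd", "plain", "file", "sharedfile", "rbd",
                             "blockdev", "ext", "diskless"]

def _order_templates_example(used_templates):
  # Preferred templates first (in preference order), any leftovers after.
  remaining = set(used_templates)
  ordered = []
  for preferred in _DISK_TEMPLATE_PREFERENCE:
    if preferred in remaining:
      ordered.append(preferred)
      remaining.remove(preferred)
  return ordered + sorted(remaining)

# Templates inferred from existing instances plus drbd/plain from an LVM setup:
print(_order_templates_example({"file", "plain", "drbd"}))
# -> ['drbd', 'plain', 'file']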

class NIC(ConfigObject):
  """Config object representing a network card."""
  __slots__ = ["name", "mac", "ip", "network",
               "nicparams", "netinfo", "pci"] + _UUID

  @classmethod
  def CheckParameterSyntax(cls, nicparams):
    """Check the given parameters for validity.

    @type nicparams: dict
    @param nicparams: dictionary with parameter names/value
    @raise errors.ConfigurationError: when a parameter is not valid

    """
    mode = nicparams[constants.NIC_MODE]
    if (mode not in constants.NIC_VALID_MODES and
        mode != constants.VALUE_AUTO):
      raise errors.ConfigurationError("Invalid NIC mode '%s'" % mode)

    if (mode == constants.NIC_MODE_BRIDGED and
        not nicparams[constants.NIC_LINK]):
      raise errors.ConfigurationError("Missing bridged NIC link")
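
# Example (illustrative sketch, not part of ganeti.objects): the two NIC checks
# above require the mode to be a known mode or "auto", and bridged NICs to have
# a link. The literal constant values below are assumptions made only for this
# standalone example.
_NIC_MODE, _NIC_LINK = "mode", "link"
_NIC_MODE_BRIDGED, _NIC_MODE_ROUTED = "bridged", "routed"
_NIC_VALID_MODES = frozenset([_NIC_MODE_BRIDGED, _NIC_MODE_ROUTED])
_VALUE_AUTO = "auto"

def _check_nic_params_example(nicparams):
  # Mirrors NIC.CheckParameterSyntax.
  mode = nicparams[_NIC_MODE]
  if mode not in _NIC_VALID_MODES and mode != _VALUE_AUTO:
    raise ValueError("Invalid NIC mode '%s'" % mode)
  if mode == _NIC_MODE_BRIDGED and not nicparams[_NIC_LINK]:
    raise ValueError("Missing bridged NIC link")

_check_nic_params_example({"mode": "bridged", "link": "br0"})   # fine
try:
  _check_nic_params_example({"mode": "bridged", "link": ""})    # no link given
except ValueError as err:
  print(err)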
518 519 -class Disk(ConfigObject):
520 """Config object representing a block device.""" 521 __slots__ = (["name", "dev_type", "logical_id", "children", "iv_name", 522 "size", "mode", "params", "spindles", "pci"] + _UUID + 523 # dynamic_params is special. It depends on the node this instance 524 # is sent to, and should not be persisted. 525 ["dynamic_params"]) 526
527 - def CreateOnSecondary(self):
528 """Test if this device needs to be created on a secondary node.""" 529 return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
530
531 - def AssembleOnSecondary(self):
532 """Test if this device needs to be assembled on a secondary node.""" 533 return self.dev_type in (constants.DT_DRBD8, constants.DT_PLAIN)
534
535 - def OpenOnSecondary(self):
536 """Test if this device needs to be opened on a secondary node.""" 537 return self.dev_type in (constants.DT_PLAIN,)
538
539 - def StaticDevPath(self):
540 """Return the device path if this device type has a static one. 541 542 Some devices (LVM for example) live always at the same /dev/ path, 543 irrespective of their status. For such devices, we return this 544 path, for others we return None. 545 546 @warning: The path returned is not a normalized pathname; callers 547 should check that it is a valid path. 548 549 """ 550 if self.dev_type == constants.DT_PLAIN: 551 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1]) 552 elif self.dev_type == constants.DT_BLOCK: 553 return self.logical_id[1] 554 elif self.dev_type == constants.DT_RBD: 555 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1]) 556 return None
557
558 - def ChildrenNeeded(self):
559 """Compute the needed number of children for activation. 560 561 This method will return either -1 (all children) or a positive 562 number denoting the minimum number of children needed for 563 activation (only mirrored devices will usually return >=0). 564 565 Currently, only DRBD8 supports diskless activation (therefore we 566 return 0), for all other we keep the previous semantics and return 567 -1. 568 569 """ 570 if self.dev_type == constants.DT_DRBD8: 571 return 0 572 return -1
573
574 - def IsBasedOnDiskType(self, dev_type):
575 """Check if the disk or its children are based on the given type. 576 577 @type dev_type: L{constants.DTS_BLOCK} 578 @param dev_type: the type to look for 579 @rtype: boolean 580 @return: boolean indicating if a device of the given type was found or not 581 582 """ 583 if self.children: 584 for child in self.children: 585 if child.IsBasedOnDiskType(dev_type): 586 return True 587 return self.dev_type == dev_type
588

  def GetNodes(self, node_uuid):
    """This function returns the nodes this device lives on.

    Given the node on which the parent of the device lives (or, in
    case of a top-level device, the primary node of the device's
    instance), this function will return a list of nodes on which this
    device needs to (or can) be assembled.

    """
    if self.dev_type in [constants.DT_PLAIN, constants.DT_FILE,
                         constants.DT_BLOCK, constants.DT_RBD,
                         constants.DT_EXT, constants.DT_SHARED_FILE]:
      result = [node_uuid]
    elif self.dev_type in constants.DTS_DRBD:
      result = [self.logical_id[0], self.logical_id[1]]
      if node_uuid not in result:
        raise errors.ConfigurationError("DRBD device passed unknown node")
    else:
      raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
    return result
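
# Example (illustrative sketch, not part of ganeti.objects): single-node device
# types live only on the node passed in, while a DRBD8 device lives on the two
# node UUIDs recorded at the start of its logical_id. The simplified version
# below uses plain strings in place of the DT_* constants and made-up UUIDs.
def _get_nodes_example(dev_type, logical_id, node_uuid):
  if dev_type in ("plain", "file", "blockdev", "rbd", "ext", "sharedfile"):
    return [node_uuid]                      # lives only on the given node
  if dev_type == "drbd8":
    nodes = [logical_id[0], logical_id[1]]  # primary and secondary node UUIDs
    if node_uuid not in nodes:
      raise ValueError("DRBD device passed unknown node")
    return nodes
  raise ValueError("Unhandled device type %s" % dev_type)

print(_get_nodes_example("plain", ("xenvg", "lv0"), "node1-uuid"))
# -> ['node1-uuid']
print(_get_nodes_example("drbd8",
                         ("node1-uuid", "node2-uuid", 11000, 0, 0, "secret"),
                         "node1-uuid"))
# -> ['node1-uuid', 'node2-uuid']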
610 - def ComputeNodeTree(self, parent_node_uuid):
611 """Compute the node/disk tree for this disk and its children. 612 613 This method, given the node on which the parent disk lives, will 614 return the list of all (node UUID, disk) pairs which describe the disk 615 tree in the most compact way. For example, a drbd/lvm stack 616 will be returned as (primary_node, drbd) and (secondary_node, drbd) 617 which represents all the top-level devices on the nodes. 618 619 """ 620 my_nodes = self.GetNodes(parent_node_uuid) 621 result = [(node, self) for node in my_nodes] 622 if not self.children: 623 # leaf device 624 return result 625 for node in my_nodes: 626 for child in self.children: 627 child_result = child.ComputeNodeTree(node) 628 if len(child_result) == 1: 629 # child (and all its descendants) is simple, doesn't split 630 # over multiple hosts, so we don't need to describe it, our 631 # own entry for this node describes it completely 632 continue 633 else: 634 # check if child nodes differ from my nodes; note that 635 # subdisk can differ from the child itself, and be instead 636 # one of its descendants 637 for subnode, subdisk in child_result: 638 if subnode not in my_nodes: 639 result.append((subnode, subdisk)) 640 # otherwise child is under our own node, so we ignore this 641 # entry (but probably the other results in the list will 642 # be different) 643 return result
644
645 - def ComputeGrowth(self, amount):
646 """Compute the per-VG growth requirements. 647 648 This only works for VG-based disks. 649 650 @type amount: integer 651 @param amount: the desired increase in (user-visible) disk space 652 @rtype: dict 653 @return: a dictionary of volume-groups and the required size 654 655 """ 656 if self.dev_type == constants.DT_PLAIN: 657 return {self.logical_id[0]: amount} 658 elif self.dev_type == constants.DT_DRBD8: 659 if self.children: 660 return self.children[0].ComputeGrowth(amount) 661 else: 662 return {} 663 else: 664 # Other disk types do not require VG space 665 return {}
666

  def RecordGrow(self, amount):
    """Update the size of this disk after growth.

    This method recurses over the disk's children and updates their
    size correspondingly. The method needs to be kept in sync with the
    actual algorithms from bdev.

    """
    if self.dev_type in (constants.DT_PLAIN, constants.DT_FILE,
                         constants.DT_RBD, constants.DT_EXT,
                         constants.DT_SHARED_FILE):
      self.size += amount
    elif self.dev_type == constants.DT_DRBD8:
      if self.children:
        self.children[0].RecordGrow(amount)
      self.size += amount
    else:
      raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
                                   " disk type %s" % self.dev_type)
687 - def Update(self, size=None, mode=None, spindles=None):
688 """Apply changes to size, spindles and mode. 689 690 """ 691 if self.dev_type == constants.DT_DRBD8: 692 if self.children: 693 self.children[0].Update(size=size, mode=mode) 694 else: 695 assert not self.children 696 697 if size is not None: 698 self.size = size 699 if mode is not None: 700 self.mode = mode 701 if spindles is not None: 702 self.spindles = spindles
703
704 - def UnsetSize(self):
705 """Sets recursively the size to zero for the disk and its children. 706 707 """ 708 if self.children: 709 for child in self.children: 710 child.UnsetSize() 711 self.size = 0
712
713 - def UpdateDynamicDiskParams(self, target_node_uuid, nodes_ip):
714 """Updates the dynamic disk params for the given node. 715 716 This is mainly used for drbd, which needs ip/port configuration. 717 718 Arguments: 719 - target_node_uuid: the node UUID we wish to configure for 720 - nodes_ip: a mapping of node name to ip 721 722 The target_node must exist in nodes_ip, and should be one of the 723 nodes in the logical ID if this device is a DRBD device. 724 725 """ 726 if self.children: 727 for child in self.children: 728 child.UpdateDynamicDiskParams(target_node_uuid, nodes_ip) 729 730 dyn_disk_params = {} 731 if self.logical_id is not None and self.dev_type in constants.DTS_DRBD: 732 pnode_uuid, snode_uuid, _, pminor, sminor, _ = self.logical_id 733 if target_node_uuid not in (pnode_uuid, snode_uuid): 734 # disk object is being sent to neither the primary nor the secondary 735 # node. reset the dynamic parameters, the target node is not 736 # supposed to use them. 737 self.dynamic_params = dyn_disk_params 738 return 739 740 pnode_ip = nodes_ip.get(pnode_uuid, None) 741 snode_ip = nodes_ip.get(snode_uuid, None) 742 if pnode_ip is None or snode_ip is None: 743 raise errors.ConfigurationError("Can't find primary or secondary node" 744 " for %s" % str(self)) 745 if pnode_uuid == target_node_uuid: 746 dyn_disk_params[constants.DDP_LOCAL_IP] = pnode_ip 747 dyn_disk_params[constants.DDP_REMOTE_IP] = snode_ip 748 dyn_disk_params[constants.DDP_LOCAL_MINOR] = pminor 749 dyn_disk_params[constants.DDP_REMOTE_MINOR] = sminor 750 else: # it must be secondary, we tested above 751 dyn_disk_params[constants.DDP_LOCAL_IP] = snode_ip 752 dyn_disk_params[constants.DDP_REMOTE_IP] = pnode_ip 753 dyn_disk_params[constants.DDP_LOCAL_MINOR] = sminor 754 dyn_disk_params[constants.DDP_REMOTE_MINOR] = pminor 755 756 self.dynamic_params = dyn_disk_params
757 758 # pylint: disable=W0221
759 - def ToDict(self, include_dynamic_params=False):
760 """Disk-specific conversion to standard python types. 761 762 This replaces the children lists of objects with lists of 763 standard python types. 764 765 """ 766 bo = super(Disk, self).ToDict() 767 if not include_dynamic_params and "dynamic_params" in bo: 768 del bo["dynamic_params"] 769 770 for attr in ("children",): 771 alist = bo.get(attr, None) 772 if alist: 773 bo[attr] = outils.ContainerToDicts(alist) 774 return bo
775 776 @classmethod
777 - def FromDict(cls, val):
778 """Custom function for Disks 779 780 """ 781 obj = super(Disk, cls).FromDict(val) 782 if obj.children: 783 obj.children = outils.ContainerFromDicts(obj.children, list, Disk) 784 if obj.logical_id and isinstance(obj.logical_id, list): 785 obj.logical_id = tuple(obj.logical_id) 786 if obj.dev_type in constants.DTS_DRBD: 787 # we need a tuple of length six here 788 if len(obj.logical_id) < 6: 789 obj.logical_id += (None,) * (6 - len(obj.logical_id)) 790 return obj
791
792 - def __str__(self):
793 """Custom str() formatter for disks. 794 795 """ 796 if self.dev_type == constants.DT_PLAIN: 797 val = "<LogicalVolume(/dev/%s/%s" % self.logical_id 798 elif self.dev_type in constants.DTS_DRBD: 799 node_a, node_b, port, minor_a, minor_b = self.logical_id[:5] 800 val = "<DRBD8(" 801 802 val += ("hosts=%s/%d-%s/%d, port=%s, " % 803 (node_a, minor_a, node_b, minor_b, port)) 804 if self.children and self.children.count(None) == 0: 805 val += "backend=%s, metadev=%s" % (self.children[0], self.children[1]) 806 else: 807 val += "no local storage" 808 else: 809 val = ("<Disk(type=%s, logical_id=%s, children=%s" % 810 (self.dev_type, self.logical_id, self.children)) 811 if self.iv_name is None: 812 val += ", not visible" 813 else: 814 val += ", visible as /dev/%s" % self.iv_name 815 if self.spindles is not None: 816 val += ", spindles=%s" % self.spindles 817 if isinstance(self.size, int): 818 val += ", size=%dm)>" % self.size 819 else: 820 val += ", size='%s')>" % (self.size,) 821 return val
822
823 - def Verify(self):
824 """Checks that this disk is correctly configured. 825 826 """ 827 all_errors = [] 828 if self.mode not in constants.DISK_ACCESS_SET: 829 all_errors.append("Disk access mode '%s' is invalid" % (self.mode, )) 830 return all_errors
831

  def UpgradeConfig(self):
    """Fill defaults for missing configuration values.

    """
    if self.children:
      for child in self.children:
        child.UpgradeConfig()

    # FIXME: Make this configurable in Ganeti 2.7
    # Params should be an empty dict that gets filled any time needed
    # In case of ext template we allow arbitrary params that should not
    # be overridden during a config reload/upgrade.
    if not self.params or not isinstance(self.params, dict):
      self.params = {}

    # add here config upgrade for this disk

    # map of legacy device types (mapping differing LD constants to new
    # DT constants)
    LEG_DEV_TYPE_MAP = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
    if self.dev_type in LEG_DEV_TYPE_MAP:
      self.dev_type = LEG_DEV_TYPE_MAP[self.dev_type]
854 855 @staticmethod
856 - def ComputeLDParams(disk_template, disk_params):
857 """Computes Logical Disk parameters from Disk Template parameters. 858 859 @type disk_template: string 860 @param disk_template: disk template, one of L{constants.DISK_TEMPLATES} 861 @type disk_params: dict 862 @param disk_params: disk template parameters; 863 dict(template_name -> parameters 864 @rtype: list(dict) 865 @return: a list of dicts, one for each node of the disk hierarchy. Each dict 866 contains the LD parameters of the node. The tree is flattened in-order. 867 868 """ 869 if disk_template not in constants.DISK_TEMPLATES: 870 raise errors.ProgrammerError("Unknown disk template %s" % disk_template) 871 872 assert disk_template in disk_params 873 874 result = list() 875 dt_params = disk_params[disk_template] 876 if disk_template == constants.DT_DRBD8: 877 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_DRBD8], { 878 constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE], 879 constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS], 880 constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS], 881 constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG], 882 constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM], 883 constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM], 884 constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL], 885 constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC], 886 constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD], 887 constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET], 888 constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET], 889 constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE], 890 constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE], 891 })) 892 893 # data LV 894 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], { 895 constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES], 896 })) 897 898 # metadata LV 899 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], { 900 constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES], 901 })) 902 903 elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE): 904 result.append(constants.DISK_LD_DEFAULTS[disk_template]) 905 906 elif disk_template == constants.DT_PLAIN: 907 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], { 908 constants.LDP_STRIPES: dt_params[constants.LV_STRIPES], 909 })) 910 911 elif disk_template == constants.DT_BLOCK: 912 result.append(constants.DISK_LD_DEFAULTS[constants.DT_BLOCK]) 913 914 elif disk_template == constants.DT_RBD: 915 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_RBD], { 916 constants.LDP_POOL: dt_params[constants.RBD_POOL], 917 constants.LDP_ACCESS: dt_params[constants.RBD_ACCESS], 918 })) 919 920 elif disk_template == constants.DT_EXT: 921 result.append(constants.DISK_LD_DEFAULTS[constants.DT_EXT]) 922 923 return result
924
925 926 -class InstancePolicy(ConfigObject):
927 """Config object representing instance policy limits dictionary. 928 929 Note that this object is not actually used in the config, it's just 930 used as a placeholder for a few functions. 931 932 """ 933 @classmethod
934 - def UpgradeDiskTemplates(cls, ipolicy, enabled_disk_templates):
935 """Upgrades the ipolicy configuration.""" 936 if constants.IPOLICY_DTS in ipolicy: 937 if not set(ipolicy[constants.IPOLICY_DTS]).issubset( 938 set(enabled_disk_templates)): 939 ipolicy[constants.IPOLICY_DTS] = list( 940 set(ipolicy[constants.IPOLICY_DTS]) & set(enabled_disk_templates))
941 942 @classmethod
943 - def CheckParameterSyntax(cls, ipolicy, check_std):
944 """ Check the instance policy for validity. 945 946 @type ipolicy: dict 947 @param ipolicy: dictionary with min/max/std specs and policies 948 @type check_std: bool 949 @param check_std: Whether to check std value or just assume compliance 950 @raise errors.ConfigurationError: when the policy is not legal 951 952 """ 953 InstancePolicy.CheckISpecSyntax(ipolicy, check_std) 954 if constants.IPOLICY_DTS in ipolicy: 955 InstancePolicy.CheckDiskTemplates(ipolicy[constants.IPOLICY_DTS]) 956 for key in constants.IPOLICY_PARAMETERS: 957 if key in ipolicy: 958 InstancePolicy.CheckParameter(key, ipolicy[key]) 959 wrong_keys = frozenset(ipolicy.keys()) - constants.IPOLICY_ALL_KEYS 960 if wrong_keys: 961 raise errors.ConfigurationError("Invalid keys in ipolicy: %s" % 962 utils.CommaJoin(wrong_keys))
963 964 @classmethod
965 - def _CheckIncompleteSpec(cls, spec, keyname):
966 missing_params = constants.ISPECS_PARAMETERS - frozenset(spec.keys()) 967 if missing_params: 968 msg = ("Missing instance specs parameters for %s: %s" % 969 (keyname, utils.CommaJoin(missing_params))) 970 raise errors.ConfigurationError(msg)
971 972 @classmethod
973 - def CheckISpecSyntax(cls, ipolicy, check_std):
974 """Check the instance policy specs for validity. 975 976 @type ipolicy: dict 977 @param ipolicy: dictionary with min/max/std specs 978 @type check_std: bool 979 @param check_std: Whether to check std value or just assume compliance 980 @raise errors.ConfigurationError: when specs are not valid 981 982 """ 983 if constants.ISPECS_MINMAX not in ipolicy: 984 # Nothing to check 985 return 986 987 if check_std and constants.ISPECS_STD not in ipolicy: 988 msg = "Missing key in ipolicy: %s" % constants.ISPECS_STD 989 raise errors.ConfigurationError(msg) 990 stdspec = ipolicy.get(constants.ISPECS_STD) 991 if check_std: 992 InstancePolicy._CheckIncompleteSpec(stdspec, constants.ISPECS_STD) 993 994 if not ipolicy[constants.ISPECS_MINMAX]: 995 raise errors.ConfigurationError("Empty minmax specifications") 996 std_is_good = False 997 for minmaxspecs in ipolicy[constants.ISPECS_MINMAX]: 998 missing = constants.ISPECS_MINMAX_KEYS - frozenset(minmaxspecs.keys()) 999 if missing: 1000 msg = "Missing instance specification: %s" % utils.CommaJoin(missing) 1001 raise errors.ConfigurationError(msg) 1002 for (key, spec) in minmaxspecs.items(): 1003 InstancePolicy._CheckIncompleteSpec(spec, key) 1004 1005 spec_std_ok = True 1006 for param in constants.ISPECS_PARAMETERS: 1007 par_std_ok = InstancePolicy._CheckISpecParamSyntax(minmaxspecs, stdspec, 1008 param, check_std) 1009 spec_std_ok = spec_std_ok and par_std_ok 1010 std_is_good = std_is_good or spec_std_ok 1011 if not std_is_good: 1012 raise errors.ConfigurationError("Invalid std specifications")
1013 1014 @classmethod
1015 - def _CheckISpecParamSyntax(cls, minmaxspecs, stdspec, name, check_std):
1016 """Check the instance policy specs for validity on a given key. 1017 1018 We check if the instance specs makes sense for a given key, that is 1019 if minmaxspecs[min][name] <= stdspec[name] <= minmaxspec[max][name]. 1020 1021 @type minmaxspecs: dict 1022 @param minmaxspecs: dictionary with min and max instance spec 1023 @type stdspec: dict 1024 @param stdspec: dictionary with standard instance spec 1025 @type name: string 1026 @param name: what are the limits for 1027 @type check_std: bool 1028 @param check_std: Whether to check std value or just assume compliance 1029 @rtype: bool 1030 @return: C{True} when specs are valid, C{False} when standard spec for the 1031 given name is not valid 1032 @raise errors.ConfigurationError: when min/max specs for the given name 1033 are not valid 1034 1035 """ 1036 minspec = minmaxspecs[constants.ISPECS_MIN] 1037 maxspec = minmaxspecs[constants.ISPECS_MAX] 1038 min_v = minspec[name] 1039 max_v = maxspec[name] 1040 1041 if min_v > max_v: 1042 err = ("Invalid specification of min/max values for %s: %s/%s" % 1043 (name, min_v, max_v)) 1044 raise errors.ConfigurationError(err) 1045 elif check_std: 1046 std_v = stdspec.get(name, min_v) 1047 return std_v >= min_v and std_v <= max_v 1048 else: 1049 return True
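
# Example (illustrative sketch, not part of ganeti.objects): the per-parameter
# rule checked above is simply min <= std <= max, with the min/max pair itself
# required to be consistent. The standalone check below follows the minmax/std
# dict layout used above, with plain strings instead of the ISPECS_* constants
# and a made-up parameter name.
def _check_ispec_param_example(minmaxspecs, stdspec, name, check_std=True):
  # Raise if min > max, otherwise report whether the standard value
  # (defaulting to min) falls inside the allowed range.
  min_v = minmaxspecs["min"][name]
  max_v = minmaxspecs["max"][name]
  if min_v > max_v:
    raise ValueError("Invalid specification of min/max values for %s: %s/%s"
                     % (name, min_v, max_v))
  if not check_std:
    return True
  std_v = stdspec.get(name, min_v)
  return min_v <= std_v <= max_v

_minmax = {"min": {"memory-size": 128}, "max": {"memory-size": 32768}}
print(_check_ispec_param_example(_minmax, {"memory-size": 2048}, "memory-size"))
# -> True
print(_check_ispec_param_example(_minmax, {"memory-size": 65536}, "memory-size"))
# -> False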
1050 1051 @classmethod
1052 - def CheckDiskTemplates(cls, disk_templates):
1053 """Checks the disk templates for validity. 1054 1055 """ 1056 if not disk_templates: 1057 raise errors.ConfigurationError("Instance policy must contain" + 1058 " at least one disk template") 1059 wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES) 1060 if wrong: 1061 raise errors.ConfigurationError("Invalid disk template(s) %s" % 1062 utils.CommaJoin(wrong))
1063 1064 @classmethod
1065 - def CheckParameter(cls, key, value):
1066 """Checks a parameter. 1067 1068 Currently we expect all parameters to be float values. 1069 1070 """ 1071 try: 1072 float(value) 1073 except (TypeError, ValueError), err: 1074 raise errors.ConfigurationError("Invalid value for key" " '%s':" 1075 " '%s', error: %s" % (key, value, err))
1076
1077 1078 -class Instance(TaggableObject):
1079 """Config object representing an instance.""" 1080 __slots__ = [ 1081 "name", 1082 "primary_node", 1083 "os", 1084 "hypervisor", 1085 "hvparams", 1086 "beparams", 1087 "osparams", 1088 "admin_state", 1089 "nics", 1090 "disks", 1091 "disk_template", 1092 "disks_active", 1093 "network_port", 1094 "serial_no", 1095 ] + _TIMESTAMPS + _UUID 1096
1097 - def _ComputeSecondaryNodes(self):
1098 """Compute the list of secondary nodes. 1099 1100 This is a simple wrapper over _ComputeAllNodes. 1101 1102 """ 1103 all_nodes = set(self._ComputeAllNodes()) 1104 all_nodes.discard(self.primary_node) 1105 return tuple(all_nodes)
1106 1107 secondary_nodes = property(_ComputeSecondaryNodes, None, None, 1108 "List of names of secondary nodes") 1109
1110 - def _ComputeAllNodes(self):
1111 """Compute the list of all nodes. 1112 1113 Since the data is already there (in the drbd disks), keeping it as 1114 a separate normal attribute is redundant and if not properly 1115 synchronised can cause problems. Thus it's better to compute it 1116 dynamically. 1117 1118 """ 1119 def _Helper(nodes, device): 1120 """Recursively computes nodes given a top device.""" 1121 if device.dev_type in constants.DTS_DRBD: 1122 nodea, nodeb = device.logical_id[:2] 1123 nodes.add(nodea) 1124 nodes.add(nodeb) 1125 if device.children: 1126 for child in device.children: 1127 _Helper(nodes, child)
1128 1129 all_nodes = set() 1130 for device in self.disks: 1131 _Helper(all_nodes, device) 1132 # ensure that the primary node is always the first 1133 all_nodes.discard(self.primary_node) 1134 return (self.primary_node, ) + tuple(all_nodes)
1135 1136 all_nodes = property(_ComputeAllNodes, None, None, 1137 "List of names of all the nodes of the instance") 1138
1139 - def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
1140 """Provide a mapping of nodes to LVs this instance owns. 1141 1142 This function figures out what logical volumes should belong on 1143 which nodes, recursing through a device tree. 1144 1145 @type lvmap: dict 1146 @param lvmap: optional dictionary to receive the 1147 'node' : ['lv', ...] data. 1148 @type devs: list of L{Disk} 1149 @param devs: disks to get the LV name for. If None, all disk of this 1150 instance are used. 1151 @type node_uuid: string 1152 @param node_uuid: UUID of the node to get the LV names for. If None, the 1153 primary node of this instance is used. 1154 @return: None if lvmap arg is given, otherwise, a dictionary of 1155 the form { 'node_uuid' : ['volume1', 'volume2', ...], ... }; 1156 volumeN is of the form "vg_name/lv_name", compatible with 1157 GetVolumeList() 1158 1159 """ 1160 if node_uuid is None: 1161 node_uuid = self.primary_node 1162 1163 if lvmap is None: 1164 lvmap = { 1165 node_uuid: [], 1166 } 1167 ret = lvmap 1168 else: 1169 if not node_uuid in lvmap: 1170 lvmap[node_uuid] = [] 1171 ret = None 1172 1173 if not devs: 1174 devs = self.disks 1175 1176 for dev in devs: 1177 if dev.dev_type == constants.DT_PLAIN: 1178 lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1]) 1179 1180 elif dev.dev_type in constants.DTS_DRBD: 1181 if dev.children: 1182 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0]) 1183 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1]) 1184 1185 elif dev.children: 1186 self.MapLVsByNode(lvmap, dev.children, node_uuid) 1187 1188 return ret
1189

  def FindDisk(self, idx):
    """Find a disk given its index.

    This is just a wrapper that does validation of the index.

    @type idx: int
    @param idx: the disk index
    @rtype: L{Disk}
    @return: the corresponding disk
    @raise errors.OpPrereqError: when the given index is not valid

    """
    try:
      idx = int(idx)
      return self.disks[idx]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
                                 errors.ECODE_INVAL)
    except IndexError:
      raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
                                 " 0 to %d)" % (idx, len(self.disks) - 1),
                                 errors.ECODE_INVAL)
1212
1213 - def ToDict(self):
1214 """Instance-specific conversion to standard python types. 1215 1216 This replaces the children lists of objects with lists of standard 1217 python types. 1218 1219 """ 1220 bo = super(Instance, self).ToDict() 1221 1222 for attr in "nics", "disks": 1223 alist = bo.get(attr, None) 1224 if alist: 1225 nlist = outils.ContainerToDicts(alist) 1226 else: 1227 nlist = [] 1228 bo[attr] = nlist 1229 return bo
1230 1231 @classmethod
1232 - def FromDict(cls, val):
1233 """Custom function for instances. 1234 1235 """ 1236 if "admin_state" not in val: 1237 if val.get("admin_up", False): 1238 val["admin_state"] = constants.ADMINST_UP 1239 else: 1240 val["admin_state"] = constants.ADMINST_DOWN 1241 if "admin_up" in val: 1242 del val["admin_up"] 1243 obj = super(Instance, cls).FromDict(val) 1244 obj.nics = outils.ContainerFromDicts(obj.nics, list, NIC) 1245 obj.disks = outils.ContainerFromDicts(obj.disks, list, Disk) 1246 return obj
1247
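
# Example (illustrative sketch, not part of ganeti.objects): Instance.FromDict
# below also migrates the legacy boolean "admin_up" field to the newer
# "admin_state" field before building the object. The standalone sketch shows
# just that dictionary migration; "up"/"down" are assumed to be the values
# behind constants.ADMINST_UP / ADMINST_DOWN.
_ADMINST_UP, _ADMINST_DOWN = "up", "down"

def _upgrade_admin_state_example(val):
  # Mirrors the compatibility shim in Instance.FromDict.
  if "admin_state" not in val:
    if val.get("admin_up", False):
      val["admin_state"] = _ADMINST_UP
    else:
      val["admin_state"] = _ADMINST_DOWN
  if "admin_up" in val:
    del val["admin_up"]
  return val

print(_upgrade_admin_state_example({"name": "inst1", "admin_up": True}))
# -> {'name': 'inst1', 'admin_state': 'up'} (key order may vary)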
1248 - def UpgradeConfig(self):
1249 """Fill defaults for missing configuration values. 1250 1251 """ 1252 for nic in self.nics: 1253 nic.UpgradeConfig() 1254 for disk in self.disks: 1255 disk.UpgradeConfig() 1256 if self.hvparams: 1257 for key in constants.HVC_GLOBALS: 1258 try: 1259 del self.hvparams[key] 1260 except KeyError: 1261 pass 1262 if self.osparams is None: 1263 self.osparams = {} 1264 UpgradeBeParams(self.beparams) 1265 if self.disks_active is None: 1266 self.disks_active = self.admin_state == constants.ADMINST_UP
1267
1268 1269 -class OS(ConfigObject):
1270 """Config object representing an operating system. 1271 1272 @type supported_parameters: list 1273 @ivar supported_parameters: a list of tuples, name and description, 1274 containing the supported parameters by this OS 1275 1276 @type VARIANT_DELIM: string 1277 @cvar VARIANT_DELIM: the variant delimiter 1278 1279 """ 1280 __slots__ = [ 1281 "name", 1282 "path", 1283 "api_versions", 1284 "create_script", 1285 "export_script", 1286 "import_script", 1287 "rename_script", 1288 "verify_script", 1289 "supported_variants", 1290 "supported_parameters", 1291 ] 1292 1293 VARIANT_DELIM = "+" 1294 1295 @classmethod
1296 - def SplitNameVariant(cls, name):
1297 """Splits the name into the proper name and variant. 1298 1299 @param name: the OS (unprocessed) name 1300 @rtype: list 1301 @return: a list of two elements; if the original name didn't 1302 contain a variant, it's returned as an empty string 1303 1304 """ 1305 nv = name.split(cls.VARIANT_DELIM, 1) 1306 if len(nv) == 1: 1307 nv.append("") 1308 return nv
1309 1310 @classmethod
1311 - def GetName(cls, name):
1312 """Returns the proper name of the os (without the variant). 1313 1314 @param name: the OS (unprocessed) name 1315 1316 """ 1317 return cls.SplitNameVariant(name)[0]
1318 1319 @classmethod

  def GetVariant(cls, name):
    """Returns the variant of the OS (without the base name).

    @param name: the OS (unprocessed) name

    """
    return cls.SplitNameVariant(name)[1]
1327
1328 1329 -class ExtStorage(ConfigObject):
1330 """Config object representing an External Storage Provider. 1331 1332 """ 1333 __slots__ = [ 1334 "name", 1335 "path", 1336 "create_script", 1337 "remove_script", 1338 "grow_script", 1339 "attach_script", 1340 "detach_script", 1341 "setinfo_script", 1342 "verify_script", 1343 "supported_parameters", 1344 ]
1345

class NodeHvState(ConfigObject):
  """Hypervisor state on a node.

  @ivar mem_total: Total amount of memory
  @ivar mem_node: Memory used by, or reserved for, the node itself (not always
    available)
  @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
    rounding
  @ivar mem_inst: Memory used by instances living on node
  @ivar cpu_total: Total node CPU core count
  @ivar cpu_node: Number of CPU cores reserved for the node itself

  """
  __slots__ = [
    "mem_total",
    "mem_node",
    "mem_hv",
    "mem_inst",
    "cpu_total",
    "cpu_node",
    ] + _TIMESTAMPS
1368
1369 1370 -class NodeDiskState(ConfigObject):
1371 """Disk state on a node. 1372 1373 """ 1374 __slots__ = [ 1375 "total", 1376 "reserved", 1377 "overhead", 1378 ] + _TIMESTAMPS
1379

class Node(TaggableObject):
  """Config object representing a node.

  @ivar hv_state: Hypervisor state (e.g. number of CPUs)
  @ivar hv_state_static: Hypervisor state overridden by user
  @ivar disk_state: Disk state (e.g. free space)
  @ivar disk_state_static: Disk state overridden by user

  """
  __slots__ = [
    "name",
    "primary_ip",
    "secondary_ip",
    "serial_no",
    "master_candidate",
    "offline",
    "drained",
    "group",
    "master_capable",
    "vm_capable",
    "ndparams",
    "powered",
    "hv_state",
    "hv_state_static",
    "disk_state",
    "disk_state_static",
    ] + _TIMESTAMPS + _UUID
1409 - def UpgradeConfig(self):
1410 """Fill defaults for missing configuration values. 1411 1412 """ 1413 # pylint: disable=E0203 1414 # because these are "defined" via slots, not manually 1415 if self.master_capable is None: 1416 self.master_capable = True 1417 1418 if self.vm_capable is None: 1419 self.vm_capable = True 1420 1421 if self.ndparams is None: 1422 self.ndparams = {} 1423 # And remove any global parameter 1424 for key in constants.NDC_GLOBALS: 1425 if key in self.ndparams: 1426 logging.warning("Ignoring %s node parameter for node %s", 1427 key, self.name) 1428 del self.ndparams[key] 1429 1430 if self.powered is None: 1431 self.powered = True
1432
1433 - def ToDict(self):
1434 """Custom function for serializing. 1435 1436 """ 1437 data = super(Node, self).ToDict() 1438 1439 hv_state = data.get("hv_state", None) 1440 if hv_state is not None: 1441 data["hv_state"] = outils.ContainerToDicts(hv_state) 1442 1443 disk_state = data.get("disk_state", None) 1444 if disk_state is not None: 1445 data["disk_state"] = \ 1446 dict((key, outils.ContainerToDicts(value)) 1447 for (key, value) in disk_state.items()) 1448 1449 return data
1450 1451 @classmethod
1452 - def FromDict(cls, val):
1453 """Custom function for deserializing. 1454 1455 """ 1456 obj = super(Node, cls).FromDict(val) 1457 1458 if obj.hv_state is not None: 1459 obj.hv_state = \ 1460 outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState) 1461 1462 if obj.disk_state is not None: 1463 obj.disk_state = \ 1464 dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState)) 1465 for (key, value) in obj.disk_state.items()) 1466 1467 return obj
1468
1469 1470 -class NodeGroup(TaggableObject):
1471 """Config object representing a node group.""" 1472 __slots__ = [ 1473 "name", 1474 "members", 1475 "ndparams", 1476 "diskparams", 1477 "ipolicy", 1478 "serial_no", 1479 "hv_state_static", 1480 "disk_state_static", 1481 "alloc_policy", 1482 "networks", 1483 ] + _TIMESTAMPS + _UUID 1484
1485 - def ToDict(self):
1486 """Custom function for nodegroup. 1487 1488 This discards the members object, which gets recalculated and is only kept 1489 in memory. 1490 1491 """ 1492 mydict = super(NodeGroup, self).ToDict() 1493 del mydict["members"] 1494 return mydict
1495 1496 @classmethod
1497 - def FromDict(cls, val):
1498 """Custom function for nodegroup. 1499 1500 The members slot is initialized to an empty list, upon deserialization. 1501 1502 """ 1503 obj = super(NodeGroup, cls).FromDict(val) 1504 obj.members = [] 1505 return obj
1506
1507 - def UpgradeConfig(self):
1508 """Fill defaults for missing configuration values. 1509 1510 """ 1511 if self.ndparams is None: 1512 self.ndparams = {} 1513 1514 if self.serial_no is None: 1515 self.serial_no = 1 1516 1517 if self.alloc_policy is None: 1518 self.alloc_policy = constants.ALLOC_POLICY_PREFERRED 1519 1520 # We only update mtime, and not ctime, since we would not be able 1521 # to provide a correct value for creation time. 1522 if self.mtime is None: 1523 self.mtime = time.time() 1524 1525 if self.diskparams is None: 1526 self.diskparams = {} 1527 if self.ipolicy is None: 1528 self.ipolicy = MakeEmptyIPolicy() 1529 1530 if self.networks is None: 1531 self.networks = {} 1532 1533 for network, netparams in self.networks.items(): 1534 self.networks[network] = FillDict(constants.NICC_DEFAULTS, netparams)
1535

  def FillND(self, node):
    """Return filled out ndparams for L{objects.Node}

    @type node: L{objects.Node}
    @param node: A Node object to fill
    @return: a copy of the node's ndparams with defaults filled

    """
    return self.SimpleFillND(node.ndparams)
1545
1546 - def SimpleFillND(self, ndparams):
1547 """Fill a given ndparams dict with defaults. 1548 1549 @type ndparams: dict 1550 @param ndparams: the dict to fill 1551 @rtype: dict 1552 @return: a copy of the passed in ndparams with missing keys filled 1553 from the node group defaults 1554 1555 """ 1556 return FillDict(self.ndparams, ndparams)
1557
1558 1559 -class Cluster(TaggableObject):
1560 """Config object representing the cluster.""" 1561 __slots__ = [ 1562 "serial_no", 1563 "rsahostkeypub", 1564 "dsahostkeypub", 1565 "highest_used_port", 1566 "tcpudp_port_pool", 1567 "mac_prefix", 1568 "volume_group_name", 1569 "reserved_lvs", 1570 "drbd_usermode_helper", 1571 "default_bridge", 1572 "default_hypervisor", 1573 "master_node", 1574 "master_ip", 1575 "master_netdev", 1576 "master_netmask", 1577 "use_external_mip_script", 1578 "cluster_name", 1579 "file_storage_dir", 1580 "shared_file_storage_dir", 1581 "enabled_hypervisors", 1582 "hvparams", 1583 "ipolicy", 1584 "os_hvp", 1585 "beparams", 1586 "osparams", 1587 "nicparams", 1588 "ndparams", 1589 "diskparams", 1590 "candidate_pool_size", 1591 "modify_etc_hosts", 1592 "modify_ssh_setup", 1593 "maintain_node_health", 1594 "uid_pool", 1595 "default_iallocator", 1596 "hidden_os", 1597 "blacklisted_os", 1598 "primary_ip_family", 1599 "prealloc_wipe_disks", 1600 "hv_state_static", 1601 "disk_state_static", 1602 "enabled_disk_templates", 1603 ] + _TIMESTAMPS + _UUID 1604
1605 - def UpgradeConfig(self):
1606 """Fill defaults for missing configuration values. 1607 1608 """ 1609 # pylint: disable=E0203 1610 # because these are "defined" via slots, not manually 1611 if self.hvparams is None: 1612 self.hvparams = constants.HVC_DEFAULTS 1613 else: 1614 for hypervisor in constants.HYPER_TYPES: 1615 try: 1616 existing_params = self.hvparams[hypervisor] 1617 except KeyError: 1618 existing_params = {} 1619 self.hvparams[hypervisor] = FillDict( 1620 constants.HVC_DEFAULTS[hypervisor], existing_params) 1621 1622 if self.os_hvp is None: 1623 self.os_hvp = {} 1624 1625 # osparams added before 2.2 1626 if self.osparams is None: 1627 self.osparams = {} 1628 1629 self.ndparams = UpgradeNDParams(self.ndparams) 1630 1631 self.beparams = UpgradeGroupedParams(self.beparams, 1632 constants.BEC_DEFAULTS) 1633 for beparams_group in self.beparams: 1634 UpgradeBeParams(self.beparams[beparams_group]) 1635 1636 migrate_default_bridge = not self.nicparams 1637 self.nicparams = UpgradeGroupedParams(self.nicparams, 1638 constants.NICC_DEFAULTS) 1639 if migrate_default_bridge: 1640 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \ 1641 self.default_bridge 1642 1643 if self.modify_etc_hosts is None: 1644 self.modify_etc_hosts = True 1645 1646 if self.modify_ssh_setup is None: 1647 self.modify_ssh_setup = True 1648 1649 # default_bridge is no longer used in 2.1. The slot is left there to 1650 # support auto-upgrading. It can be removed once we decide to deprecate 1651 # upgrading straight from 2.0. 1652 if self.default_bridge is not None: 1653 self.default_bridge = None 1654 1655 # default_hypervisor is just the first enabled one in 2.1. This slot and 1656 # code can be removed once upgrading straight from 2.0 is deprecated. 1657 if self.default_hypervisor is not None: 1658 self.enabled_hypervisors = ([self.default_hypervisor] + 1659 [hvname for hvname in self.enabled_hypervisors 1660 if hvname != self.default_hypervisor]) 1661 self.default_hypervisor = None 1662 1663 # maintain_node_health added after 2.1.1 1664 if self.maintain_node_health is None: 1665 self.maintain_node_health = False 1666 1667 if self.uid_pool is None: 1668 self.uid_pool = [] 1669 1670 if self.default_iallocator is None: 1671 self.default_iallocator = "" 1672 1673 # reserved_lvs added before 2.2 1674 if self.reserved_lvs is None: 1675 self.reserved_lvs = [] 1676 1677 # hidden and blacklisted operating systems added before 2.2.1 1678 if self.hidden_os is None: 1679 self.hidden_os = [] 1680 1681 if self.blacklisted_os is None: 1682 self.blacklisted_os = [] 1683 1684 # primary_ip_family added before 2.3 1685 if self.primary_ip_family is None: 1686 self.primary_ip_family = AF_INET 1687 1688 if self.master_netmask is None: 1689 ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family) 1690 self.master_netmask = ipcls.iplen 1691 1692 if self.prealloc_wipe_disks is None: 1693 self.prealloc_wipe_disks = False 1694 1695 # shared_file_storage_dir added before 2.5 1696 if self.shared_file_storage_dir is None: 1697 self.shared_file_storage_dir = "" 1698 1699 if self.use_external_mip_script is None: 1700 self.use_external_mip_script = False 1701 1702 if self.diskparams: 1703 self.diskparams = UpgradeDiskParams(self.diskparams) 1704 else: 1705 self.diskparams = constants.DISK_DT_DEFAULTS.copy() 1706 1707 # instance policy added before 2.6 1708 if self.ipolicy is None: 1709 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {}) 1710 else: 1711 # we can either make sure to upgrade the ipolicy always, or only 1712 # do it in some corner 
cases (e.g. missing keys); note that this 1713 # will break any removal of keys from the ipolicy dict 1714 wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS 1715 if wrongkeys: 1716 # These keys would be silently removed by FillIPolicy() 1717 msg = ("Cluster instance policy contains spurious keys: %s" % 1718 utils.CommaJoin(wrongkeys)) 1719 raise errors.ConfigurationError(msg) 1720 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy) 1721 1722 # hv_state_static added in 2.7 1723 if self.hv_state_static is None: 1724 self.hv_state_static = {} 1725 if self.disk_state_static is None: 1726 self.disk_state_static = {}
1727 1728 @property
1729 - def primary_hypervisor(self):
1730 """The first hypervisor is the primary. 1731 1732 Useful, for example, for L{Node}'s hv/disk state. 1733 1734 """ 1735 return self.enabled_hypervisors[0]
1736
1737 - def ToDict(self):
1738 """Custom function for cluster. 1739 1740 """ 1741 mydict = super(Cluster, self).ToDict() 1742 1743 if self.tcpudp_port_pool is None: 1744 tcpudp_port_pool = [] 1745 else: 1746 tcpudp_port_pool = list(self.tcpudp_port_pool) 1747 1748 mydict["tcpudp_port_pool"] = tcpudp_port_pool 1749 1750 return mydict
1751 1752 @classmethod
1753 - def FromDict(cls, val):
1754 """Custom function for cluster. 1755 1756 """ 1757 obj = super(Cluster, cls).FromDict(val) 1758 1759 if obj.tcpudp_port_pool is None: 1760 obj.tcpudp_port_pool = set() 1761 elif not isinstance(obj.tcpudp_port_pool, set): 1762 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool) 1763 1764 return obj
1765
1766 - def SimpleFillDP(self, diskparams):
1767 """Fill a given diskparams dict with cluster defaults. 1768 1769 @param diskparams: The diskparams 1770 @return: The defaults dict 1771 1772 """ 1773 return FillDiskParams(self.diskparams, diskparams)
1774
1775 - def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1776 """Get the default hypervisor parameters for the cluster. 1777 1778 @param hypervisor: the hypervisor name 1779 @param os_name: if specified, we'll also update the defaults for this OS 1780 @param skip_keys: if passed, list of keys not to use 1781 @return: the defaults dict 1782 1783 """ 1784 if skip_keys is None: 1785 skip_keys = [] 1786 1787 fill_stack = [self.hvparams.get(hypervisor, {})] 1788 if os_name is not None: 1789 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {}) 1790 fill_stack.append(os_hvp) 1791 1792 ret_dict = {} 1793 for o_dict in fill_stack: 1794 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys) 1795 1796 return ret_dict
1797
1798 - def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1799 """Fill a given hvparams dict with cluster defaults. 1800 1801 @type hv_name: string 1802 @param hv_name: the hypervisor to use 1803 @type os_name: string 1804 @param os_name: the OS to use for overriding the hypervisor defaults 1805 @type skip_globals: boolean 1806 @param skip_globals: if True, the global hypervisor parameters will 1807 not be filled 1808 @rtype: dict 1809 @return: a copy of the given hvparams with missing keys filled from 1810 the cluster defaults 1811 1812 """ 1813 if skip_globals: 1814 skip_keys = constants.HVC_GLOBALS 1815 else: 1816 skip_keys = [] 1817 1818 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys) 1819 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1820
1821 - def FillHV(self, instance, skip_globals=False):
1822 """Fill an instance's hvparams dict with cluster defaults. 1823 1824 @type instance: L{objects.Instance} 1825 @param instance: the instance parameter to fill 1826 @type skip_globals: boolean 1827 @param skip_globals: if True, the global hypervisor parameters will 1828 not be filled 1829 @rtype: dict 1830 @return: a copy of the instance's hvparams with missing keys filled from 1831 the cluster defaults 1832 1833 """ 1834 return self.SimpleFillHV(instance.hypervisor, instance.os, 1835 instance.hvparams, skip_globals)
1836
1837 - def SimpleFillBE(self, beparams):
1838 """Fill a given beparams dict with cluster defaults. 1839 1840 @type beparams: dict 1841 @param beparams: the dict to fill 1842 @rtype: dict 1843 @return: a copy of the passed in beparams with missing keys filled 1844 from the cluster defaults 1845 1846 """ 1847 return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1848
1849 - def FillBE(self, instance):
1850 """Fill an instance's beparams dict with cluster defaults. 1851 1852 @type instance: L{objects.Instance} 1853 @param instance: the instance parameter to fill 1854 @rtype: dict 1855 @return: a copy of the instance's beparams with missing keys filled from 1856 the cluster defaults 1857 1858 """ 1859 return self.SimpleFillBE(instance.beparams)
1860
1861 - def SimpleFillNIC(self, nicparams):
1862 """Fill a given nicparams dict with cluster defaults. 1863 1864 @type nicparams: dict 1865 @param nicparams: the dict to fill 1866 @rtype: dict 1867 @return: a copy of the passed in nicparams with missing keys filled 1868 from the cluster defaults 1869 1870 """ 1871 return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1872
1873 - def SimpleFillOS(self, os_name, os_params):
1874 """Fill an instance's osparams dict with cluster defaults. 1875 1876 @type os_name: string 1877 @param os_name: the OS name to use 1878 @type os_params: dict 1879 @param os_params: the dict to fill with default values 1880 @rtype: dict 1881 @return: a copy of the instance's osparams with missing keys filled from 1882 the cluster defaults 1883 1884 """ 1885 name_only = os_name.split("+", 1)[0] 1886 # base OS 1887 result = self.osparams.get(name_only, {}) 1888 # OS with variant 1889 result = FillDict(result, self.osparams.get(os_name, {})) 1890 # specified params 1891 return FillDict(result, os_params)
1892 1893 @staticmethod
1894 - def SimpleFillHvState(hv_state):
1895 """Fill an hv_state sub dict with cluster defaults. 1896 1897 """ 1898 return FillDict(constants.HVST_DEFAULTS, hv_state)
1899 1900 @staticmethod
1901 - def SimpleFillDiskState(disk_state):
1902 """Fill an disk_state sub dict with cluster defaults. 1903 1904 """ 1905 return FillDict(constants.DS_DEFAULTS, disk_state)
1906
1907 - def FillND(self, node, nodegroup):
1908 """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node} 1909 1910 @type node: L{objects.Node} 1911 @param node: A Node object to fill 1912 @type nodegroup: L{objects.NodeGroup} 1913 @param nodegroup: A Node object to fill 1914 @return a copy of the node's ndparams with defaults filled 1915 1916 """ 1917 return self.SimpleFillND(nodegroup.FillND(node))
1918
1919 - def SimpleFillND(self, ndparams):
1920 """Fill a given ndparams dict with defaults. 1921 1922 @type ndparams: dict 1923 @param ndparams: the dict to fill 1924 @rtype: dict 1925 @return: a copy of the passed in ndparams with missing keys filled 1926 from the cluster defaults 1927 1928 """ 1929 return FillDict(self.ndparams, ndparams)
1930
1931 - def SimpleFillIPolicy(self, ipolicy):
1932 """ Fill instance policy dict with defaults. 1933 1934 @type ipolicy: dict 1935 @param ipolicy: the dict to fill 1936 @rtype: dict 1937 @return: a copy of passed ipolicy with missing keys filled from 1938 the cluster defaults 1939 1940 """ 1941 return FillIPolicy(self.ipolicy, ipolicy)
1942
1943 - def IsDiskTemplateEnabled(self, disk_template):
1944 """Checks if a particular disk template is enabled. 1945 1946 """ 1947 return utils.storage.IsDiskTemplateEnabled( 1948 disk_template, self.enabled_disk_templates)
1949
1950 - def IsFileStorageEnabled(self):
1951 """Checks if file storage is enabled. 1952 1953 """ 1954 return utils.storage.IsFileStorageEnabled(self.enabled_disk_templates)
1955
1956 - def IsSharedFileStorageEnabled(self):
1957 """Checks if shared file storage is enabled. 1958 1959 """ 1960 return utils.storage.IsSharedFileStorageEnabled( 1961 self.enabled_disk_templates)
1962
1963 1964 -class BlockDevStatus(ConfigObject):
1965 """Config object representing the status of a block device.""" 1966 __slots__ = [ 1967 "dev_path", 1968 "major", 1969 "minor", 1970 "sync_percent", 1971 "estimated_time", 1972 "is_degraded", 1973 "ldisk_status", 1974 ]
1975
1976 1977 -class ImportExportStatus(ConfigObject):
1978 """Config object representing the status of an import or export.""" 1979 __slots__ = [ 1980 "recent_output", 1981 "listen_port", 1982 "connected", 1983 "progress_mbytes", 1984 "progress_throughput", 1985 "progress_eta", 1986 "progress_percent", 1987 "exit_status", 1988 "error_message", 1989 ] + _TIMESTAMPS
1990
1991 1992 -class ImportExportOptions(ConfigObject):
1993 """Options for import/export daemon 1994 1995 @ivar key_name: X509 key name (None for cluster certificate) 1996 @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate) 1997 @ivar compress: Compression method (one of L{constants.IEC_ALL}) 1998 @ivar magic: Used to ensure the connection goes to the right disk 1999 @ivar ipv6: Whether to use IPv6 2000 @ivar connect_timeout: Number of seconds for establishing connection 2001 2002 """ 2003 __slots__ = [ 2004 "key_name", 2005 "ca_pem", 2006 "compress", 2007 "magic", 2008 "ipv6", 2009 "connect_timeout", 2010 ]
2011
2012 2013 -class ConfdRequest(ConfigObject):
2014 """Object holding a confd request. 2015 2016 @ivar protocol: confd protocol version 2017 @ivar type: confd query type 2018 @ivar query: query request 2019 @ivar rsalt: requested reply salt 2020 2021 """ 2022 __slots__ = [ 2023 "protocol", 2024 "type", 2025 "query", 2026 "rsalt", 2027 ]
2028
2029 2030 -class ConfdReply(ConfigObject):
2031 """Object holding a confd reply. 2032 2033 @ivar protocol: confd protocol version 2034 @ivar status: reply status code (ok, error) 2035 @ivar answer: confd query reply 2036 @ivar serial: configuration serial number 2037 2038 """ 2039 __slots__ = [ 2040 "protocol", 2041 "status", 2042 "answer", 2043 "serial", 2044 ]
2045
2046 2047 -class QueryFieldDefinition(ConfigObject):
2048 """Object holding a query field definition. 2049 2050 @ivar name: Field name 2051 @ivar title: Human-readable title 2052 @ivar kind: Field type 2053 @ivar doc: Human-readable description 2054 2055 """ 2056 __slots__ = [ 2057 "name", 2058 "title", 2059 "kind", 2060 "doc", 2061 ]
2062
2063 2064 -class _QueryResponseBase(ConfigObject):
2065 __slots__ = [ 2066 "fields", 2067 ] 2068
2069 - def ToDict(self):
2070 """Custom function for serializing. 2071 2072 """ 2073 mydict = super(_QueryResponseBase, self).ToDict() 2074 mydict["fields"] = outils.ContainerToDicts(mydict["fields"]) 2075 return mydict
2076 2077 @classmethod
2078 - def FromDict(cls, val):
2079 """Custom function for de-serializing. 2080 2081 """ 2082 obj = super(_QueryResponseBase, cls).FromDict(val) 2083 obj.fields = \ 2084 outils.ContainerFromDicts(obj.fields, list, QueryFieldDefinition) 2085 return obj
2086
2087 2088 -class QueryResponse(_QueryResponseBase):
2089 """Object holding the response to a query. 2090 2091 @ivar fields: List of L{QueryFieldDefinition} objects 2092 @ivar data: Requested data 2093 2094 """ 2095 __slots__ = [ 2096 "data", 2097 ]
2098
2099 2100 -class QueryFieldsRequest(ConfigObject):
2101 """Object holding a request for querying available fields. 2102 2103 """ 2104 __slots__ = [ 2105 "what", 2106 "fields", 2107 ]
2108
2109 2110 -class QueryFieldsResponse(_QueryResponseBase):
2111 """Object holding the response to a query for fields. 2112 2113 @ivar fields: List of L{QueryFieldDefinition} objects 2114 2115 """ 2116 __slots__ = []
2117
2118 2119 -class MigrationStatus(ConfigObject):
2120 """Object holding the status of a migration. 2121 2122 """ 2123 __slots__ = [ 2124 "status", 2125 "transferred_ram", 2126 "total_ram", 2127 ]
2128
2129 2130 -class InstanceConsole(ConfigObject):
2131 """Object describing how to access the console of an instance. 2132 2133 """ 2134 __slots__ = [ 2135 "instance", 2136 "kind", 2137 "message", 2138 "host", 2139 "port", 2140 "user", 2141 "command", 2142 "display", 2143 ] 2144
2145 - def Validate(self):
2146 """Validates contents of this object. 2147 2148 """ 2149 assert self.kind in constants.CONS_ALL, "Unknown console type" 2150 assert self.instance, "Missing instance name" 2151 assert self.message or self.kind in [constants.CONS_SSH, 2152 constants.CONS_SPICE, 2153 constants.CONS_VNC] 2154 assert self.host or self.kind == constants.CONS_MESSAGE 2155 assert self.port or self.kind in [constants.CONS_MESSAGE, 2156 constants.CONS_SSH] 2157 assert self.user or self.kind in [constants.CONS_MESSAGE, 2158 constants.CONS_SPICE, 2159 constants.CONS_VNC] 2160 assert self.command or self.kind in [constants.CONS_MESSAGE, 2161 constants.CONS_SPICE, 2162 constants.CONS_VNC] 2163 assert self.display or self.kind in [constants.CONS_MESSAGE, 2164 constants.CONS_SPICE, 2165 constants.CONS_SSH] 2166 return True
2167
2168 2169 -class Network(TaggableObject):
2170 """Object representing a network definition for ganeti. 2171 2172 """ 2173 __slots__ = [ 2174 "name", 2175 "serial_no", 2176 "mac_prefix", 2177 "network", 2178 "network6", 2179 "gateway", 2180 "gateway6", 2181 "reservations", 2182 "ext_reservations", 2183 ] + _TIMESTAMPS + _UUID 2184
2185 - def HooksDict(self, prefix=""):
2186 """Export a dictionary used by hooks with a network's information. 2187 2188 @type prefix: String 2189 @param prefix: Prefix to prepend to the dict entries 2190 2191 """ 2192 result = { 2193 "%sNETWORK_NAME" % prefix: self.name, 2194 "%sNETWORK_UUID" % prefix: self.uuid, 2195 "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()), 2196 } 2197 if self.network: 2198 result["%sNETWORK_SUBNET" % prefix] = self.network 2199 if self.gateway: 2200 result["%sNETWORK_GATEWAY" % prefix] = self.gateway 2201 if self.network6: 2202 result["%sNETWORK_SUBNET6" % prefix] = self.network6 2203 if self.gateway6: 2204 result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6 2205 if self.mac_prefix: 2206 result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix 2207 2208 return result
2209 2210 @classmethod
2211 - def FromDict(cls, val):
2212 """Custom function for networks. 2213 2214 Remove deprecated network_type and family. 2215 2216 """ 2217 if "network_type" in val: 2218 del val["network_type"] 2219 if "family" in val: 2220 del val["family"] 2221 obj = super(Network, cls).FromDict(val) 2222 return obj
2223
2224 2225 # need to inherit object in order to use super() 2226 -class SerializableConfigParser(ConfigParser.SafeConfigParser, object):
2227 """Simple wrapper over ConfigParse that allows serialization. 2228 2229 This class is basically ConfigParser.SafeConfigParser with two 2230 additional methods that allow it to serialize/unserialize to/from a 2231 buffer. 2232 2233 """
2234 - def Dumps(self):
2235 """Dump this instance and return the string representation.""" 2236 buf = StringIO() 2237 self.write(buf) 2238 return buf.getvalue()
2239 2240 @classmethod
2241 - def Loads(cls, data):
2242 """Load data from a string.""" 2243 buf = StringIO(data) 2244 cfp = cls() 2245 cfp.readfp(buf) 2246 return cfp
2247
2248 - def get(self, section, option, **kwargs):
2249 value = None 2250 try: 2251 value = super(SerializableConfigParser, self).get(section, option, 2252 **kwargs) 2253 if value.lower() == constants.VALUE_NONE: 2254 value = None 2255 except ConfigParser.NoOptionError: 2256 r = re.compile(r"(disk|nic)\d+_name|nic\d+_(network|vlan)") 2257 match = r.match(option) 2258 if match: 2259 pass 2260 else: 2261 raise 2262 2263 return value
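The overridden get above does two things: a stored value equal to constants.VALUE_NONE comes back as None, and a missing option is tolerated (also returning None) only if it matches the optional per-disk/per-NIC name, network or vlan pattern; any other missing option still raises NoOptionError. A self-contained mimic of that behaviour, using the literal string "none" in place of constants.VALUE_NONE:

  import re
  import ConfigParser
  from cStringIO import StringIO

  _OPTIONAL_RE = re.compile(r"(disk|nic)\d+_name|nic\d+_(network|vlan)")

  class _DemoParser(ConfigParser.SafeConfigParser, object):
    def get(self, section, option, **kwargs):
      try:
        value = super(_DemoParser, self).get(section, option, **kwargs)
        return None if value.lower() == "none" else value
      except ConfigParser.NoOptionError:
        if _OPTIONAL_RE.match(option):
          return None                 # optional key, absence is fine
        raise

  cfg = _DemoParser()
  cfg.readfp(StringIO("[instance]\nname = web1\n"))
  assert cfg.get("instance", "name") == "web1"
  assert cfg.get("instance", "disk0_name") is None    # optional and missing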
2264
2265 2266 -class LvmPvInfo(ConfigObject):
2267 """Information about an LVM physical volume (PV). 2268 2269 @type name: string 2270 @ivar name: name of the PV 2271 @type vg_name: string 2272 @ivar vg_name: name of the volume group containing the PV 2273 @type size: float 2274 @ivar size: size of the PV in MiB 2275 @type free: float 2276 @ivar free: free space in the PV, in MiB 2277 @type attributes: string 2278 @ivar attributes: PV attributes 2279 @type lv_list: list of strings 2280 @ivar lv_list: names of the LVs hosted on the PV 2281 """ 2282 __slots__ = [ 2283 "name", 2284 "vg_name", 2285 "size", 2286 "free", 2287 "attributes", 2288 "lv_list" 2289 ] 2290
2291 - def IsEmpty(self):
2292 """Is this PV empty? 2293 2294 """ 2295 return self.size <= (self.free + 1)
2296
2297 - def IsAllocatable(self):
2298 """Is this PV allocatable? 2299 2300 """ 2301 return ("a" in self.attributes)
2302