22 """Transportable objects for Ganeti.
23
24 This module provides small, mostly data-only objects which are safe to
25 pass to and from external parties.
26
27 """
28
29
30
31
32
33
34
35
36
37
38 import ConfigParser
39 import re
40 import copy
41 import logging
42 import time
43 from cStringIO import StringIO
44
45 from ganeti import errors
46 from ganeti import constants
47 from ganeti import netutils
48 from ganeti import outils
49 from ganeti import utils
50
51 from socket import AF_INET
52
53
54 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
55 "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
56
57 _TIMESTAMPS = ["ctime", "mtime"]
58 _UUID = ["uuid"]
59
60
61 def FillDict(defaults_dict, custom_dict, skip_keys=None):
62 """Basic function to apply settings on top of a default dict.
63
64 @type defaults_dict: dict
65 @param defaults_dict: dictionary holding the default values
66 @type custom_dict: dict
67 @param custom_dict: dictionary holding customized values
68 @type skip_keys: list
69 @param skip_keys: which keys not to fill
70 @rtype: dict
71 @return: dict with the 'full' values
72
73 """
74 ret_dict = copy.deepcopy(defaults_dict)
75 ret_dict.update(custom_dict)
76 if skip_keys:
77 for k in skip_keys:
78 try:
79 del ret_dict[k]
80 except KeyError:
81 pass
82 return ret_dict
83
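# Illustrative usage (not part of the upstream module): FillDict copies the
# defaults, overlays the custom values and then drops any skipped keys.
#
#   FillDict({"mode": "rw", "size": 128}, {"size": 256})
#   # -> {"mode": "rw", "size": 256}
#   FillDict({"mode": "rw", "size": 128}, {"size": 256}, skip_keys=["mode"])
#   # -> {"size": 256}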
86 """Fills an instance policy with defaults.
87
88 """
89 assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
90 ret_dict = copy.deepcopy(custom_ipolicy)
91 for key in default_ipolicy:
92 if key not in ret_dict:
93 ret_dict[key] = copy.deepcopy(default_ipolicy[key])
94 elif key == constants.ISPECS_STD:
95 ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
96 return ret_dict
97
98
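# Illustrative behaviour (not part of the upstream module; the spec keys shown
# are placeholders): keys missing from the custom policy are copied wholesale
# from the defaults, while the ISPECS_STD sub-dict is merged key by key via
# FillDict, so a partial "std" spec keeps the remaining default values.
#
#   custom = {constants.ISPECS_STD: {"memory-size": 512}}
#   FillIPolicy(default_ipolicy, custom)[constants.ISPECS_STD]
#   # -> {"memory-size": 512, "cpu-count": 1}   (given those std defaults)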
99 def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
100 """Fills the disk parameter defaults.
101
102 @see: L{FillDict} for parameters and return value
103
104 """
105 assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
106
107 return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
108 skip_keys=skip_keys))
109 for dt in constants.DISK_TEMPLATES)
110
113 """Update all groups for the target parameter.
114
115 @type target: dict of dicts
116 @param target: {group: {parameter: value}}
117 @type defaults: dict
118 @param defaults: default parameter values
119
120 """
121 if target is None:
122 target = {constants.PP_DEFAULT: defaults}
123 else:
124 for group in target:
125 target[group] = FillDict(defaults, target[group])
126 return target
127
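# Illustrative usage (not part of the upstream module): a missing target dict
# becomes a single default group, otherwise every existing group is filled
# from the supplied defaults.
#
#   UpgradeGroupedParams(None, {"x": 1})                 # -> {constants.PP_DEFAULT: {"x": 1}}
#   UpgradeGroupedParams({"grp": {"y": 2}}, {"x": 1})    # -> {"grp": {"x": 1, "y": 2}}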
141
144 """Upgrade the disk parameters.
145
146 @type diskparams: dict
147 @param diskparams: disk parameters to upgrade
148 @rtype: dict
149 @return: the upgraded disk parameters dict
150
151 """
152 if not diskparams:
153 result = {}
154 else:
155 result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
156
157 return result
158
177
180 """Create empty IPolicy dictionary.
181
182 """
183 return {}
184
187 """A generic config object.
188
189 It has the following properties:
190
191 - provides somewhat safe recursive unpickling and pickling for its classes
192 - unset attributes which are defined in slots are always returned
193 as None instead of raising an error
194
195 Classes derived from this must always declare __slots__ (we use many
196 config objects and the memory reduction is useful)
197
198 """
199 __slots__ = []
200
202 if name not in self.GetAllSlots():
203 raise AttributeError("Invalid object attribute %s.%s" %
204 (type(self).__name__, name))
205 return None
206
212
214 """Validates the slots.
215
216 """
217
219 """Convert to a dict holding only standard python types.
220
221 The generic routine just dumps all of this object's attributes in
222 a dict. It does not work if the class has children who are
223 ConfigObjects themselves (e.g. the nics list in an Instance), in
224 which case the object should subclass the function in order to
225 make sure all objects returned are only standard python types.
226
227 """
228 result = {}
229 for name in self.GetAllSlots():
230 value = getattr(self, name, None)
231 if value is not None:
232 result[name] = value
233 return result
234
235 __getstate__ = ToDict
236
237 @classmethod
239 """Create an object from a dictionary.
240
241 This generic routine takes a dict, instantiates a new instance of
242 the given class, and sets attributes based on the dict content.
243
244 As for `ToDict`, this does not work if the class has children
245 who are ConfigObjects themselves (e.g. the nics list in an
246 Instance), in which case the object should subclass the function
247 and alter the objects.
248
249 """
250 if not isinstance(val, dict):
251 raise errors.ConfigurationError("Invalid object passed to FromDict:"
252 " expected dict, got %s" % type(val))
253 val_str = dict([(str(k), v) for k, v in val.iteritems()])
254 obj = cls(**val_str)
255 return obj
256
258 """Makes a deep copy of the current object and its children.
259
260 """
261 dict_form = self.ToDict()
262 clone_obj = self.__class__.FromDict(dict_form)
263 return clone_obj
264
266 """Implement __repr__ for ConfigObjects."""
267 return repr(self.ToDict())
268
270 """Fill defaults for missing configuration values.
271
272 This method will be called at configuration load time, and its
273 implementation will be object dependent.
274
275 """
276 pass
277
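# Minimal usage sketch (not part of the upstream module; the subclass below is
# made up purely for illustration): ConfigObject subclasses declare __slots__,
# serialize with ToDict() and are rebuilt with FromDict().
#
#   class _Example(ConfigObject):
#     __slots__ = ["alpha", "beta"]
#
#   obj = _Example(alpha=1)
#   obj.ToDict()                              # -> {"alpha": 1} (unset slots omitted)
#   obj.beta                                  # -> None (unset slots read as None)
#   _Example.FromDict(obj.ToDict()).alpha     # -> 1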
280 """An generic class supporting tags.
281
282 """
283 __slots__ = ["tags"]
284 VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")
285
286 @classmethod
288 """Check if a tag is valid.
289
290 If the tag is invalid, an errors.TagError will be raised. The
291 function has no return value.
292
293 """
294 if not isinstance(tag, basestring):
295 raise errors.TagError("Invalid tag type (not a string)")
296 if len(tag) > constants.MAX_TAG_LEN:
297 raise errors.TagError("Tag too long (>%d characters)" %
298 constants.MAX_TAG_LEN)
299 if not tag:
300 raise errors.TagError("Tags cannot be empty")
301 if not cls.VALID_TAG_RE.match(tag):
302 raise errors.TagError("Tag contains invalid characters")
303
312
322
333
335 """Taggable-object-specific conversion to standard python types.
336
337 This replaces the tags set with a list.
338
339 """
340 bo = super(TaggableObject, self).ToDict()
341
342 tags = bo.get("tags", None)
343 if isinstance(tags, set):
344 bo["tags"] = list(tags)
345 return bo
346
347 @classmethod
349 """Custom function for instances.
350
351 """
352 obj = super(TaggableObject, cls).FromDict(val)
353 if hasattr(obj, "tags") and isinstance(obj.tags, list):
354 obj.tags = set(obj.tags)
355 return obj
356
359 """Network configuration parameters for the master
360
361 @ivar name: master name
362 @ivar ip: master IP
363 @ivar netmask: master netmask
364 @ivar netdev: master network device
365 @ivar ip_family: master IP family
366
367 """
368 __slots__ = [
369 "name",
370 "ip",
371 "netmask",
372 "netdev",
373 "ip_family",
374 ]
375
378 """Top-level config object."""
379 __slots__ = [
380 "version",
381 "cluster",
382 "nodes",
383 "nodegroups",
384 "instances",
385 "networks",
386 "serial_no",
387 ] + _TIMESTAMPS
388
390 """Custom function for top-level config data.
391
392 This just replaces the list of instances, nodes and the cluster
393 with standard python types.
394
395 """
396 mydict = super(ConfigData, self).ToDict()
397 mydict["cluster"] = mydict["cluster"].ToDict()
398 for key in "nodes", "instances", "nodegroups", "networks":
399 mydict[key] = outils.ContainerToDicts(mydict[key])
400
401 return mydict
402
403 @classmethod
417
419 """Check if in there is at disk of the given type in the configuration.
420
421 @type dev_type: L{constants.LDS_BLOCK}
422 @param dev_type: the type to look for
423 @rtype: boolean
424 @return: boolean indicating if a disk of the given type was found or not
425
426 """
427 for instance in self.instances.values():
428 for disk in instance.disks:
429 if disk.IsBasedOnDiskType(dev_type):
430 return True
431 return False
432
457
488
489
490 class NIC(ConfigObject):
491 """Config object representing a network card."""
492 __slots__ = ["name", "mac", "ip", "network", "nicparams", "netinfo"] + _UUID
493
494 @classmethod
511
512
513 class Disk(ConfigObject):
514 """Config object representing a block device."""
515 __slots__ = ["name", "dev_type", "logical_id", "physical_id",
516 "children", "iv_name", "size", "mode", "params"] + _UUID
517
521
525
527 """Test if this device needs to be opened on a secondary node."""
528 return self.dev_type in (constants.LD_LV,)
529
531 """Return the device path if this device type has a static one.
532
533 Some devices (LVM for example) always live at the same /dev/ path,
534 irrespective of their status. For such devices, we return this
535 path, for others we return None.
536
537 @warning: The path returned is not a normalized pathname; callers
538 should check that it is a valid path.
539
540 """
541 if self.dev_type == constants.LD_LV:
542 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
543 elif self.dev_type == constants.LD_BLOCKDEV:
544 return self.logical_id[1]
545 elif self.dev_type == constants.LD_RBD:
546 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
547 return None
548
550 """Compute the needed number of children for activation.
551
552 This method will return either -1 (all children) or a positive
553 number denoting the minimum number of children needed for
554 activation (only mirrored devices will usually return >=0).
555
556 Currently, only DRBD8 supports diskless activation (therefore we
557 return 0), for all others we keep the previous semantics and return
558 -1.
559
560 """
561 if self.dev_type == constants.LD_DRBD8:
562 return 0
563 return -1
564
566 """Check if the disk or its children are based on the given type.
567
568 @type dev_type: L{constants.LDS_BLOCK}
569 @param dev_type: the type to look for
570 @rtype: boolean
571 @return: boolean indicating if a device of the given type was found or not
572
573 """
574 if self.children:
575 for child in self.children:
576 if child.IsBasedOnDiskType(dev_type):
577 return True
578 return self.dev_type == dev_type
579
581 """This function returns the nodes this device lives on.
582
583 Given the node on which the parent of the device lives (or, in
584 case of a top-level device, the primary node of the device's
585 instance), this function will return a list of nodes on which this
586 device needs to (or can) be assembled.
587
588 """
589 if self.dev_type in [constants.LD_LV, constants.LD_FILE,
590 constants.LD_BLOCKDEV, constants.LD_RBD,
591 constants.LD_EXT]:
592 result = [node]
593 elif self.dev_type in constants.LDS_DRBD:
594 result = [self.logical_id[0], self.logical_id[1]]
595 if node not in result:
596 raise errors.ConfigurationError("DRBD device passed unknown node")
597 else:
598 raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
599 return result
600
602 """Compute the node/disk tree for this disk and its children.
603
604 This method, given the node on which the parent disk lives, will
605 return the list of all (node, disk) pairs which describe the disk
606 tree in the most compact way. For example, a drbd/lvm stack
607 will be returned as (primary_node, drbd) and (secondary_node, drbd)
608 which represents all the top-level devices on the nodes.
609
610 """
611 my_nodes = self.GetNodes(parent_node)
612 result = [(node, self) for node in my_nodes]
613 if not self.children:
614
615 return result
616 for node in my_nodes:
617 for child in self.children:
618 child_result = child.ComputeNodeTree(node)
619 if len(child_result) == 1:
620
621
622
623 continue
624 else:
625
626
627
628 for subnode, subdisk in child_result:
629 if subnode not in my_nodes:
630 result.append((subnode, subdisk))
631
632
633
634 return result
635
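# Illustrative sketch (not part of the upstream module; node names and the
# lv_data/lv_meta children are placeholders): for a DRBD8 disk whose
# logical_id starts with (node_a, node_b, ...), GetNodes() returns both nodes
# and ComputeNodeTree() lists the DRBD device on each of them, while its LV
# children living on the same nodes are folded into the parent entry.
#
#   drbd = Disk(dev_type=constants.LD_DRBD8,
#               logical_id=("node_a", "node_b", 11000, 0, 0, "secret"),
#               children=[lv_data, lv_meta], size=1024)
#   drbd.GetNodes("node_a")          # -> ["node_a", "node_b"]
#   drbd.ComputeNodeTree("node_a")   # -> [("node_a", drbd), ("node_b", drbd)]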
637 """Compute the per-VG growth requirements.
638
639 This only works for VG-based disks.
640
641 @type amount: integer
642 @param amount: the desired increase in (user-visible) disk space
643 @rtype: dict
644 @return: a dictionary of volume-groups and the required size
645
646 """
647 if self.dev_type == constants.LD_LV:
648 return {self.logical_id[0]: amount}
649 elif self.dev_type == constants.LD_DRBD8:
650 if self.children:
651 return self.children[0].ComputeGrowth(amount)
652 else:
653 return {}
654 else:
655
656 return {}
657
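# Illustrative behaviour (not part of the upstream module): only LVM-backed
# disks report a per-VG requirement; DRBD8 delegates to its data child and
# all other device types need no extra VG space.
#
#   lv = Disk(dev_type=constants.LD_LV, logical_id=("xenvg", "data"), size=1024)
#   lv.ComputeGrowth(512)   # -> {"xenvg": 512}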
659 """Update the size of this disk after growth.
660
661 This method recurses over the disk's children and updates their
662 size correspondingly. The method needs to be kept in sync with the
663 actual algorithms from bdev.
664
665 """
666 if self.dev_type in (constants.LD_LV, constants.LD_FILE,
667 constants.LD_RBD, constants.LD_EXT):
668 self.size += amount
669 elif self.dev_type == constants.LD_DRBD8:
670 if self.children:
671 self.children[0].RecordGrow(amount)
672 self.size += amount
673 else:
674 raise errors.ProgrammerError("Disk.RecordGrow called for unsupported"
675 " disk type %s" % self.dev_type)
676
677 def Update(self, size=None, mode=None):
678 """Apply changes to size and mode.
679
680 """
681 if self.dev_type == constants.LD_DRBD8:
682 if self.children:
683 self.children[0].Update(size=size, mode=mode)
684 else:
685 assert not self.children
686
687 if size is not None:
688 self.size = size
689 if mode is not None:
690 self.mode = mode
691
693 """Sets recursively the size to zero for the disk and its children.
694
695 """
696 if self.children:
697 for child in self.children:
698 child.UnsetSize()
699 self.size = 0
700
702 """Convert the logical ID to the physical ID.
703
704 This is used only for drbd, which needs ip/port configuration.
705
706 The routine descends down and updates its children also, because
707 this helps when only the top device is passed to the remote
708 node.
709
710 Arguments:
711 - target_node: the node we wish to configure for
712 - nodes_ip: a mapping of node name to ip
713
714 The target_node must exist in nodes_ip, and must be one of the
715 nodes in the logical ID for each of the DRBD devices encountered
716 in the disk tree.
717
718 """
719 if self.children:
720 for child in self.children:
721 child.SetPhysicalID(target_node, nodes_ip)
722
723 if self.logical_id is None and self.physical_id is not None:
724 return
725 if self.dev_type in constants.LDS_DRBD:
726 pnode, snode, port, pminor, sminor, secret = self.logical_id
727 if target_node not in (pnode, snode):
728 raise errors.ConfigurationError("DRBD device not knowing node %s" %
729 target_node)
730 pnode_ip = nodes_ip.get(pnode, None)
731 snode_ip = nodes_ip.get(snode, None)
732 if pnode_ip is None or snode_ip is None:
733 raise errors.ConfigurationError("Can't find primary or secondary node"
734 " for %s" % str(self))
735 p_data = (pnode_ip, port)
736 s_data = (snode_ip, port)
737 if pnode == target_node:
738 self.physical_id = p_data + s_data + (pminor, secret)
739 else:
740 self.physical_id = s_data + p_data + (sminor, secret)
741 else:
742 self.physical_id = self.logical_id
743 return
744
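# Illustrative sketch (not part of the upstream module; node names and
# addresses are placeholders): for a DRBD8 disk whose logical_id is
# (pnode, snode, port, pminor, sminor, secret), SetPhysicalID() produces an
# IP-based physical_id with the target node's endpoint first.
#
#   disk.logical_id = ("node_a", "node_b", 11000, 0, 1, "secret")
#   disk.SetPhysicalID("node_b", {"node_a": "192.0.2.1", "node_b": "192.0.2.2"})
#   disk.physical_id
#   # -> ("192.0.2.2", 11000, "192.0.2.1", 11000, 1, "secret")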
746 """Disk-specific conversion to standard python types.
747
748 This replaces the children lists of objects with lists of
749 standard python types.
750
751 """
752 bo = super(Disk, self).ToDict()
753
754 for attr in ("children",):
755 alist = bo.get(attr, None)
756 if alist:
757 bo[attr] = outils.ContainerToDicts(alist)
758 return bo
759
760 @classmethod
762 """Custom function for Disks
763
764 """
765 obj = super(Disk, cls).FromDict(val)
766 if obj.children:
767 obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
768 if obj.logical_id and isinstance(obj.logical_id, list):
769 obj.logical_id = tuple(obj.logical_id)
770 if obj.physical_id and isinstance(obj.physical_id, list):
771 obj.physical_id = tuple(obj.physical_id)
772 if obj.dev_type in constants.LDS_DRBD:
773
774 if len(obj.logical_id) < 6:
775 obj.logical_id += (None,) * (6 - len(obj.logical_id))
776 return obj
777
779 """Custom str() formatter for disks.
780
781 """
782 if self.dev_type == constants.LD_LV:
783 val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
784 elif self.dev_type in constants.LDS_DRBD:
785 node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
786 val = "<DRBD8("
787 if self.physical_id is None:
788 phy = "unconfigured"
789 else:
790 phy = ("configured as %s:%s %s:%s" %
791 (self.physical_id[0], self.physical_id[1],
792 self.physical_id[2], self.physical_id[3]))
793
794 val += ("hosts=%s/%d-%s/%d, port=%s, %s, " %
795 (node_a, minor_a, node_b, minor_b, port, phy))
796 if self.children and self.children.count(None) == 0:
797 val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
798 else:
799 val += "no local storage"
800 else:
801 val = ("<Disk(type=%s, logical_id=%s, physical_id=%s, children=%s" %
802 (self.dev_type, self.logical_id, self.physical_id, self.children))
803 if self.iv_name is None:
804 val += ", not visible"
805 else:
806 val += ", visible as /dev/%s" % self.iv_name
807 if isinstance(self.size, int):
808 val += ", size=%dm)>" % self.size
809 else:
810 val += ", size='%s')>" % (self.size,)
811 return val
812
814 """Checks that this disk is correctly configured.
815
816 """
817 all_errors = []
818 if self.mode not in constants.DISK_ACCESS_SET:
819 all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
820 return all_errors
821
823 """Fill defaults for missing configuration values.
824
825 """
826 if self.children:
827 for child in self.children:
828 child.UpgradeConfig()
829
830
831
832
833
834 if not self.params or not isinstance(self.params, dict):
835 self.params = {}
836
837
838
839
840 if self.dev_type == constants.LD_FILE and self.physical_id[0] is None:
841 self.physical_id[0] = constants.FD_DEFAULT
842
843 @staticmethod
845 """Computes Logical Disk parameters from Disk Template parameters.
846
847 @type disk_template: string
848 @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
849 @type disk_params: dict
850 @param disk_params: disk template parameters;
851 dict(template_name -> parameters)
852 @rtype: list(dict)
853 @return: a list of dicts, one for each node of the disk hierarchy. Each dict
854 contains the LD parameters of the node. The tree is flattened in-order.
855
856 """
857 if disk_template not in constants.DISK_TEMPLATES:
858 raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
859
860 assert disk_template in disk_params
861
862 result = list()
863 dt_params = disk_params[disk_template]
864 if disk_template == constants.DT_DRBD8:
865 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_DRBD8], {
866 constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
867 constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
868 constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
869 constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
870 constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
871 constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
872 constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
873 constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
874 constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
875 constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
876 constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
877 constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
878 }))
879
880
881 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
882 constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
883 }))
884
885
886 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
887 constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
888 }))
889
890 elif disk_template in (constants.DT_FILE, constants.DT_SHARED_FILE):
891 result.append(constants.DISK_LD_DEFAULTS[constants.LD_FILE])
892
893 elif disk_template == constants.DT_PLAIN:
894 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_LV], {
895 constants.LDP_STRIPES: dt_params[constants.LV_STRIPES],
896 }))
897
898 elif disk_template == constants.DT_BLOCK:
899 result.append(constants.DISK_LD_DEFAULTS[constants.LD_BLOCKDEV])
900
901 elif disk_template == constants.DT_RBD:
902 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.LD_RBD], {
903 constants.LDP_POOL: dt_params[constants.RBD_POOL],
904 }))
905
906 elif disk_template == constants.DT_EXT:
907 result.append(constants.DISK_LD_DEFAULTS[constants.LD_EXT])
908
909 return result
910
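# Illustrative behaviour (not part of the upstream module; the static method
# above is assumed to be named ComputeLDParams, as in upstream Ganeti): the
# returned list mirrors the flattened device tree, so the DRBD8 template
# yields three dicts (the DRBD device, its data LV and its meta LV), while
# e.g. the plain LVM template yields a single LD_LV parameter dict.
#
#   Disk.ComputeLDParams(constants.DT_PLAIN, disk_params)   # -> [<LD_LV params>]
#   # (disk_params being a filled per-template diskparams dict)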
913 """Config object representing instance policy limits dictionary.
914
915 Note that this object is not actually used in the config, it's just
916 used as a placeholder for a few functions.
917
918 """
919 @classmethod
940
941 @classmethod
948
949 @classmethod
990
991 @classmethod
993 """Check the instance policy specs for validity on a given key.
994
995 We check if the instance specs make sense for a given key, that is
996 if minmaxspecs[min][name] <= stdspec[name] <= minmaxspec[max][name].
997
998 @type minmaxspecs: dict
999 @param minmaxspecs: dictionary with min and max instance spec
1000 @type stdspec: dict
1001 @param stdspec: dictionary with standard instance spec
1002 @type name: string
1003 @param name: what are the limits for
1004 @type check_std: bool
1005 @param check_std: Whether to check std value or just assume compliance
1006 @rtype: bool
1007 @return: C{True} when specs are valid, C{False} when standard spec for the
1008 given name is not valid
1009 @raise errors.ConfigurationError: when min/max specs for the given name
1010 are not valid
1011
1012 """
1013 minspec = minmaxspecs[constants.ISPECS_MIN]
1014 maxspec = minmaxspecs[constants.ISPECS_MAX]
1015 min_v = minspec[name]
1016 max_v = maxspec[name]
1017
1018 if min_v > max_v:
1019 err = ("Invalid specification of min/max values for %s: %s/%s" %
1020 (name, min_v, max_v))
1021 raise errors.ConfigurationError(err)
1022 elif check_std:
1023 std_v = stdspec.get(name, min_v)
1024 return std_v >= min_v and std_v <= max_v
1025 else:
1026 return True
1027
1028 @classmethod
1030 """Checks the disk templates for validity.
1031
1032 """
1033 if not disk_templates:
1034 raise errors.ConfigurationError("Instance policy must contain" +
1035 " at least one disk template")
1036 wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1037 if wrong:
1038 raise errors.ConfigurationError("Invalid disk template(s) %s" %
1039 utils.CommaJoin(wrong))
1040
1041 @classmethod
1043 """Checks a parameter.
1044
1045 Currently we expect all parameters to be float values.
1046
1047 """
1048 try:
1049 float(value)
1050 except (TypeError, ValueError), err:
1051 raise errors.ConfigurationError("Invalid value for key" " '%s':"
1052 " '%s', error: %s" % (key, value, err))
1053
1056 """Config object representing an instance."""
1057 __slots__ = [
1058 "name",
1059 "primary_node",
1060 "os",
1061 "hypervisor",
1062 "hvparams",
1063 "beparams",
1064 "osparams",
1065 "admin_state",
1066 "nics",
1067 "disks",
1068 "disk_template",
1069 "disks_active",
1070 "network_port",
1071 "serial_no",
1072 ] + _TIMESTAMPS + _UUID
1073
1075 """Compute the list of secondary nodes.
1076
1077 This is a simple wrapper over _ComputeAllNodes.
1078
1079 """
1080 all_nodes = set(self._ComputeAllNodes())
1081 all_nodes.discard(self.primary_node)
1082 return tuple(all_nodes)
1083
1084 secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1085 "List of names of secondary nodes")
1086
1088 """Compute the list of all nodes.
1089
1090 Since the data is already there (in the drbd disks), keeping it as
1091 a separate normal attribute is redundant and if not properly
1092 synchronised can cause problems. Thus it's better to compute it
1093 dynamically.
1094
1095 """
1096 def _Helper(nodes, device):
1097 """Recursively computes nodes given a top device."""
1098 if device.dev_type in constants.LDS_DRBD:
1099 nodea, nodeb = device.logical_id[:2]
1100 nodes.add(nodea)
1101 nodes.add(nodeb)
1102 if device.children:
1103 for child in device.children:
1104 _Helper(nodes, child)
1105
1106 all_nodes = set()
1107 all_nodes.add(self.primary_node)
1108 for device in self.disks:
1109 _Helper(all_nodes, device)
1110 return tuple(all_nodes)
1111
1112 all_nodes = property(_ComputeAllNodes, None, None,
1113 "List of names of all the nodes of the instance")
1114
1116 """Provide a mapping of nodes to LVs this instance owns.
1117
1118 This function figures out what logical volumes should belong on
1119 which nodes, recursing through a device tree.
1120
1121 @param lvmap: optional dictionary to receive the
1122 'node' : ['lv', ...] data.
1123
1124 @return: None if lvmap arg is given, otherwise, a dictionary of
1125 the form { 'nodename' : ['volume1', 'volume2', ...], ... };
1126 volumeN is of the form "vg_name/lv_name", compatible with
1127 GetVolumeList()
1128
1129 """
1130 if node is None:
1131 node = self.primary_node
1132
1133 if lvmap is None:
1134 lvmap = {
1135 node: [],
1136 }
1137 ret = lvmap
1138 else:
1139 if not node in lvmap:
1140 lvmap[node] = []
1141 ret = None
1142
1143 if not devs:
1144 devs = self.disks
1145
1146 for dev in devs:
1147 if dev.dev_type == constants.LD_LV:
1148 lvmap[node].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1149
1150 elif dev.dev_type in constants.LDS_DRBD:
1151 if dev.children:
1152 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1153 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1154
1155 elif dev.children:
1156 self.MapLVsByNode(lvmap, dev.children, node)
1157
1158 return ret
1159
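# Illustrative output shape (not part of the upstream module; node and volume
# names are placeholders): for an instance with a single plain LV disk,
#
#   inst.MapLVsByNode()
#   # -> {"node1.example.com": ["xenvg/disk0"]}
#
# while DRBD disks contribute their backing LVs on both the primary and the
# secondary node.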
1161 """Find a disk given having a specified index.
1162
1163 This is just a wrapper that does validation of the index.
1164
1165 @type idx: int
1166 @param idx: the disk index
1167 @rtype: L{Disk}
1168 @return: the corresponding disk
1169 @raise errors.OpPrereqError: when the given index is not valid
1170
1171 """
1172 try:
1173 idx = int(idx)
1174 return self.disks[idx]
1175 except (TypeError, ValueError), err:
1176 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1177 errors.ECODE_INVAL)
1178 except IndexError:
1179 raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
1180 " 0 to %d)" % (idx, len(self.disks) - 1),
1181 errors.ECODE_INVAL)
1182
1184 """Instance-specific conversion to standard python types.
1185
1186 This replaces the children lists of objects with lists of standard
1187 python types.
1188
1189 """
1190 bo = super(Instance, self).ToDict()
1191
1192 for attr in "nics", "disks":
1193 alist = bo.get(attr, None)
1194 if alist:
1195 nlist = outils.ContainerToDicts(alist)
1196 else:
1197 nlist = []
1198 bo[attr] = nlist
1199 return bo
1200
1201 @classmethod
1217
1219 """Fill defaults for missing configuration values.
1220
1221 """
1222 for nic in self.nics:
1223 nic.UpgradeConfig()
1224 for disk in self.disks:
1225 disk.UpgradeConfig()
1226 if self.hvparams:
1227 for key in constants.HVC_GLOBALS:
1228 try:
1229 del self.hvparams[key]
1230 except KeyError:
1231 pass
1232 if self.osparams is None:
1233 self.osparams = {}
1234 UpgradeBeParams(self.beparams)
1235 if self.disks_active is None:
1236 self.disks_active = self.admin_state == constants.ADMINST_UP
1237
1238
1239 class OS(ConfigObject):
1240 """Config object representing an operating system.
1241
1242 @type supported_parameters: list
1243 @ivar supported_parameters: a list of tuples, name and description,
1244 containing the supported parameters by this OS
1245
1246 @type VARIANT_DELIM: string
1247 @cvar VARIANT_DELIM: the variant delimiter
1248
1249 """
1250 __slots__ = [
1251 "name",
1252 "path",
1253 "api_versions",
1254 "create_script",
1255 "export_script",
1256 "import_script",
1257 "rename_script",
1258 "verify_script",
1259 "supported_variants",
1260 "supported_parameters",
1261 ]
1262
1263 VARIANT_DELIM = "+"
1264
1265 @classmethod
1267 """Splits the name into the proper name and variant.
1268
1269 @param name: the OS (unprocessed) name
1270 @rtype: list
1271 @return: a list of two elements; if the original name didn't
1272 contain a variant, it's returned as an empty string
1273
1274 """
1275 nv = name.split(cls.VARIANT_DELIM, 1)
1276 if len(nv) == 1:
1277 nv.append("")
1278 return nv
1279
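# Illustrative behaviour (not part of the upstream module):
#
#   OS.SplitNameVariant("debootstrap+testing")   # -> ["debootstrap", "testing"]
#   OS.SplitNameVariant("debootstrap")           # -> ["debootstrap", ""]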
1280 @classmethod
1282 """Returns the proper name of the os (without the variant).
1283
1284 @param name: the OS (unprocessed) name
1285
1286 """
1287 return cls.SplitNameVariant(name)[0]
1288
1289 @classmethod
1291 """Returns the variant the os (without the base name).
1292
1293 @param name: the OS (unprocessed) name
1294
1295 """
1296 return cls.SplitNameVariant(name)[1]
1297
1300 """Config object representing an External Storage Provider.
1301
1302 """
1303 __slots__ = [
1304 "name",
1305 "path",
1306 "create_script",
1307 "remove_script",
1308 "grow_script",
1309 "attach_script",
1310 "detach_script",
1311 "setinfo_script",
1312 "verify_script",
1313 "supported_parameters",
1314 ]
1315
1318 """Hypvervisor state on a node.
1319
1320 @ivar mem_total: Total amount of memory
1321 @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1322 available)
1323 @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1324 rounding
1325 @ivar mem_inst: Memory used by instances living on node
1326 @ivar cpu_total: Total node CPU core count
1327 @ivar cpu_node: Number of CPU cores reserved for the node itself
1328
1329 """
1330 __slots__ = [
1331 "mem_total",
1332 "mem_node",
1333 "mem_hv",
1334 "mem_inst",
1335 "cpu_total",
1336 "cpu_node",
1337 ] + _TIMESTAMPS
1338
1341 """Disk state on a node.
1342
1343 """
1344 __slots__ = [
1345 "total",
1346 "reserved",
1347 "overhead",
1348 ] + _TIMESTAMPS
1349
1350
1351 class Node(TaggableObject):
1352 """Config object representing a node.
1353
1354 @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1355 @ivar hv_state_static: Hypervisor state overridden by user
1356 @ivar disk_state: Disk state (e.g. free space)
1357 @ivar disk_state_static: Disk state overridden by user
1358
1359 """
1360 __slots__ = [
1361 "name",
1362 "primary_ip",
1363 "secondary_ip",
1364 "serial_no",
1365 "master_candidate",
1366 "offline",
1367 "drained",
1368 "group",
1369 "master_capable",
1370 "vm_capable",
1371 "ndparams",
1372 "powered",
1373 "hv_state",
1374 "hv_state_static",
1375 "disk_state",
1376 "disk_state_static",
1377 ] + _TIMESTAMPS + _UUID
1378
1380 """Fill defaults for missing configuration values.
1381
1382 """
1383
1384
1385 if self.master_capable is None:
1386 self.master_capable = True
1387
1388 if self.vm_capable is None:
1389 self.vm_capable = True
1390
1391 if self.ndparams is None:
1392 self.ndparams = {}
1393
1394 for key in constants.NDC_GLOBALS:
1395 if key in self.ndparams:
1396 logging.warning("Ignoring %s node parameter for node %s",
1397 key, self.name)
1398 del self.ndparams[key]
1399
1400 if self.powered is None:
1401 self.powered = True
1402
1404 """Custom function for serializing.
1405
1406 """
1407 data = super(Node, self).ToDict()
1408
1409 hv_state = data.get("hv_state", None)
1410 if hv_state is not None:
1411 data["hv_state"] = outils.ContainerToDicts(hv_state)
1412
1413 disk_state = data.get("disk_state", None)
1414 if disk_state is not None:
1415 data["disk_state"] = \
1416 dict((key, outils.ContainerToDicts(value))
1417 for (key, value) in disk_state.items())
1418
1419 return data
1420
1421 @classmethod
1423 """Custom function for deserializing.
1424
1425 """
1426 obj = super(Node, cls).FromDict(val)
1427
1428 if obj.hv_state is not None:
1429 obj.hv_state = \
1430 outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1431
1432 if obj.disk_state is not None:
1433 obj.disk_state = \
1434 dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1435 for (key, value) in obj.disk_state.items())
1436
1437 return obj
1438
1441 """Config object representing a node group."""
1442 __slots__ = [
1443 "name",
1444 "members",
1445 "ndparams",
1446 "diskparams",
1447 "ipolicy",
1448 "serial_no",
1449 "hv_state_static",
1450 "disk_state_static",
1451 "alloc_policy",
1452 "networks",
1453 ] + _TIMESTAMPS + _UUID
1454
1456 """Custom function for nodegroup.
1457
1458 This discards the members object, which gets recalculated and is only kept
1459 in memory.
1460
1461 """
1462 mydict = super(NodeGroup, self).ToDict()
1463 del mydict["members"]
1464 return mydict
1465
1466 @classmethod
1468 """Custom function for nodegroup.
1469
1470 The members slot is initialized to an empty list, upon deserialization.
1471
1472 """
1473 obj = super(NodeGroup, cls).FromDict(val)
1474 obj.members = []
1475 return obj
1476
1478 """Fill defaults for missing configuration values.
1479
1480 """
1481 if self.ndparams is None:
1482 self.ndparams = {}
1483
1484 if self.serial_no is None:
1485 self.serial_no = 1
1486
1487 if self.alloc_policy is None:
1488 self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1489
1490
1491
1492 if self.mtime is None:
1493 self.mtime = time.time()
1494
1495 if self.diskparams is None:
1496 self.diskparams = {}
1497 if self.ipolicy is None:
1498 self.ipolicy = MakeEmptyIPolicy()
1499
1500 if self.networks is None:
1501 self.networks = {}
1502
1504 """Return filled out ndparams for L{objects.Node}
1505
1506 @type node: L{objects.Node}
1507 @param node: A Node object to fill
1508 @return: a copy of the node's ndparams with defaults filled
1509
1510 """
1511 return self.SimpleFillND(node.ndparams)
1512
1514 """Fill a given ndparams dict with defaults.
1515
1516 @type ndparams: dict
1517 @param ndparams: the dict to fill
1518 @rtype: dict
1519 @return: a copy of the passed in ndparams with missing keys filled
1520 from the node group defaults
1521
1522 """
1523 return FillDict(self.ndparams, ndparams)
1524
1525
1526 class Cluster(TaggableObject):
1527 """Config object representing the cluster."""
1528 __slots__ = [
1529 "serial_no",
1530 "rsahostkeypub",
1531 "dsahostkeypub",
1532 "highest_used_port",
1533 "tcpudp_port_pool",
1534 "mac_prefix",
1535 "volume_group_name",
1536 "reserved_lvs",
1537 "drbd_usermode_helper",
1538 "default_bridge",
1539 "default_hypervisor",
1540 "master_node",
1541 "master_ip",
1542 "master_netdev",
1543 "master_netmask",
1544 "use_external_mip_script",
1545 "cluster_name",
1546 "file_storage_dir",
1547 "shared_file_storage_dir",
1548 "enabled_hypervisors",
1549 "hvparams",
1550 "ipolicy",
1551 "os_hvp",
1552 "beparams",
1553 "osparams",
1554 "nicparams",
1555 "ndparams",
1556 "diskparams",
1557 "candidate_pool_size",
1558 "modify_etc_hosts",
1559 "modify_ssh_setup",
1560 "maintain_node_health",
1561 "uid_pool",
1562 "default_iallocator",
1563 "hidden_os",
1564 "blacklisted_os",
1565 "primary_ip_family",
1566 "prealloc_wipe_disks",
1567 "hv_state_static",
1568 "disk_state_static",
1569 "enabled_disk_templates",
1570 ] + _TIMESTAMPS + _UUID
1571
1573 """Fill defaults for missing configuration values.
1574
1575 """
1576
1577
1578 if self.hvparams is None:
1579 self.hvparams = constants.HVC_DEFAULTS
1580 else:
1581 for hypervisor in self.hvparams:
1582 self.hvparams[hypervisor] = FillDict(
1583 constants.HVC_DEFAULTS[hypervisor], self.hvparams[hypervisor])
1584
1585 if self.os_hvp is None:
1586 self.os_hvp = {}
1587
1588
1589 if self.osparams is None:
1590 self.osparams = {}
1591
1592 self.ndparams = UpgradeNDParams(self.ndparams)
1593
1594 self.beparams = UpgradeGroupedParams(self.beparams,
1595 constants.BEC_DEFAULTS)
1596 for beparams_group in self.beparams:
1597 UpgradeBeParams(self.beparams[beparams_group])
1598
1599 migrate_default_bridge = not self.nicparams
1600 self.nicparams = UpgradeGroupedParams(self.nicparams,
1601 constants.NICC_DEFAULTS)
1602 if migrate_default_bridge:
1603 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1604 self.default_bridge
1605
1606 if self.modify_etc_hosts is None:
1607 self.modify_etc_hosts = True
1608
1609 if self.modify_ssh_setup is None:
1610 self.modify_ssh_setup = True
1611
1612
1613
1614
1615 if self.default_bridge is not None:
1616 self.default_bridge = None
1617
1618
1619
1620 if self.default_hypervisor is not None:
1621 self.enabled_hypervisors = ([self.default_hypervisor] +
1622 [hvname for hvname in self.enabled_hypervisors
1623 if hvname != self.default_hypervisor])
1624 self.default_hypervisor = None
1625
1626
1627 if self.maintain_node_health is None:
1628 self.maintain_node_health = False
1629
1630 if self.uid_pool is None:
1631 self.uid_pool = []
1632
1633 if self.default_iallocator is None:
1634 self.default_iallocator = ""
1635
1636
1637 if self.reserved_lvs is None:
1638 self.reserved_lvs = []
1639
1640
1641 if self.hidden_os is None:
1642 self.hidden_os = []
1643
1644 if self.blacklisted_os is None:
1645 self.blacklisted_os = []
1646
1647
1648 if self.primary_ip_family is None:
1649 self.primary_ip_family = AF_INET
1650
1651 if self.master_netmask is None:
1652 ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1653 self.master_netmask = ipcls.iplen
1654
1655 if self.prealloc_wipe_disks is None:
1656 self.prealloc_wipe_disks = False
1657
1658
1659 if self.shared_file_storage_dir is None:
1660 self.shared_file_storage_dir = ""
1661
1662 if self.use_external_mip_script is None:
1663 self.use_external_mip_script = False
1664
1665 if self.diskparams:
1666 self.diskparams = UpgradeDiskParams(self.diskparams)
1667 else:
1668 self.diskparams = constants.DISK_DT_DEFAULTS.copy()
1669
1670
1671 if self.ipolicy is None:
1672 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
1673 else:
1674
1675
1676
1677 wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
1678 if wrongkeys:
1679
1680 msg = ("Cluster instance policy contains spurious keys: %s" %
1681 utils.CommaJoin(wrongkeys))
1682 raise errors.ConfigurationError(msg)
1683 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1684
1685 @property
1687 """The first hypervisor is the primary.
1688
1689 Useful, for example, for L{Node}'s hv/disk state.
1690
1691 """
1692 return self.enabled_hypervisors[0]
1693
1695 """Custom function for cluster.
1696
1697 """
1698 mydict = super(Cluster, self).ToDict()
1699
1700 if self.tcpudp_port_pool is None:
1701 tcpudp_port_pool = []
1702 else:
1703 tcpudp_port_pool = list(self.tcpudp_port_pool)
1704
1705 mydict["tcpudp_port_pool"] = tcpudp_port_pool
1706
1707 return mydict
1708
1709 @classmethod
1711 """Custom function for cluster.
1712
1713 """
1714 obj = super(Cluster, cls).FromDict(val)
1715
1716 if obj.tcpudp_port_pool is None:
1717 obj.tcpudp_port_pool = set()
1718 elif not isinstance(obj.tcpudp_port_pool, set):
1719 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1720
1721 return obj
1722
1724 """Fill a given diskparams dict with cluster defaults.
1725
1726 @param diskparams: The diskparams
1727 @return: The defaults dict
1728
1729 """
1730 return FillDiskParams(self.diskparams, diskparams)
1731
1732 def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1733 """Get the default hypervisor parameters for the cluster.
1734
1735 @param hypervisor: the hypervisor name
1736 @param os_name: if specified, we'll also update the defaults for this OS
1737 @param skip_keys: if passed, list of keys not to use
1738 @return: the defaults dict
1739
1740 """
1741 if skip_keys is None:
1742 skip_keys = []
1743
1744 fill_stack = [self.hvparams.get(hypervisor, {})]
1745 if os_name is not None:
1746 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1747 fill_stack.append(os_hvp)
1748
1749 ret_dict = {}
1750 for o_dict in fill_stack:
1751 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1752
1753 return ret_dict
1754
1755 def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1756 """Fill a given hvparams dict with cluster defaults.
1757
1758 @type hv_name: string
1759 @param hv_name: the hypervisor to use
1760 @type os_name: string
1761 @param os_name: the OS to use for overriding the hypervisor defaults
1762 @type skip_globals: boolean
1763 @param skip_globals: if True, the global hypervisor parameters will
1764 not be filled
1765 @rtype: dict
1766 @return: a copy of the given hvparams with missing keys filled from
1767 the cluster defaults
1768
1769 """
1770 if skip_globals:
1771 skip_keys = constants.HVC_GLOBALS
1772 else:
1773 skip_keys = []
1774
1775 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1776 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1777
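# Illustrative layering (not part of the upstream module; the hypervisor name,
# OS name and parameter below are only examples): SimpleFillHV builds the
# defaults from the cluster-wide hvparams, then the per-OS overrides in
# os_hvp, and finally overlays the instance's own hvparams; with
# skip_globals=True the keys in constants.HVC_GLOBALS are left out entirely.
#
#   filled = cluster.SimpleFillHV("kvm", "debootstrap+testing",
#                                 {"kernel_path": "/boot/vmlinuz-custom"})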
1778 def FillHV(self, instance, skip_globals=False):
1779 """Fill an instance's hvparams dict with cluster defaults.
1780
1781 @type instance: L{objects.Instance}
1782 @param instance: the instance parameter to fill
1783 @type skip_globals: boolean
1784 @param skip_globals: if True, the global hypervisor parameters will
1785 not be filled
1786 @rtype: dict
1787 @return: a copy of the instance's hvparams with missing keys filled from
1788 the cluster defaults
1789
1790 """
1791 return self.SimpleFillHV(instance.hypervisor, instance.os,
1792 instance.hvparams, skip_globals)
1793
1795 """Fill a given beparams dict with cluster defaults.
1796
1797 @type beparams: dict
1798 @param beparams: the dict to fill
1799 @rtype: dict
1800 @return: a copy of the passed in beparams with missing keys filled
1801 from the cluster defaults
1802
1803 """
1804 return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1805
1807 """Fill an instance's beparams dict with cluster defaults.
1808
1809 @type instance: L{objects.Instance}
1810 @param instance: the instance parameter to fill
1811 @rtype: dict
1812 @return: a copy of the instance's beparams with missing keys filled from
1813 the cluster defaults
1814
1815 """
1816 return self.SimpleFillBE(instance.beparams)
1817
1819 """Fill a given nicparams dict with cluster defaults.
1820
1821 @type nicparams: dict
1822 @param nicparams: the dict to fill
1823 @rtype: dict
1824 @return: a copy of the passed in nicparams with missing keys filled
1825 from the cluster defaults
1826
1827 """
1828 return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1829
1831 """Fill an instance's osparams dict with cluster defaults.
1832
1833 @type os_name: string
1834 @param os_name: the OS name to use
1835 @type os_params: dict
1836 @param os_params: the dict to fill with default values
1837 @rtype: dict
1838 @return: a copy of the instance's osparams with missing keys filled from
1839 the cluster defaults
1840
1841 """
1842 name_only = os_name.split("+", 1)[0]
1843
1844 result = self.osparams.get(name_only, {})
1845
1846 result = FillDict(result, self.osparams.get(os_name, {}))
1847
1848 return FillDict(result, os_params)
1849
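# Illustrative layering (not part of the upstream module; the method above is
# assumed to be named SimpleFillOS, as in upstream Ganeti, and the parameter
# names are made up): OS parameters are resolved base-name first, then the
# full "name+variant" entry, and finally the per-instance values win.
#
#   cluster.osparams = {"debootstrap": {"mirror": "http://deb.example.com"},
#                       "debootstrap+testing": {"suite": "testing"}}
#   cluster.SimpleFillOS("debootstrap+testing", {"extra": "1"})
#   # -> {"mirror": "http://deb.example.com", "suite": "testing", "extra": "1"}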
1850 @staticmethod
1856
1857 @staticmethod
1863
1864 def FillND(self, node, nodegroup):
1865 """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
1866
1867 @type node: L{objects.Node}
1868 @param node: A Node object to fill
1869 @type nodegroup: L{objects.NodeGroup}
1870 @param nodegroup: A NodeGroup object to fill
1871 @return: a copy of the node's ndparams with defaults filled
1872
1873 """
1874 return self.SimpleFillND(nodegroup.FillND(node))
1875
1877 """Fill a given ndparams dict with defaults.
1878
1879 @type ndparams: dict
1880 @param ndparams: the dict to fill
1881 @rtype: dict
1882 @return: a copy of the passed in ndparams with missing keys filled
1883 from the cluster defaults
1884
1885 """
1886 return FillDict(self.ndparams, ndparams)
1887
1889 """ Fill instance policy dict with defaults.
1890
1891 @type ipolicy: dict
1892 @param ipolicy: the dict to fill
1893 @rtype: dict
1894 @return: a copy of passed ipolicy with missing keys filled from
1895 the cluster defaults
1896
1897 """
1898 return FillIPolicy(self.ipolicy, ipolicy)
1899
1902 """Config object representing the status of a block device."""
1903 __slots__ = [
1904 "dev_path",
1905 "major",
1906 "minor",
1907 "sync_percent",
1908 "estimated_time",
1909 "is_degraded",
1910 "ldisk_status",
1911 ]
1912
1915 """Config object representing the status of an import or export."""
1916 __slots__ = [
1917 "recent_output",
1918 "listen_port",
1919 "connected",
1920 "progress_mbytes",
1921 "progress_throughput",
1922 "progress_eta",
1923 "progress_percent",
1924 "exit_status",
1925 "error_message",
1926 ] + _TIMESTAMPS
1927
1930 """Options for import/export daemon
1931
1932 @ivar key_name: X509 key name (None for cluster certificate)
1933 @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
1934 @ivar compress: Compression method (one of L{constants.IEC_ALL})
1935 @ivar magic: Used to ensure the connection goes to the right disk
1936 @ivar ipv6: Whether to use IPv6
1937 @ivar connect_timeout: Number of seconds for establishing connection
1938
1939 """
1940 __slots__ = [
1941 "key_name",
1942 "ca_pem",
1943 "compress",
1944 "magic",
1945 "ipv6",
1946 "connect_timeout",
1947 ]
1948
1951 """Object holding a confd request.
1952
1953 @ivar protocol: confd protocol version
1954 @ivar type: confd query type
1955 @ivar query: query request
1956 @ivar rsalt: requested reply salt
1957
1958 """
1959 __slots__ = [
1960 "protocol",
1961 "type",
1962 "query",
1963 "rsalt",
1964 ]
1965
1968 """Object holding a confd reply.
1969
1970 @ivar protocol: confd protocol version
1971 @ivar status: reply status code (ok, error)
1972 @ivar answer: confd query reply
1973 @ivar serial: configuration serial number
1974
1975 """
1976 __slots__ = [
1977 "protocol",
1978 "status",
1979 "answer",
1980 "serial",
1981 ]
1982
1985 """Object holding a query field definition.
1986
1987 @ivar name: Field name
1988 @ivar title: Human-readable title
1989 @ivar kind: Field type
1990 @ivar doc: Human-readable description
1991
1992 """
1993 __slots__ = [
1994 "name",
1995 "title",
1996 "kind",
1997 "doc",
1998 ]
1999
2002 __slots__ = [
2003 "fields",
2004 ]
2005
2013
2014 @classmethod
2023
2026 """Object holding the response to a query.
2027
2028 @ivar fields: List of L{QueryFieldDefinition} objects
2029 @ivar data: Requested data
2030
2031 """
2032 __slots__ = [
2033 "data",
2034 ]
2035
2038 """Object holding a request for querying available fields.
2039
2040 """
2041 __slots__ = [
2042 "what",
2043 "fields",
2044 ]
2045
2048 """Object holding the response to a query for fields.
2049
2050 @ivar fields: List of L{QueryFieldDefinition} objects
2051
2052 """
2053 __slots__ = []
2054
2057 """Object holding the status of a migration.
2058
2059 """
2060 __slots__ = [
2061 "status",
2062 "transferred_ram",
2063 "total_ram",
2064 ]
2065
2068 """Object describing how to access the console of an instance.
2069
2070 """
2071 __slots__ = [
2072 "instance",
2073 "kind",
2074 "message",
2075 "host",
2076 "port",
2077 "user",
2078 "command",
2079 "display",
2080 ]
2081
2083 """Validates contents of this object.
2084
2085 """
2086 assert self.kind in constants.CONS_ALL, "Unknown console type"
2087 assert self.instance, "Missing instance name"
2088 assert self.message or self.kind in [constants.CONS_SSH,
2089 constants.CONS_SPICE,
2090 constants.CONS_VNC]
2091 assert self.host or self.kind == constants.CONS_MESSAGE
2092 assert self.port or self.kind in [constants.CONS_MESSAGE,
2093 constants.CONS_SSH]
2094 assert self.user or self.kind in [constants.CONS_MESSAGE,
2095 constants.CONS_SPICE,
2096 constants.CONS_VNC]
2097 assert self.command or self.kind in [constants.CONS_MESSAGE,
2098 constants.CONS_SPICE,
2099 constants.CONS_VNC]
2100 assert self.display or self.kind in [constants.CONS_MESSAGE,
2101 constants.CONS_SPICE,
2102 constants.CONS_SSH]
2103 return True
2104
2105
2106 class Network(TaggableObject):
2107 """Object representing a network definition for ganeti.
2108
2109 """
2110 __slots__ = [
2111 "name",
2112 "serial_no",
2113 "mac_prefix",
2114 "network",
2115 "network6",
2116 "gateway",
2117 "gateway6",
2118 "reservations",
2119 "ext_reservations",
2120 ] + _TIMESTAMPS + _UUID
2121
2123 """Export a dictionary used by hooks with a network's information.
2124
2125 @type prefix: String
2126 @param prefix: Prefix to prepend to the dict entries
2127
2128 """
2129 result = {
2130 "%sNETWORK_NAME" % prefix: self.name,
2131 "%sNETWORK_UUID" % prefix: self.uuid,
2132 "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
2133 }
2134 if self.network:
2135 result["%sNETWORK_SUBNET" % prefix] = self.network
2136 if self.gateway:
2137 result["%sNETWORK_GATEWAY" % prefix] = self.gateway
2138 if self.network6:
2139 result["%sNETWORK_SUBNET6" % prefix] = self.network6
2140 if self.gateway6:
2141 result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
2142 if self.mac_prefix:
2143 result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix
2144
2145 return result
2146
2147 @classmethod
2149 """Custom function for networks.
2150
2151 Remove deprecated network_type and family.
2152
2153 """
2154 if "network_type" in val:
2155 del val["network_type"]
2156 if "family" in val:
2157 del val["family"]
2158 obj = super(Network, cls).FromDict(val)
2159 return obj
2160
2164 """Simple wrapper over ConfigParse that allows serialization.
2165
2166 This class is basically ConfigParser.SafeConfigParser with two
2167 additional methods that allow it to serialize/unserialize to/from a
2168 buffer.
2169
2170 """
2172 """Dump this instance and return the string representation."""
2173 buf = StringIO()
2174 self.write(buf)
2175 return buf.getvalue()
2176
2177 @classmethod
2179 """Load data from a string."""
2180 buf = StringIO(data)
2181 cfp = cls()
2182 cfp.readfp(buf)
2183 return cfp
2184
2185 def get(self, section, option, **kwargs):
2186 value = None
2187 try:
2188 value = super(SerializableConfigParser, self).get(section, option,
2189 **kwargs)
2190 if value.lower() == constants.VALUE_NONE:
2191 value = None
2192 except ConfigParser.NoOptionError:
2193 r = re.compile(r"(disk|nic)\d+_name|nic\d+_(network|vlan)")
2194 match = r.match(option)
2195 if match:
2196 pass
2197 else:
2198 raise
2199
2200 return value
2201
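# Illustrative round trip (not part of the upstream module; the dump/load
# methods above are assumed to be named Dumps and Loads, as in upstream
# Ganeti):
#
#   cfg = SerializableConfigParser()
#   cfg.add_section("export")
#   cfg.set("export", "os", "debootstrap")
#   data = cfg.Dumps()                                          # INI-style text
#   SerializableConfigParser.Loads(data).get("export", "os")    # -> "debootstrap"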
2204 """Information about an LVM physical volume (PV).
2205
2206 @type name: string
2207 @ivar name: name of the PV
2208 @type vg_name: string
2209 @ivar vg_name: name of the volume group containing the PV
2210 @type size: float
2211 @ivar size: size of the PV in MiB
2212 @type free: float
2213 @ivar free: free space in the PV, in MiB
2214 @type attributes: string
2215 @ivar attributes: PV attributes
2216 @type lv_list: list of strings
2217 @ivar lv_list: names of the LVs hosted on the PV
2218 """
2219 __slots__ = [
2220 "name",
2221 "vg_name",
2222 "size",
2223 "free",
2224 "attributes",
2225 "lv_list"
2226 ]
2227
2229 """Is this PV empty?
2230
2231 """
2232 return self.size <= (self.free + 1)
2233
2235 """Is this PV allocatable?
2236
2237 """
2238 return ("a" in self.attributes)
2239