31 """Transportable objects for Ganeti.
32
33 This module provides small, mostly data-only objects which are safe to
34 pass to and from external parties.
35
36 """
37
38
39
40
41
42
43
44
45
46
47 import ConfigParser
48 import re
49 import copy
50 import logging
51 import time
52 from cStringIO import StringIO
53
54 from ganeti import errors
55 from ganeti import constants
56 from ganeti import netutils
57 from ganeti import outils
58 from ganeti import utils
59
60 from socket import AF_INET
61
62
63 __all__ = ["ConfigObject", "ConfigData", "NIC", "Disk", "Instance",
64 "OS", "Node", "NodeGroup", "Cluster", "FillDict", "Network"]
65
66 _TIMESTAMPS = ["ctime", "mtime"]
67 _UUID = ["uuid"]
68
69
70 def FillDict(defaults_dict, custom_dict, skip_keys=None):
71 """Basic function to apply settings on top a default dict.
72
73 @type defaults_dict: dict
74 @param defaults_dict: dictionary holding the default values
75 @type custom_dict: dict
76 @param custom_dict: dictionary holding customized values
77 @type skip_keys: list
78 @param skip_keys: which keys not to fill
79 @rtype: dict
80 @return: dict with the 'full' values
81
82 """
83 ret_dict = copy.deepcopy(defaults_dict)
84 ret_dict.update(custom_dict)
85 if skip_keys:
86 for k in skip_keys:
87 if k in ret_dict:
88 del ret_dict[k]
89 return ret_dict
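# A minimal sketch of FillDict semantics, using throwaway keys rather than real
# Ganeti parameter names:
#
#   FillDict({"a": 1, "b": 2}, {"b": 3}, skip_keys=["a"])  ->  {"b": 3}
#
# Defaults are deep-copied, customized values win, and skipped keys are dropped
# from the result.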
90
93 """Fills an instance policy with defaults.
94
95 """
96 assert frozenset(default_ipolicy.keys()) == constants.IPOLICY_ALL_KEYS
97 ret_dict = copy.deepcopy(custom_ipolicy)
98 for key in default_ipolicy:
99 if key not in ret_dict:
100 ret_dict[key] = copy.deepcopy(default_ipolicy[key])
101 elif key == constants.ISPECS_STD:
102 ret_dict[key] = FillDict(default_ipolicy[key], ret_dict[key])
103 return ret_dict
104
105
106 def FillDiskParams(default_dparams, custom_dparams, skip_keys=None):
107 """Fills the disk parameter defaults.
108
109 @see: L{FillDict} for parameters and return value
110
111 """
112 assert frozenset(default_dparams.keys()) == constants.DISK_TEMPLATES
113
114 return dict((dt, FillDict(default_dparams[dt], custom_dparams.get(dt, {}),
115 skip_keys=skip_keys))
116 for dt in constants.DISK_TEMPLATES)
117
120 """Update all groups for the target parameter.
121
122 @type target: dict of dicts
123 @param target: {group: {parameter: value}}
124 @type defaults: dict
125 @param defaults: default parameter values
126
127 """
128 if target is None:
129 target = {constants.PP_DEFAULT: defaults}
130 else:
131 for group in target:
132 target[group] = FillDict(defaults, target[group])
133 return target
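# A small sketch of the upgrade behaviour (the parameter keys below are made up):
#
#   UpgradeGroupedParams(None, {"x": 1})
#     ->  {constants.PP_DEFAULT: {"x": 1}}
#   UpgradeGroupedParams({"grp": {"x": 5}}, {"x": 1, "y": 2})
#     ->  {"grp": {"x": 5, "y": 2}}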
134
148
151 """Upgrade the disk parameters.
152
153 @type diskparams: dict
154 @param diskparams: disk parameters to upgrade
155 @rtype: dict
156 @return: the upgraded disk parameters dict
157
158 """
159 if not diskparams:
160 result = {}
161 else:
162 result = FillDiskParams(constants.DISK_DT_DEFAULTS, diskparams)
163
164 return result
165
184
187 """Create empty IPolicy dictionary.
188
189 """
190 return {}
191
194 """A generic config object.
195
196 It has the following properties:
197
198 - provides somewhat safe recursive unpickling and pickling for its classes
199 - unset attributes which are defined in slots are always returned
200 as None instead of raising an error
201
202 Classes derived from this must always declare __slots__ (we use many
203 config objects and the memory reduction is useful)
204
205 """
206 __slots__ = []
207
209 if name not in self.GetAllSlots():
210 raise AttributeError("Invalid object attribute %s.%s" %
211 (type(self).__name__, name))
212 return None
213
219
221 """Validates the slots.
222
223 This method returns L{None} if the validation succeeds, or raises
224 an exception otherwise.
225
226 This method must be implemented by the child classes.
227
228 @rtype: NoneType
229 @return: L{None}, if the validation succeeds
230
231 @raise Exception: validation fails
232
233 """
234
236 """Convert to a dict holding only standard python types.
237
238 The generic routine just dumps all of this object's attributes in
239 a dict. It does not work if the class has children who are
240 ConfigObjects themselves (e.g. the nics list in an Instance), in
241 which case the object should subclass the function in order to
242 make sure all objects returned are only standard python types.
243
244 """
245 result = {}
246 for name in self.GetAllSlots():
247 value = getattr(self, name, None)
248 if value is not None:
249 result[name] = value
250 return result
251
252 __getstate__ = ToDict
253
254 @classmethod
256 """Create an object from a dictionary.
257
258 This generic routine takes a dict, instantiates a new instance of
259 the given class, and sets attributes based on the dict content.
260
261 As for `ToDict`, this does not work if the class has children
262 who are ConfigObjects themselves (e.g. the nics list in an
263 Instance), in which case the object should subclass the function
264 and alter the objects.
265
266 """
267 if not isinstance(val, dict):
268 raise errors.ConfigurationError("Invalid object passed to FromDict:"
269 " expected dict, got %s" % type(val))
270 val_str = dict([(str(k), v) for k, v in val.iteritems()])
271 obj = cls(**val_str)
272 return obj
273
275 """Makes a deep copy of the current object and its children.
276
277 """
278 dict_form = self.ToDict()
279 clone_obj = self.__class__.FromDict(dict_form)
280 return clone_obj
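# A quick illustration of the ToDict/FromDict round trip, using the NIC object
# defined further below (the values are made up):
#
#   nic = NIC(mac="aa:00:00:11:22:33", ip="192.0.2.10")
#   clone = nic.Copy()
#   clone == nic   ->  True   (equality compares the ToDict() forms)
#   clone is nic   ->  False  (Copy() builds a new object via FromDict)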
281
283 """Implement __repr__ for ConfigObjects."""
284 return repr(self.ToDict())
285
287 """Implement __eq__ for ConfigObjects."""
288 return isinstance(other, self.__class__) and self.ToDict() == other.ToDict()
289
291 """Fill defaults for missing configuration values.
292
293 This method will be called at configuration load time, and its
294 implementation will be object dependent.
295
296 """
297 pass
298
301 """An generic class supporting tags.
302
303 """
304 __slots__ = ["tags"]
305 VALID_TAG_RE = re.compile(r"^[\w.+*/:@-]+$")
306
307 @classmethod
309 """Check if a tag is valid.
310
311 If the tag is invalid, an errors.TagError will be raised. The
312 function has no return value.
313
314 """
315 if not isinstance(tag, basestring):
316 raise errors.TagError("Invalid tag type (not a string)")
317 if len(tag) > constants.MAX_TAG_LEN:
318 raise errors.TagError("Tag too long (>%d characters)" %
319 constants.MAX_TAG_LEN)
320 if not tag:
321 raise errors.TagError("Tags cannot be empty")
322 if not cls.VALID_TAG_RE.match(tag):
323 raise errors.TagError("Tag contains invalid characters")
324
333
343
354
356 """Taggable-object-specific conversion to standard python types.
357
358 This replaces the tags set with a list.
359
360 """
361 bo = super(TaggableObject, self).ToDict()
362
363 tags = bo.get("tags", None)
364 if isinstance(tags, set):
365 bo["tags"] = list(tags)
366 return bo
367
368 @classmethod
370 """Custom function for instances.
371
372 """
373 obj = super(TaggableObject, cls).FromDict(val)
374 if hasattr(obj, "tags") and isinstance(obj.tags, list):
375 obj.tags = set(obj.tags)
376 return obj
377
380 """Network configuration parameters for the master
381
382 @ivar uuid: master nodes UUID
383 @ivar ip: master IP
384 @ivar netmask: master netmask
385 @ivar netdev: master network device
386 @ivar ip_family: master IP family
387
388 """
389 __slots__ = [
390 "uuid",
391 "ip",
392 "netmask",
393 "netdev",
394 "ip_family",
395 ]
396
399 """Top-level config object."""
400 __slots__ = [
401 "version",
402 "cluster",
403 "nodes",
404 "nodegroups",
405 "instances",
406 "networks",
407 "serial_no",
408 ] + _TIMESTAMPS
409
411 """Custom function for top-level config data.
412
413 This just replaces the list of instances, nodes and the cluster
414 with standard python types.
415
416 """
417 mydict = super(ConfigData, self).ToDict()
418 mydict["cluster"] = mydict["cluster"].ToDict()
419 for key in "nodes", "instances", "nodegroups", "networks":
420 mydict[key] = outils.ContainerToDicts(mydict[key])
421
422 return mydict
423
424 @classmethod
438
440 """Check if in there is at disk of the given type in the configuration.
441
442 @type dev_type: L{constants.DTS_BLOCK}
443 @param dev_type: the type to look for
444 @rtype: boolean
445 @return: boolean indicating if a disk of the given type was found or not
446
447 """
448 for instance in self.instances.values():
449 for disk in instance.disks:
450 if disk.IsBasedOnDiskType(dev_type):
451 return True
452 return False
453
477
479 """Upgrade the cluster's enabled disk templates by inspecting the currently
480 enabled and/or used disk templates.
481
482 """
483 if not self.cluster.enabled_disk_templates:
484 template_set = \
485 set([inst.disk_template for inst in self.instances.values()])
486
487 if self.cluster.volume_group_name:
488 template_set.add(constants.DT_DRBD8)
489 template_set.add(constants.DT_PLAIN)
490
491
492
493 self.cluster.enabled_disk_templates = []
494 for preferred_template in constants.DISK_TEMPLATE_PREFERENCE:
495 if preferred_template in template_set:
496 self.cluster.enabled_disk_templates.append(preferred_template)
497 template_set.remove(preferred_template)
498 self.cluster.enabled_disk_templates.extend(list(template_set))
499 InstancePolicy.UpgradeDiskTemplates(
500 self.cluster.ipolicy, self.cluster.enabled_disk_templates)
501
502
503 class NIC(ConfigObject):
504 """Config object representing a network card."""
505 __slots__ = ["name", "mac", "ip", "network",
506 "nicparams", "netinfo", "pci"] + _UUID
507
508 @classmethod
525
526
527 class Disk(ConfigObject):
528 """Config object representing a block device."""
529 __slots__ = (["name", "dev_type", "logical_id", "children", "iv_name",
530 "size", "mode", "params", "spindles", "pci"] + _UUID +
531
532
533 ["dynamic_params"])
534
538
542
544 """Test if this device needs to be opened on a secondary node."""
545 return self.dev_type in (constants.DT_PLAIN,)
546
548 """Return the device path if this device type has a static one.
549
550 Some devices (LVM for example) always live at the same /dev/ path,
551 irrespective of their status. For such devices, we return this
552 path, for others we return None.
553
554 @warning: The path returned is not a normalized pathname; callers
555 should check that it is a valid path.
556
557 """
558 if self.dev_type == constants.DT_PLAIN:
559 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
560 elif self.dev_type == constants.DT_BLOCK:
561 return self.logical_id[1]
562 elif self.dev_type == constants.DT_RBD:
563 return "/dev/%s/%s" % (self.logical_id[0], self.logical_id[1])
564 return None
565
567 """Compute the needed number of children for activation.
568
569 This method will return either -1 (all children) or a positive
570 number denoting the minimum number of children needed for
571 activation (only mirrored devices will usually return >=0).
572
573 Currently, only DRBD8 supports diskless activation (therefore we
574 return 0), for all others we keep the previous semantics and return
575 -1.
576
577 """
578 if self.dev_type == constants.DT_DRBD8:
579 return 0
580 return -1
581
583 """Check if the disk or its children are based on the given type.
584
585 @type dev_type: L{constants.DTS_BLOCK}
586 @param dev_type: the type to look for
587 @rtype: boolean
588 @return: boolean indicating if a device of the given type was found or not
589
590 """
591 if self.children:
592 for child in self.children:
593 if child.IsBasedOnDiskType(dev_type):
594 return True
595 return self.dev_type == dev_type
596
598 """This function returns the nodes this device lives on.
599
600 Given the node on which the parent of the device lives (or, in the
601 case of a top-level device, the primary node of the device's
602 instance), this function will return a list of nodes on which this
603 device needs to (or can) be assembled.
604
605 """
606 if self.dev_type in [constants.DT_PLAIN, constants.DT_FILE,
607 constants.DT_BLOCK, constants.DT_RBD,
608 constants.DT_EXT, constants.DT_SHARED_FILE,
609 constants.DT_GLUSTER]:
610 result = [node_uuid]
611 elif self.dev_type in constants.DTS_DRBD:
612 result = [self.logical_id[0], self.logical_id[1]]
613 if node_uuid not in result:
614 raise errors.ConfigurationError("DRBD device passed unknown node")
615 else:
616 raise errors.ProgrammerError("Unhandled device type %s" % self.dev_type)
617 return result
618
620 """Compute the node/disk tree for this disk and its children.
621
622 This method, given the node on which the parent disk lives, will
623 return the list of all (node UUID, disk) pairs which describe the disk
624 tree in the most compact way. For example, a drbd/lvm stack
625 will be returned as (primary_node, drbd) and (secondary_node, drbd)
626 which represents all the top-level devices on the nodes.
627
628 """
629 my_nodes = self.GetNodes(parent_node_uuid)
630 result = [(node, self) for node in my_nodes]
631 if not self.children:
632
633 return result
634 for node in my_nodes:
635 for child in self.children:
636 child_result = child.ComputeNodeTree(node)
637 if len(child_result) == 1:
638
639
640
641 continue
642 else:
643
644
645
646 for subnode, subdisk in child_result:
647 if subnode not in my_nodes:
648 result.append((subnode, subdisk))
649
650
651
652 return result
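# Sketch of the compaction described above, for a hypothetical DRBD disk
# "drbd" mirrored between nodes A and B whose children are plain LVs local
# to each node:
#
#   drbd.ComputeNodeTree(A)  ->  [(A, drbd), (B, drbd)]
#
# The LV children only live on nodes already covered by the DRBD device, so
# they are not listed separately.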
653
655 """Compute the per-VG growth requirements.
656
657 This only works for VG-based disks.
658
659 @type amount: integer
660 @param amount: the desired increase in (user-visible) disk space
661 @rtype: dict
662 @return: a dictionary of volume-groups and the required size
663
664 """
665 if self.dev_type == constants.DT_PLAIN:
666 return {self.logical_id[0]: amount}
667 elif self.dev_type == constants.DT_DRBD8:
668 if self.children:
669 return self.children[0].ComputeGrowth(amount)
670 else:
671 return {}
672 else:
673
674 return {}
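# Example of the per-VG growth computation, for a hypothetical plain LV with
# logical_id ("xenvg", "data-lv"):
#
#   disk.ComputeGrowth(1024)  ->  {"xenvg": 1024}
#
# DRBD8 forwards the request to its data child; other disk types need no VG
# space and yield an empty dict.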
675
695
696 def Update(self, size=None, mode=None, spindles=None):
697 """Apply changes to size, spindles and mode.
698
699 """
700 if self.dev_type == constants.DT_DRBD8:
701 if self.children:
702 self.children[0].Update(size=size, mode=mode)
703 else:
704 assert not self.children
705
706 if size is not None:
707 self.size = size
708 if mode is not None:
709 self.mode = mode
710 if spindles is not None:
711 self.spindles = spindles
712
714 """Sets recursively the size to zero for the disk and its children.
715
716 """
717 if self.children:
718 for child in self.children:
719 child.UnsetSize()
720 self.size = 0
721
723 """Updates the dynamic disk params for the given node.
724
725 This is mainly used for drbd, which needs ip/port configuration.
726
727 Arguments:
728 - target_node_uuid: the node UUID we wish to configure for
729 - nodes_ip: a mapping of node UUID to IP
730
731 The target_node must exist in nodes_ip, and should be one of the
732 nodes in the logical ID if this device is a DRBD device.
733
734 """
735 if self.children:
736 for child in self.children:
737 child.UpdateDynamicDiskParams(target_node_uuid, nodes_ip)
738
739 dyn_disk_params = {}
740 if self.logical_id is not None and self.dev_type in constants.DTS_DRBD:
741 pnode_uuid, snode_uuid, _, pminor, sminor, _ = self.logical_id
742 if target_node_uuid not in (pnode_uuid, snode_uuid):
743
744
745
746 self.dynamic_params = dyn_disk_params
747 return
748
749 pnode_ip = nodes_ip.get(pnode_uuid, None)
750 snode_ip = nodes_ip.get(snode_uuid, None)
751 if pnode_ip is None or snode_ip is None:
752 raise errors.ConfigurationError("Can't find primary or secondary node"
753 " for %s" % str(self))
754 if pnode_uuid == target_node_uuid:
755 dyn_disk_params[constants.DDP_LOCAL_IP] = pnode_ip
756 dyn_disk_params[constants.DDP_REMOTE_IP] = snode_ip
757 dyn_disk_params[constants.DDP_LOCAL_MINOR] = pminor
758 dyn_disk_params[constants.DDP_REMOTE_MINOR] = sminor
759 else:
760 dyn_disk_params[constants.DDP_LOCAL_IP] = snode_ip
761 dyn_disk_params[constants.DDP_REMOTE_IP] = pnode_ip
762 dyn_disk_params[constants.DDP_LOCAL_MINOR] = sminor
763 dyn_disk_params[constants.DDP_REMOTE_MINOR] = pminor
764
765 self.dynamic_params = dyn_disk_params
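# For a DRBD disk seen from its primary node, the resulting dynamic parameters
# look roughly like this (the IPs and minors are made up):
#
#   {constants.DDP_LOCAL_IP: "192.0.2.1", constants.DDP_REMOTE_IP: "192.0.2.2",
#    constants.DDP_LOCAL_MINOR: 0, constants.DDP_REMOTE_MINOR: 1}
#
# Seen from the secondary node, the local/remote pairs are simply swapped.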
766
767
768 def ToDict(self, include_dynamic_params=False):
769 """Disk-specific conversion to standard python types.
770
771 This replaces the children lists of objects with lists of
772 standard python types.
773
774 """
775 bo = super(Disk, self).ToDict()
776 if not include_dynamic_params and "dynamic_params" in bo:
777 del bo["dynamic_params"]
778
779 for attr in ("children",):
780 alist = bo.get(attr, None)
781 if alist:
782 bo[attr] = outils.ContainerToDicts(alist)
783 return bo
784
785 @classmethod
787 """Custom function for Disks
788
789 """
790 obj = super(Disk, cls).FromDict(val)
791 if obj.children:
792 obj.children = outils.ContainerFromDicts(obj.children, list, Disk)
793 if obj.logical_id and isinstance(obj.logical_id, list):
794 obj.logical_id = tuple(obj.logical_id)
795 if obj.dev_type in constants.DTS_DRBD:
796
797 if len(obj.logical_id) < 6:
798 obj.logical_id += (None,) * (6 - len(obj.logical_id))
799 return obj
800
802 """Custom str() formatter for disks.
803
804 """
805 if self.dev_type == constants.DT_PLAIN:
806 val = "<LogicalVolume(/dev/%s/%s" % self.logical_id
807 elif self.dev_type in constants.DTS_DRBD:
808 node_a, node_b, port, minor_a, minor_b = self.logical_id[:5]
809 val = "<DRBD8("
810
811 val += ("hosts=%s/%d-%s/%d, port=%s, " %
812 (node_a, minor_a, node_b, minor_b, port))
813 if self.children and self.children.count(None) == 0:
814 val += "backend=%s, metadev=%s" % (self.children[0], self.children[1])
815 else:
816 val += "no local storage"
817 else:
818 val = ("<Disk(type=%s, logical_id=%s, children=%s" %
819 (self.dev_type, self.logical_id, self.children))
820 if self.iv_name is None:
821 val += ", not visible"
822 else:
823 val += ", visible as /dev/%s" % self.iv_name
824 if self.spindles is not None:
825 val += ", spindles=%s" % self.spindles
826 if isinstance(self.size, int):
827 val += ", size=%dm)>" % self.size
828 else:
829 val += ", size='%s')>" % (self.size,)
830 return val
831
833 """Checks that this disk is correctly configured.
834
835 """
836 all_errors = []
837 if self.mode not in constants.DISK_ACCESS_SET:
838 all_errors.append("Disk access mode '%s' is invalid" % (self.mode, ))
839 return all_errors
840
842 """Fill defaults for missing configuration values.
843
844 """
845 if self.children:
846 for child in self.children:
847 child.UpgradeConfig()
848
849
850
851
852
853 if not self.params or not isinstance(self.params, dict):
854 self.params = {}
855
856
857
858
859
860 LEG_DEV_TYPE_MAP = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
861 if self.dev_type in LEG_DEV_TYPE_MAP:
862 self.dev_type = LEG_DEV_TYPE_MAP[self.dev_type]
863
864 @staticmethod
866 """Computes Logical Disk parameters from Disk Template parameters.
867
868 @type disk_template: string
869 @param disk_template: disk template, one of L{constants.DISK_TEMPLATES}
870 @type disk_params: dict
871 @param disk_params: disk template parameters;
872 dict(template_name -> parameters)
873 @rtype: list(dict)
874 @return: a list of dicts, one for each node of the disk hierarchy. Each dict
875 contains the LD parameters of the node. The tree is flattened in-order.
876
877 """
878 if disk_template not in constants.DISK_TEMPLATES:
879 raise errors.ProgrammerError("Unknown disk template %s" % disk_template)
880
881 assert disk_template in disk_params
882
883 result = list()
884 dt_params = disk_params[disk_template]
885
886 if disk_template == constants.DT_DRBD8:
887 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_DRBD8], {
888 constants.LDP_RESYNC_RATE: dt_params[constants.DRBD_RESYNC_RATE],
889 constants.LDP_BARRIERS: dt_params[constants.DRBD_DISK_BARRIERS],
890 constants.LDP_NO_META_FLUSH: dt_params[constants.DRBD_META_BARRIERS],
891 constants.LDP_DEFAULT_METAVG: dt_params[constants.DRBD_DEFAULT_METAVG],
892 constants.LDP_DISK_CUSTOM: dt_params[constants.DRBD_DISK_CUSTOM],
893 constants.LDP_NET_CUSTOM: dt_params[constants.DRBD_NET_CUSTOM],
894 constants.LDP_PROTOCOL: dt_params[constants.DRBD_PROTOCOL],
895 constants.LDP_DYNAMIC_RESYNC: dt_params[constants.DRBD_DYNAMIC_RESYNC],
896 constants.LDP_PLAN_AHEAD: dt_params[constants.DRBD_PLAN_AHEAD],
897 constants.LDP_FILL_TARGET: dt_params[constants.DRBD_FILL_TARGET],
898 constants.LDP_DELAY_TARGET: dt_params[constants.DRBD_DELAY_TARGET],
899 constants.LDP_MAX_RATE: dt_params[constants.DRBD_MAX_RATE],
900 constants.LDP_MIN_RATE: dt_params[constants.DRBD_MIN_RATE],
901 }))
902
903
904 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
905 constants.LDP_STRIPES: dt_params[constants.DRBD_DATA_STRIPES],
906 }))
907
908
909 result.append(FillDict(constants.DISK_LD_DEFAULTS[constants.DT_PLAIN], {
910 constants.LDP_STRIPES: dt_params[constants.DRBD_META_STRIPES],
911 }))
912
913 else:
914 defaults = constants.DISK_LD_DEFAULTS[disk_template]
915 values = {}
916 for field in defaults:
917 values[field] = dt_params[field]
918 result.append(FillDict(defaults, values))
919
920 return result
921
924 """Config object representing instance policy limits dictionary.
925
926 Note that this object is not actually used in the config, it's just
927 used as a placeholder for a few functions.
928
929 """
930 @classmethod
938
939 @classmethod
960
961 @classmethod
968
969 @classmethod
1010
1011 @classmethod
1013 """Check the instance policy specs for validity on a given key.
1014
1015 We check if the instance specs makes sense for a given key, that is
1016 if minmaxspecs[min][name] <= stdspec[name] <= minmaxspec[max][name].
1017
1018 @type minmaxspecs: dict
1019 @param minmaxspecs: dictionary with min and max instance spec
1020 @type stdspec: dict
1021 @param stdspec: dictionary with standard instance spec
1022 @type name: string
1023 @param name: what are the limits for
1024 @type check_std: bool
1025 @param check_std: Whether to check std value or just assume compliance
1026 @rtype: bool
1027 @return: C{True} when specs are valid, C{False} when standard spec for the
1028 given name is not valid
1029 @raise errors.ConfigurationError: when min/max specs for the given name
1030 are not valid
1031
1032 """
1033 minspec = minmaxspecs[constants.ISPECS_MIN]
1034 maxspec = minmaxspecs[constants.ISPECS_MAX]
1035 min_v = minspec[name]
1036 max_v = maxspec[name]
1037
1038 if min_v > max_v:
1039 err = ("Invalid specification of min/max values for %s: %s/%s" %
1040 (name, min_v, max_v))
1041 raise errors.ConfigurationError(err)
1042 elif check_std:
1043 std_v = stdspec.get(name, min_v)
1044 return std_v >= min_v and std_v <= max_v
1045 else:
1046 return True
1047
1048 @classmethod
1050 """Checks the disk templates for validity.
1051
1052 """
1053 if not disk_templates:
1054 raise errors.ConfigurationError("Instance policy must contain" +
1055 " at least one disk template")
1056 wrong = frozenset(disk_templates).difference(constants.DISK_TEMPLATES)
1057 if wrong:
1058 raise errors.ConfigurationError("Invalid disk template(s) %s" %
1059 utils.CommaJoin(wrong))
1060
1061 @classmethod
1063 """Checks a parameter.
1064
1065 Currently we expect all parameters to be float values.
1066
1067 """
1068 try:
1069 float(value)
1070 except (TypeError, ValueError), err:
1071 raise errors.ConfigurationError("Invalid value for key" " '%s':"
1072 " '%s', error: %s" % (key, value, err))
1073
1076 """Config object representing an instance."""
1077 __slots__ = [
1078 "name",
1079 "primary_node",
1080 "os",
1081 "hypervisor",
1082 "hvparams",
1083 "beparams",
1084 "osparams",
1085 "admin_state",
1086 "admin_state_source",
1087 "nics",
1088 "disks",
1089 "disk_template",
1090 "disks_active",
1091 "network_port",
1092 "serial_no",
1093 ] + _TIMESTAMPS + _UUID
1094
1096 """Compute the list of secondary nodes.
1097
1098 This is a simple wrapper over _ComputeAllNodes.
1099
1100 """
1101 all_nodes = set(self._ComputeAllNodes())
1102 all_nodes.discard(self.primary_node)
1103 return tuple(all_nodes)
1104
1105 secondary_nodes = property(_ComputeSecondaryNodes, None, None,
1106 "List of names of secondary nodes")
1107
1109 """Compute the list of all nodes.
1110
1111 Since the data is already there (in the drbd disks), keeping it as
1112 a separate normal attribute is redundant and if not properly
1113 synchronised can cause problems. Thus it's better to compute it
1114 dynamically.
1115
1116 """
1117 def _Helper(nodes, device):
1118 """Recursively computes nodes given a top device."""
1119 if device.dev_type in constants.DTS_DRBD:
1120 nodea, nodeb = device.logical_id[:2]
1121 nodes.add(nodea)
1122 nodes.add(nodeb)
1123 if device.children:
1124 for child in device.children:
1125 _Helper(nodes, child)
1126
1127 all_nodes = set()
1128 for device in self.disks:
1129 _Helper(all_nodes, device)
1130
1131 all_nodes.discard(self.primary_node)
1132 return (self.primary_node, ) + tuple(all_nodes)
1133
1134 all_nodes = property(_ComputeAllNodes, None, None,
1135 "List of names of all the nodes of the instance")
1136
1137 def MapLVsByNode(self, lvmap=None, devs=None, node_uuid=None):
1138 """Provide a mapping of nodes to LVs this instance owns.
1139
1140 This function figures out what logical volumes should belong on
1141 which nodes, recursing through a device tree.
1142
1143 @type lvmap: dict
1144 @param lvmap: optional dictionary to receive the
1145 'node' : ['lv', ...] data.
1146 @type devs: list of L{Disk}
1147 @param devs: disks to get the LV name for. If None, all disk of this
1148 instance are used.
1149 @type node_uuid: string
1150 @param node_uuid: UUID of the node to get the LV names for. If None, the
1151 primary node of this instance is used.
1152 @return: None if lvmap arg is given, otherwise, a dictionary of
1153 the form { 'node_uuid' : ['volume1', 'volume2', ...], ... };
1154 volumeN is of the form "vg_name/lv_name", compatible with
1155 GetVolumeList()
1156
1157 """
1158 if node_uuid is None:
1159 node_uuid = self.primary_node
1160
1161 if lvmap is None:
1162 lvmap = {
1163 node_uuid: [],
1164 }
1165 ret = lvmap
1166 else:
1167 if not node_uuid in lvmap:
1168 lvmap[node_uuid] = []
1169 ret = None
1170
1171 if not devs:
1172 devs = self.disks
1173
1174 for dev in devs:
1175 if dev.dev_type == constants.DT_PLAIN:
1176 lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1])
1177
1178 elif dev.dev_type in constants.DTS_DRBD:
1179 if dev.children:
1180 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
1181 self.MapLVsByNode(lvmap, dev.children, dev.logical_id[1])
1182
1183 elif dev.children:
1184 self.MapLVsByNode(lvmap, dev.children, node_uuid)
1185
1186 return ret
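# Shape of the returned mapping, for a hypothetical DRBD instance whose data
# and metadata LVs live in volume group "xenvg" on two nodes:
#
#   {"node-A-uuid": ["xenvg/inst-data", "xenvg/inst-meta"],
#    "node-B-uuid": ["xenvg/inst-data", "xenvg/inst-meta"]}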
1187
1189 """Find a disk given having a specified index.
1190
1191 This is just a wrapper that does validation of the index.
1192
1193 @type idx: int
1194 @param idx: the disk index
1195 @rtype: L{Disk}
1196 @return: the corresponding disk
1197 @raise errors.OpPrereqError: when the given index is not valid
1198
1199 """
1200 try:
1201 idx = int(idx)
1202 return self.disks[idx]
1203 except (TypeError, ValueError), err:
1204 raise errors.OpPrereqError("Invalid disk index: '%s'" % str(err),
1205 errors.ECODE_INVAL)
1206 except IndexError:
1207 raise errors.OpPrereqError("Invalid disk index: %d (instance has disks"
1208 " 0 to %d)" % (idx, len(self.disks) - 1),
1209 errors.ECODE_INVAL)
1210
1212 """Instance-specific conversion to standard python types.
1213
1214 This replaces the children lists of objects with lists of standard
1215 python types.
1216
1217 """
1218 bo = super(Instance, self).ToDict()
1219
1220 for attr in "nics", "disks":
1221 alist = bo.get(attr, None)
1222 if alist:
1223 nlist = outils.ContainerToDicts(alist)
1224 else:
1225 nlist = []
1226 bo[attr] = nlist
1227 return bo
1228
1229 @classmethod
1245
1247 """Fill defaults for missing configuration values.
1248
1249 """
1250 if self.admin_state_source is None:
1251 self.admin_state_source = constants.ADMIN_SOURCE
1252 for nic in self.nics:
1253 nic.UpgradeConfig()
1254 for disk in self.disks:
1255 disk.UpgradeConfig()
1256 if self.hvparams:
1257 for key in constants.HVC_GLOBALS:
1258 try:
1259 del self.hvparams[key]
1260 except KeyError:
1261 pass
1262 if self.osparams is None:
1263 self.osparams = {}
1264 UpgradeBeParams(self.beparams)
1265 if self.disks_active is None:
1266 self.disks_active = self.admin_state == constants.ADMINST_UP
1267
1268
1269 class OS(ConfigObject):
1270 """Config object representing an operating system.
1271
1272 @type supported_parameters: list
1273 @ivar supported_parameters: a list of tuples, name and description,
1274 containing the supported parameters by this OS
1275
1276 @type VARIANT_DELIM: string
1277 @cvar VARIANT_DELIM: the variant delimiter
1278
1279 """
1280 __slots__ = [
1281 "name",
1282 "path",
1283 "api_versions",
1284 "create_script",
1285 "export_script",
1286 "import_script",
1287 "rename_script",
1288 "verify_script",
1289 "supported_variants",
1290 "supported_parameters",
1291 ]
1292
1293 VARIANT_DELIM = "+"
1294
1295 @classmethod
1297 """Splits the name into the proper name and variant.
1298
1299 @param name: the OS (unprocessed) name
1300 @rtype: list
1301 @return: a list of two elements; if the original name didn't
1302 contain a variant, the variant is returned as an empty string
1303
1304 """
1305 nv = name.split(cls.VARIANT_DELIM, 1)
1306 if len(nv) == 1:
1307 nv.append("")
1308 return nv
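# Behaviour of the split, using a made-up OS name:
#
#   OS.SplitNameVariant("debian+squeeze")  ->  ["debian", "squeeze"]
#   OS.SplitNameVariant("debian")          ->  ["debian", ""]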
1309
1310 @classmethod
1312 """Returns the proper name of the os (without the variant).
1313
1314 @param name: the OS (unprocessed) name
1315
1316 """
1317 return cls.SplitNameVariant(name)[0]
1318
1319 @classmethod
1321 """Returns the variant the os (without the base name).
1322
1323 @param name: the OS (unprocessed) name
1324
1325 """
1326 return cls.SplitNameVariant(name)[1]
1327
1330 """Config object representing an External Storage Provider.
1331
1332 """
1333 __slots__ = [
1334 "name",
1335 "path",
1336 "create_script",
1337 "remove_script",
1338 "grow_script",
1339 "attach_script",
1340 "detach_script",
1341 "setinfo_script",
1342 "verify_script",
1343 "supported_parameters",
1344 ]
1345
1348 """Hypvervisor state on a node.
1349
1350 @ivar mem_total: Total amount of memory
1351 @ivar mem_node: Memory used by, or reserved for, the node itself (not always
1352 available)
1353 @ivar mem_hv: Memory used by hypervisor or lost due to instance allocation
1354 rounding
1355 @ivar mem_inst: Memory used by instances living on node
1356 @ivar cpu_total: Total node CPU core count
1357 @ivar cpu_node: Number of CPU cores reserved for the node itself
1358
1359 """
1360 __slots__ = [
1361 "mem_total",
1362 "mem_node",
1363 "mem_hv",
1364 "mem_inst",
1365 "cpu_total",
1366 "cpu_node",
1367 ] + _TIMESTAMPS
1368
1371 """Disk state on a node.
1372
1373 """
1374 __slots__ = [
1375 "total",
1376 "reserved",
1377 "overhead",
1378 ] + _TIMESTAMPS
1379
1380
1381 class Node(TaggableObject):
1382 """Config object representing a node.
1383
1384 @ivar hv_state: Hypervisor state (e.g. number of CPUs)
1385 @ivar hv_state_static: Hypervisor state overridden by user
1386 @ivar disk_state: Disk state (e.g. free space)
1387 @ivar disk_state_static: Disk state overridden by user
1388
1389 """
1390 __slots__ = [
1391 "name",
1392 "primary_ip",
1393 "secondary_ip",
1394 "serial_no",
1395 "master_candidate",
1396 "offline",
1397 "drained",
1398 "group",
1399 "master_capable",
1400 "vm_capable",
1401 "ndparams",
1402 "powered",
1403 "hv_state",
1404 "hv_state_static",
1405 "disk_state",
1406 "disk_state_static",
1407 ] + _TIMESTAMPS + _UUID
1408
1410 """Fill defaults for missing configuration values.
1411
1412 """
1413
1414
1415 if self.master_capable is None:
1416 self.master_capable = True
1417
1418 if self.vm_capable is None:
1419 self.vm_capable = True
1420
1421 if self.ndparams is None:
1422 self.ndparams = {}
1423
1424 for key in constants.NDC_GLOBALS:
1425 if key in self.ndparams:
1426 logging.warning("Ignoring %s node parameter for node %s",
1427 key, self.name)
1428 del self.ndparams[key]
1429
1430 if self.powered is None:
1431 self.powered = True
1432
1434 """Custom function for serializing.
1435
1436 """
1437 data = super(Node, self).ToDict()
1438
1439 hv_state = data.get("hv_state", None)
1440 if hv_state is not None:
1441 data["hv_state"] = outils.ContainerToDicts(hv_state)
1442
1443 disk_state = data.get("disk_state", None)
1444 if disk_state is not None:
1445 data["disk_state"] = \
1446 dict((key, outils.ContainerToDicts(value))
1447 for (key, value) in disk_state.items())
1448
1449 return data
1450
1451 @classmethod
1453 """Custom function for deserializing.
1454
1455 """
1456 obj = super(Node, cls).FromDict(val)
1457
1458 if obj.hv_state is not None:
1459 obj.hv_state = \
1460 outils.ContainerFromDicts(obj.hv_state, dict, NodeHvState)
1461
1462 if obj.disk_state is not None:
1463 obj.disk_state = \
1464 dict((key, outils.ContainerFromDicts(value, dict, NodeDiskState))
1465 for (key, value) in obj.disk_state.items())
1466
1467 return obj
1468
1471 """Config object representing a node group."""
1472 __slots__ = [
1473 "name",
1474 "members",
1475 "ndparams",
1476 "diskparams",
1477 "ipolicy",
1478 "serial_no",
1479 "hv_state_static",
1480 "disk_state_static",
1481 "alloc_policy",
1482 "networks",
1483 ] + _TIMESTAMPS + _UUID
1484
1486 """Custom function for nodegroup.
1487
1488 This discards the members object, which gets recalculated and is only kept
1489 in memory.
1490
1491 """
1492 mydict = super(NodeGroup, self).ToDict()
1493 del mydict["members"]
1494 return mydict
1495
1496 @classmethod
1498 """Custom function for nodegroup.
1499
1500 The members slot is initialized to an empty list, upon deserialization.
1501
1502 """
1503 obj = super(NodeGroup, cls).FromDict(val)
1504 obj.members = []
1505 return obj
1506
1508 """Fill defaults for missing configuration values.
1509
1510 """
1511 if self.ndparams is None:
1512 self.ndparams = {}
1513
1514 if self.serial_no is None:
1515 self.serial_no = 1
1516
1517 if self.alloc_policy is None:
1518 self.alloc_policy = constants.ALLOC_POLICY_PREFERRED
1519
1520
1521
1522 if self.mtime is None:
1523 self.mtime = time.time()
1524
1525 if self.diskparams is None:
1526 self.diskparams = {}
1527 if self.ipolicy is None:
1528 self.ipolicy = MakeEmptyIPolicy()
1529
1530 if self.networks is None:
1531 self.networks = {}
1532
1533 for network, netparams in self.networks.items():
1534 self.networks[network] = FillDict(constants.NICC_DEFAULTS, netparams)
1535
1537 """Return filled out ndparams for L{objects.Node}
1538
1539 @type node: L{objects.Node}
1540 @param node: A Node object to fill
1541 @return: a copy of the node's ndparams with defaults filled
1542
1543 """
1544 return self.SimpleFillND(node.ndparams)
1545
1547 """Fill a given ndparams dict with defaults.
1548
1549 @type ndparams: dict
1550 @param ndparams: the dict to fill
1551 @rtype: dict
1552 @return: a copy of the passed in ndparams with missing keys filled
1553 from the node group defaults
1554
1555 """
1556 return FillDict(self.ndparams, ndparams)
1557
1558
1559 class Cluster(TaggableObject):
1560 """Config object representing the cluster."""
1561 __slots__ = [
1562 "serial_no",
1563 "rsahostkeypub",
1564 "dsahostkeypub",
1565 "highest_used_port",
1566 "tcpudp_port_pool",
1567 "mac_prefix",
1568 "volume_group_name",
1569 "reserved_lvs",
1570 "drbd_usermode_helper",
1571 "default_bridge",
1572 "default_hypervisor",
1573 "master_node",
1574 "master_ip",
1575 "master_netdev",
1576 "master_netmask",
1577 "use_external_mip_script",
1578 "cluster_name",
1579 "file_storage_dir",
1580 "shared_file_storage_dir",
1581 "gluster_storage_dir",
1582 "enabled_hypervisors",
1583 "hvparams",
1584 "ipolicy",
1585 "os_hvp",
1586 "beparams",
1587 "osparams",
1588 "nicparams",
1589 "ndparams",
1590 "diskparams",
1591 "candidate_pool_size",
1592 "modify_etc_hosts",
1593 "modify_ssh_setup",
1594 "maintain_node_health",
1595 "uid_pool",
1596 "default_iallocator",
1597 "default_iallocator_params",
1598 "hidden_os",
1599 "blacklisted_os",
1600 "primary_ip_family",
1601 "prealloc_wipe_disks",
1602 "hv_state_static",
1603 "disk_state_static",
1604 "enabled_disk_templates",
1605 "candidate_certs",
1606 "max_running_jobs",
1607 "enabled_user_shutdown",
1608 ] + _TIMESTAMPS + _UUID
1609
1611 """Fill defaults for missing configuration values.
1612
1613 """
1614
1615
1616 if self.hvparams is None:
1617 self.hvparams = constants.HVC_DEFAULTS
1618 else:
1619 for hypervisor in constants.HYPER_TYPES:
1620 try:
1621 existing_params = self.hvparams[hypervisor]
1622 except KeyError:
1623 existing_params = {}
1624 self.hvparams[hypervisor] = FillDict(
1625 constants.HVC_DEFAULTS[hypervisor], existing_params)
1626
1627 if self.os_hvp is None:
1628 self.os_hvp = {}
1629
1630
1631 if self.osparams is None:
1632 self.osparams = {}
1633
1634 self.ndparams = UpgradeNDParams(self.ndparams)
1635
1636 self.beparams = UpgradeGroupedParams(self.beparams,
1637 constants.BEC_DEFAULTS)
1638 for beparams_group in self.beparams:
1639 UpgradeBeParams(self.beparams[beparams_group])
1640
1641 migrate_default_bridge = not self.nicparams
1642 self.nicparams = UpgradeGroupedParams(self.nicparams,
1643 constants.NICC_DEFAULTS)
1644 if migrate_default_bridge:
1645 self.nicparams[constants.PP_DEFAULT][constants.NIC_LINK] = \
1646 self.default_bridge
1647
1648 if self.modify_etc_hosts is None:
1649 self.modify_etc_hosts = True
1650
1651 if self.modify_ssh_setup is None:
1652 self.modify_ssh_setup = True
1653
1654
1655
1656
1657 if self.default_bridge is not None:
1658 self.default_bridge = None
1659
1660
1661
1662 if self.default_hypervisor is not None:
1663 self.enabled_hypervisors = ([self.default_hypervisor] +
1664 [hvname for hvname in self.enabled_hypervisors
1665 if hvname != self.default_hypervisor])
1666 self.default_hypervisor = None
1667
1668
1669 if self.maintain_node_health is None:
1670 self.maintain_node_health = False
1671
1672 if self.uid_pool is None:
1673 self.uid_pool = []
1674
1675 if self.default_iallocator is None:
1676 self.default_iallocator = ""
1677
1678 if self.default_iallocator_params is None:
1679 self.default_iallocator_params = {}
1680
1681
1682 if self.reserved_lvs is None:
1683 self.reserved_lvs = []
1684
1685
1686 if self.hidden_os is None:
1687 self.hidden_os = []
1688
1689 if self.blacklisted_os is None:
1690 self.blacklisted_os = []
1691
1692
1693 if self.primary_ip_family is None:
1694 self.primary_ip_family = AF_INET
1695
1696 if self.master_netmask is None:
1697 ipcls = netutils.IPAddress.GetClassFromIpFamily(self.primary_ip_family)
1698 self.master_netmask = ipcls.iplen
1699
1700 if self.prealloc_wipe_disks is None:
1701 self.prealloc_wipe_disks = False
1702
1703
1704 if self.shared_file_storage_dir is None:
1705 self.shared_file_storage_dir = ""
1706
1707
1708 if self.gluster_storage_dir is None:
1709 self.gluster_storage_dir = ""
1710
1711 if self.use_external_mip_script is None:
1712 self.use_external_mip_script = False
1713
1714 if self.diskparams:
1715 self.diskparams = UpgradeDiskParams(self.diskparams)
1716 else:
1717 self.diskparams = constants.DISK_DT_DEFAULTS.copy()
1718
1719
1720 if self.ipolicy is None:
1721 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, {})
1722 else:
1723
1724
1725
1726 wrongkeys = frozenset(self.ipolicy.keys()) - constants.IPOLICY_ALL_KEYS
1727 if wrongkeys:
1728
1729 msg = ("Cluster instance policy contains spurious keys: %s" %
1730 utils.CommaJoin(wrongkeys))
1731 raise errors.ConfigurationError(msg)
1732 self.ipolicy = FillIPolicy(constants.IPOLICY_DEFAULTS, self.ipolicy)
1733
1734
1735 if self.hv_state_static is None:
1736 self.hv_state_static = {}
1737 if self.disk_state_static is None:
1738 self.disk_state_static = {}
1739
1740 if self.candidate_certs is None:
1741 self.candidate_certs = {}
1742
1743 if self.max_running_jobs is None:
1744 self.max_running_jobs = constants.LUXID_MAXIMAL_RUNNING_JOBS_DEFAULT
1745
1746 if self.enabled_user_shutdown is None:
1747 self.enabled_user_shutdown = False
1748
1749 @property
1751 """The first hypervisor is the primary.
1752
1753 Useful, for example, for L{Node}'s hv/disk state.
1754
1755 """
1756 return self.enabled_hypervisors[0]
1757
1759 """Custom function for cluster.
1760
1761 """
1762 mydict = super(Cluster, self).ToDict()
1763
1764 if self.tcpudp_port_pool is None:
1765 tcpudp_port_pool = []
1766 else:
1767 tcpudp_port_pool = list(self.tcpudp_port_pool)
1768
1769 mydict["tcpudp_port_pool"] = tcpudp_port_pool
1770
1771 return mydict
1772
1773 @classmethod
1775 """Custom function for cluster.
1776
1777 """
1778 obj = super(Cluster, cls).FromDict(val)
1779
1780 if obj.tcpudp_port_pool is None:
1781 obj.tcpudp_port_pool = set()
1782 elif not isinstance(obj.tcpudp_port_pool, set):
1783 obj.tcpudp_port_pool = set(obj.tcpudp_port_pool)
1784
1785 return obj
1786
1788 """Fill a given diskparams dict with cluster defaults.
1789
1790 @param diskparams: The diskparams
1791 @return: The defaults dict
1792
1793 """
1794 return FillDiskParams(self.diskparams, diskparams)
1795
1796 def GetHVDefaults(self, hypervisor, os_name=None, skip_keys=None):
1797 """Get the default hypervisor parameters for the cluster.
1798
1799 @param hypervisor: the hypervisor name
1800 @param os_name: if specified, we'll also update the defaults for this OS
1801 @param skip_keys: if passed, list of keys not to use
1802 @return: the defaults dict
1803
1804 """
1805 if skip_keys is None:
1806 skip_keys = []
1807
1808 fill_stack = [self.hvparams.get(hypervisor, {})]
1809 if os_name is not None:
1810 os_hvp = self.os_hvp.get(os_name, {}).get(hypervisor, {})
1811 fill_stack.append(os_hvp)
1812
1813 ret_dict = {}
1814 for o_dict in fill_stack:
1815 ret_dict = FillDict(ret_dict, o_dict, skip_keys=skip_keys)
1816
1817 return ret_dict
1818
1819 def SimpleFillHV(self, hv_name, os_name, hvparams, skip_globals=False):
1820 """Fill a given hvparams dict with cluster defaults.
1821
1822 @type hv_name: string
1823 @param hv_name: the hypervisor to use
1824 @type os_name: string
1825 @param os_name: the OS to use for overriding the hypervisor defaults
1826 @type skip_globals: boolean
1827 @param skip_globals: if True, the global hypervisor parameters will
1828 not be filled
1829 @rtype: dict
1830 @return: a copy of the given hvparams with missing keys filled from
1831 the cluster defaults
1832
1833 """
1834 if skip_globals:
1835 skip_keys = constants.HVC_GLOBALS
1836 else:
1837 skip_keys = []
1838
1839 def_dict = self.GetHVDefaults(hv_name, os_name, skip_keys=skip_keys)
1840 return FillDict(def_dict, hvparams, skip_keys=skip_keys)
1841
1842 def FillHV(self, instance, skip_globals=False):
1843 """Fill an instance's hvparams dict with cluster defaults.
1844
1845 @type instance: L{objects.Instance}
1846 @param instance: the instance parameter to fill
1847 @type skip_globals: boolean
1848 @param skip_globals: if True, the global hypervisor parameters will
1849 not be filled
1850 @rtype: dict
1851 @return: a copy of the instance's hvparams with missing keys filled from
1852 the cluster defaults
1853
1854 """
1855 return self.SimpleFillHV(instance.hypervisor, instance.os,
1856 instance.hvparams, skip_globals)
1857
1859 """Fill a given beparams dict with cluster defaults.
1860
1861 @type beparams: dict
1862 @param beparams: the dict to fill
1863 @rtype: dict
1864 @return: a copy of the passed in beparams with missing keys filled
1865 from the cluster defaults
1866
1867 """
1868 return FillDict(self.beparams.get(constants.PP_DEFAULT, {}), beparams)
1869
1871 """Fill an instance's beparams dict with cluster defaults.
1872
1873 @type instance: L{objects.Instance}
1874 @param instance: the instance parameter to fill
1875 @rtype: dict
1876 @return: a copy of the instance's beparams with missing keys filled from
1877 the cluster defaults
1878
1879 """
1880 return self.SimpleFillBE(instance.beparams)
1881
1883 """Fill a given nicparams dict with cluster defaults.
1884
1885 @type nicparams: dict
1886 @param nicparams: the dict to fill
1887 @rtype: dict
1888 @return: a copy of the passed in nicparams with missing keys filled
1889 from the cluster defaults
1890
1891 """
1892 return FillDict(self.nicparams.get(constants.PP_DEFAULT, {}), nicparams)
1893
1895 """Fill an instance's osparams dict with cluster defaults.
1896
1897 @type os_name: string
1898 @param os_name: the OS name to use
1899 @type os_params: dict
1900 @param os_params: the dict to fill with default values
1901 @rtype: dict
1902 @return: a copy of the instance's osparams with missing keys filled from
1903 the cluster defaults
1904
1905 """
1906 name_only = os_name.split("+", 1)[0]
1907
1908 result = self.osparams.get(name_only, {})
1909
1910 result = FillDict(result, self.osparams.get(os_name, {}))
1911
1912 return FillDict(result, os_params)
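# The layering above means that, for a hypothetical OS name "debootstrap+buster",
# values are looked up in this order, with later entries overriding earlier ones:
#
#   cluster osparams["debootstrap"]
#   -> cluster osparams["debootstrap+buster"]
#   -> the os_params passed in for the instance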
1913
1914 @staticmethod
1920
1921 @staticmethod
1927
1928 def FillND(self, node, nodegroup):
1929 """Return filled out ndparams for L{objects.NodeGroup} and L{objects.Node}
1930
1931 @type node: L{objects.Node}
1932 @param node: A Node object to fill
1933 @type nodegroup: L{objects.NodeGroup}
1934 @param nodegroup: A NodeGroup object to fill
1935 @return: a copy of the node's ndparams with defaults filled
1936
1937 """
1938 return self.SimpleFillND(nodegroup.FillND(node))
1939
1941 """Return filled out ndparams for just L{objects.NodeGroup}
1942
1943 @type nodegroup: L{objects.NodeGroup}
1944 @param nodegroup: A NodeGroup object to fill
1945 @return: a copy of the node group's ndparams with defaults filled
1946
1947 """
1948 return self.SimpleFillND(nodegroup.SimpleFillND({}))
1949
1951 """Fill a given ndparams dict with defaults.
1952
1953 @type ndparams: dict
1954 @param ndparams: the dict to fill
1955 @rtype: dict
1956 @return: a copy of the passed in ndparams with missing keys filled
1957 from the cluster defaults
1958
1959 """
1960 return FillDict(self.ndparams, ndparams)
1961
1963 """ Fill instance policy dict with defaults.
1964
1965 @type ipolicy: dict
1966 @param ipolicy: the dict to fill
1967 @rtype: dict
1968 @return: a copy of passed ipolicy with missing keys filled from
1969 the cluster defaults
1970
1971 """
1972 return FillIPolicy(self.ipolicy, ipolicy)
1973
1975 """Checks if a particular disk template is enabled.
1976
1977 """
1978 return utils.storage.IsDiskTemplateEnabled(
1979 disk_template, self.enabled_disk_templates)
1980
1986
1993
1996 """Config object representing the status of a block device."""
1997 __slots__ = [
1998 "dev_path",
1999 "major",
2000 "minor",
2001 "sync_percent",
2002 "estimated_time",
2003 "is_degraded",
2004 "ldisk_status",
2005 ]
2006
2009 """Config object representing the status of an import or export."""
2010 __slots__ = [
2011 "recent_output",
2012 "listen_port",
2013 "connected",
2014 "progress_mbytes",
2015 "progress_throughput",
2016 "progress_eta",
2017 "progress_percent",
2018 "exit_status",
2019 "error_message",
2020 ] + _TIMESTAMPS
2021
2024 """Options for import/export daemon
2025
2026 @ivar key_name: X509 key name (None for cluster certificate)
2027 @ivar ca_pem: Remote peer CA in PEM format (None for cluster certificate)
2028 @ivar compress: Compression method (one of L{constants.IEC_ALL})
2029 @ivar magic: Used to ensure the connection goes to the right disk
2030 @ivar ipv6: Whether to use IPv6
2031 @ivar connect_timeout: Number of seconds for establishing connection
2032
2033 """
2034 __slots__ = [
2035 "key_name",
2036 "ca_pem",
2037 "compress",
2038 "magic",
2039 "ipv6",
2040 "connect_timeout",
2041 ]
2042
2045 """Object holding a confd request.
2046
2047 @ivar protocol: confd protocol version
2048 @ivar type: confd query type
2049 @ivar query: query request
2050 @ivar rsalt: requested reply salt
2051
2052 """
2053 __slots__ = [
2054 "protocol",
2055 "type",
2056 "query",
2057 "rsalt",
2058 ]
2059
2062 """Object holding a confd reply.
2063
2064 @ivar protocol: confd protocol version
2065 @ivar status: reply status code (ok, error)
2066 @ivar answer: confd query reply
2067 @ivar serial: configuration serial number
2068
2069 """
2070 __slots__ = [
2071 "protocol",
2072 "status",
2073 "answer",
2074 "serial",
2075 ]
2076
2079 """Object holding a query field definition.
2080
2081 @ivar name: Field name
2082 @ivar title: Human-readable title
2083 @ivar kind: Field type
2084 @ivar doc: Human-readable description
2085
2086 """
2087 __slots__ = [
2088 "name",
2089 "title",
2090 "kind",
2091 "doc",
2092 ]
2093
2096 __slots__ = [
2097 "fields",
2098 ]
2099
2107
2108 @classmethod
2117
2120 """Object holding the response to a query.
2121
2122 @ivar fields: List of L{QueryFieldDefinition} objects
2123 @ivar data: Requested data
2124
2125 """
2126 __slots__ = [
2127 "data",
2128 ]
2129
2132 """Object holding a request for querying available fields.
2133
2134 """
2135 __slots__ = [
2136 "what",
2137 "fields",
2138 ]
2139
2142 """Object holding the response to a query for fields.
2143
2144 @ivar fields: List of L{QueryFieldDefinition} objects
2145
2146 """
2147 __slots__ = []
2148
2151 """Object holding the status of a migration.
2152
2153 """
2154 __slots__ = [
2155 "status",
2156 "transferred_ram",
2157 "total_ram",
2158 ]
2159
2162 """Object describing how to access the console of an instance.
2163
2164 """
2165 __slots__ = [
2166 "instance",
2167 "kind",
2168 "message",
2169 "host",
2170 "port",
2171 "user",
2172 "command",
2173 "display",
2174 ]
2175
2177 """Validates contents of this object.
2178
2179 """
2180 assert self.kind in constants.CONS_ALL, "Unknown console type"
2181 assert self.instance, "Missing instance name"
2182 assert self.message or self.kind in [constants.CONS_SSH,
2183 constants.CONS_SPICE,
2184 constants.CONS_VNC]
2185 assert self.host or self.kind == constants.CONS_MESSAGE
2186 assert self.port or self.kind in [constants.CONS_MESSAGE,
2187 constants.CONS_SSH]
2188 assert self.user or self.kind in [constants.CONS_MESSAGE,
2189 constants.CONS_SPICE,
2190 constants.CONS_VNC]
2191 assert self.command or self.kind in [constants.CONS_MESSAGE,
2192 constants.CONS_SPICE,
2193 constants.CONS_VNC]
2194 assert self.display or self.kind in [constants.CONS_MESSAGE,
2195 constants.CONS_SPICE,
2196 constants.CONS_SSH]
2197
2198
2199 class Network(TaggableObject):
2200 """Object representing a network definition for ganeti.
2201
2202 """
2203 __slots__ = [
2204 "name",
2205 "serial_no",
2206 "mac_prefix",
2207 "network",
2208 "network6",
2209 "gateway",
2210 "gateway6",
2211 "reservations",
2212 "ext_reservations",
2213 ] + _TIMESTAMPS + _UUID
2214
2216 """Export a dictionary used by hooks with a network's information.
2217
2218 @type prefix: String
2219 @param prefix: Prefix to prepend to the dict entries
2220
2221 """
2222 result = {
2223 "%sNETWORK_NAME" % prefix: self.name,
2224 "%sNETWORK_UUID" % prefix: self.uuid,
2225 "%sNETWORK_TAGS" % prefix: " ".join(self.GetTags()),
2226 }
2227 if self.network:
2228 result["%sNETWORK_SUBNET" % prefix] = self.network
2229 if self.gateway:
2230 result["%sNETWORK_GATEWAY" % prefix] = self.gateway
2231 if self.network6:
2232 result["%sNETWORK_SUBNET6" % prefix] = self.network6
2233 if self.gateway6:
2234 result["%sNETWORK_GATEWAY6" % prefix] = self.gateway6
2235 if self.mac_prefix:
2236 result["%sNETWORK_MAC_PREFIX" % prefix] = self.mac_prefix
2237
2238 return result
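# Example of the hook environment produced above for a network named "net1"
# with IPv4 subnet 192.0.2.0/24, using the prefix "NIC0_" (all values made up):
#
#   {"NIC0_NETWORK_NAME": "net1",
#    "NIC0_NETWORK_UUID": "...",
#    "NIC0_NETWORK_TAGS": "",
#    "NIC0_NETWORK_SUBNET": "192.0.2.0/24"}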
2239
2240 @classmethod
2242 """Custom function for networks.
2243
2244 Remove deprecated network_type and family.
2245
2246 """
2247 if "network_type" in val:
2248 del val["network_type"]
2249 if "family" in val:
2250 del val["family"]
2251 obj = super(Network, cls).FromDict(val)
2252 return obj
2253
2257 """Simple wrapper over ConfigParse that allows serialization.
2258
2259 This class is basically ConfigParser.SafeConfigParser with two
2260 additional methods that allow it to serialize/unserialize to/from a
2261 buffer.
2262
2263 """
2265 """Dump this instance and return the string representation."""
2266 buf = StringIO()
2267 self.write(buf)
2268 return buf.getvalue()
2269
2270 @classmethod
2272 """Load data from a string."""
2273 buf = StringIO(data)
2274 cfp = cls()
2275 cfp.readfp(buf)
2276 return cfp
2277
2278 def get(self, section, option, **kwargs):
2279 value = None
2280 try:
2281 value = super(SerializableConfigParser, self).get(section, option,
2282 **kwargs)
2283 if value.lower() == constants.VALUE_NONE:
2284 value = None
2285 except ConfigParser.NoOptionError:
2286 r = re.compile(r"(disk|nic)\d+_name|nic\d+_(network|vlan)")
2287 match = r.match(option)
2288 if match:
2289 pass
2290 else:
2291 raise
2292
2293 return value
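# Round-trip sketch for this parser; the section/option names are arbitrary,
# the Loads/Dumps names refer to the (elided) classmethod/method above, and
# the special handling assumes constants.VALUE_NONE == "none":
#
#   cfp = SerializableConfigParser.Loads("[export]\nos = debian\ncompression = none\n")
#   cfp.get("export", "os")           ->  "debian"
#   cfp.get("export", "compression")  ->  None
#   cfp.Dumps()                       ->  "[export]\nos = debian\ncompression = none\n\n"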
2294
2297 """Information about an LVM physical volume (PV).
2298
2299 @type name: string
2300 @ivar name: name of the PV
2301 @type vg_name: string
2302 @ivar vg_name: name of the volume group containing the PV
2303 @type size: float
2304 @ivar size: size of the PV in MiB
2305 @type free: float
2306 @ivar free: free space in the PV, in MiB
2307 @type attributes: string
2308 @ivar attributes: PV attributes
2309 @type lv_list: list of strings
2310 @ivar lv_list: names of the LVs hosted on the PV
2311 """
2312 __slots__ = [
2313 "name",
2314 "vg_name",
2315 "size",
2316 "free",
2317 "attributes",
2318 "lv_list"
2319 ]
2320
2322 """Is this PV empty?
2323
2324 """
2325 return self.size <= (self.free + 1)
2326
2328 """Is this PV allocatable?
2329
2330 """
2331 return ("a" in self.attributes)
2332