1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31 """Configuration management for Ganeti
32
33 This module provides the interface to the Ganeti cluster configuration.
34
35 The configuration data is stored on every node but is updated on the master
36 only. After each update, the master distributes the data to the other nodes.
37
38 Currently, the data storage format is JSON. YAML was slow and consuming too
39 much memory.
40
41 """
42
43
44
45
46 import copy
47 import os
48 import random
49 import logging
50 import time
51 import itertools
52
53 from ganeti import errors
54 from ganeti import locking
55 from ganeti import utils
56 from ganeti import constants
57 from ganeti import rpc
58 from ganeti import objects
59 from ganeti import serializer
60 from ganeti import uidpool
61 from ganeti import netutils
62 from ganeti import runtime
63 from ganeti import pathutils
64 from ganeti import network
65
66
67 _config_lock = locking.SharedLock("ConfigWriter")
68
69
70 _UPGRADE_CONFIG_JID = "jid-cfg-upgrade"
85
88 """A temporary resource reservation manager.
89
90 This is used to reserve resources in a job, before using them, making sure
91 other jobs cannot get them in the meantime.
92
93 """
95 self._ec_reserved = {}
96
98 for holder_reserved in self._ec_reserved.values():
99 if resource in holder_reserved:
100 return True
101 return False
102
103 - def Reserve(self, ec_id, resource):
104 if self.Reserved(resource):
105 raise errors.ReservationError("Duplicate reservation for resource '%s'"
106 % str(resource))
107 if ec_id not in self._ec_reserved:
108 self._ec_reserved[ec_id] = set([resource])
109 else:
110 self._ec_reserved[ec_id].add(resource)
111
113 if ec_id in self._ec_reserved:
114 del self._ec_reserved[ec_id]
115
117 all_reserved = set()
118 for holder_reserved in self._ec_reserved.values():
119 all_reserved.update(holder_reserved)
120 return all_reserved
121
123 """ Used when you want to retrieve all reservations for a specific
    execution context. E.g. when committing reserved IPs for a specific
125 network.
126
127 """
128 ec_reserved = set()
129 if ec_id in self._ec_reserved:
130 ec_reserved.update(self._ec_reserved[ec_id])
131 return ec_reserved
132
133 - def Generate(self, existing, generate_one_fn, ec_id):
134 """Generate a new resource of this type
135
136 """
137 assert callable(generate_one_fn)
138
139 all_elems = self.GetReserved()
140 all_elems.update(existing)
141 retries = 64
142 while retries > 0:
143 new_resource = generate_one_fn()
144 if new_resource is not None and new_resource not in all_elems:
145 break
146 else:
147 raise errors.ConfigurationError("Not able generate new resource"
148 " (last tried: %s)" % new_resource)
149 self.Reserve(ec_id, new_resource)
150 return new_resource
151
154 """Wrapper around L{utils.text.MatchNameComponent}.
155
156 """
157 return utils.MatchNameComponent(short_name, names, case_sensitive=False)
158
161 """Checks if instance's disks' C{iv_name} attributes are in order.
162
163 @type disks: list of L{objects.Disk}
164 @param disks: List of disks
165 @rtype: list of tuples; (int, string, string)
166 @return: List of wrongly named disks, each tuple contains disk index,
167 expected and actual name
168
169 """
170 result = []
171
172 for (idx, disk) in enumerate(disks):
173 exp_iv_name = "disk/%s" % idx
174 if disk.iv_name != exp_iv_name:
175 result.append((idx, exp_iv_name, disk.iv_name))
176
177 return result
178
181 """The interface to the cluster configuration.
182
183 @ivar _temporary_lvs: reservation manager for temporary LVs
184 @ivar _all_rms: a list of all temporary reservation managers
185
186 """
216
218 """Returns RPC runner for configuration.
219
220 """
221 return rpc.ConfigRunner(self._context, address_list)
222
223 - def SetContext(self, context):
224 """Sets Ganeti context.
225
226 """
227 self._context = context
228
229
230 @staticmethod
236
237 @locking.ssynchronized(_config_lock, shared=1)
239 """Get the node params populated with cluster defaults.
240
241 @type node: L{objects.Node}
242 @param node: The node we want to know the params for
243 @return: A dict with the filled in node params
244
245 """
246 nodegroup = self._UnlockedGetNodeGroup(node.group)
247 return self._config_data.cluster.FillND(node, nodegroup)
248
249 @locking.ssynchronized(_config_lock, shared=1)
261
262 @locking.ssynchronized(_config_lock, shared=1)
264 """Get the disk params populated with inherit chain.
265
266 @type group: L{objects.NodeGroup}
267 @param group: The group we want to know the params for
268 @return: A dict with the filled in disk params
269
270 """
271 return self._UnlockedGetGroupDiskParams(group)
272
274 """Get the disk params populated with inherit chain down to node-group.
275
276 @type group: L{objects.NodeGroup}
277 @param group: The group we want to know the params for
278 @return: A dict with the filled in disk params
279
280 """
281 return self._config_data.cluster.SimpleFillDP(group.diskparams)
282
284 """Return the network mac prefix if it exists or the cluster level default.
285
286 """
287 prefix = None
288 if net_uuid:
289 nobj = self._UnlockedGetNetwork(net_uuid)
290 if nobj.mac_prefix:
291 prefix = nobj.mac_prefix
292
293 return prefix
294
    """Return a function that randomly generates a MAC suffix
297 and appends it to the given prefix. If prefix is not given get
298 the cluster level default.
299
300 """
301 if not prefix:
302 prefix = self._config_data.cluster.mac_prefix
303
304 def GenMac():
305 byte1 = random.randrange(0, 256)
306 byte2 = random.randrange(0, 256)
307 byte3 = random.randrange(0, 256)
308 mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
309 return mac
310
311 return GenMac
312
313 @locking.ssynchronized(_config_lock, shared=1)
315 """Generate a MAC for an instance.
316
317 This should check the current instances for duplicates.
318
319 """
320 existing = self._AllMACs()
321 prefix = self._UnlockedGetNetworkMACPrefix(net_uuid)
322 gen_mac = self._GenerateOneMAC(prefix)
323 return self._temporary_ids.Generate(existing, gen_mac, ec_id)
324
325 @locking.ssynchronized(_config_lock, shared=1)
327 """Reserve a MAC for an instance.
328
329 This only checks instances managed by this cluster, it does not
330 check for potential collisions elsewhere.
331
332 """
333 all_macs = self._AllMACs()
334 if mac in all_macs:
335 raise errors.ReservationError("mac already in use")
336 else:
337 self._temporary_macs.Reserve(ec_id, mac)
338
340 """Commit all reserved IP address to their respective pools
341
342 """
343 for action, address, net_uuid in self._temporary_ips.GetECReserved(ec_id):
344 self._UnlockedCommitIp(action, net_uuid, address)
345
358
360 """Give a specific IP address back to an IP pool.
361
362 The IP address is returned to the IP pool designated by pool_id and marked
363 as reserved.
364
365 """
366 self._temporary_ips.Reserve(ec_id,
367 (constants.RELEASE_ACTION, address, net_uuid))
368
369 @locking.ssynchronized(_config_lock, shared=1)
370 - def ReleaseIp(self, net_uuid, address, ec_id):
371 """Give a specified IP address back to an IP pool.
372
373 This is just a wrapper around _UnlockedReleaseIp.
374
375 """
376 if net_uuid:
377 self._UnlockedReleaseIp(net_uuid, address, ec_id)
378
379 @locking.ssynchronized(_config_lock, shared=1)
393
394 _, address, _ = self._temporary_ips.Generate([], gen_one, ec_id)
395 return address
396
416
417 @locking.ssynchronized(_config_lock, shared=1)
418 - def ReserveIp(self, net_uuid, address, ec_id, check=True):
419 """Reserve a given IPv4 address for use by an instance.
420
421 """
422 if net_uuid:
423 return self._UnlockedReserveIp(net_uuid, address, ec_id, check)
424
425 @locking.ssynchronized(_config_lock, shared=1)
427 """Reserve an VG/LV pair for an instance.
428
429 @type lv_name: string
430 @param lv_name: the logical volume name to reserve
431
432 """
433 all_lvs = self._AllLVs()
434 if lv_name in all_lvs:
435 raise errors.ReservationError("LV already in use")
436 else:
437 self._temporary_lvs.Reserve(ec_id, lv_name)
438
439 @locking.ssynchronized(_config_lock, shared=1)
449
451 """Compute the list of all LVs.
452
453 """
454 lvnames = set()
455 for instance in self._config_data.instances.values():
456 node_data = instance.MapLVsByNode()
457 for lv_list in node_data.values():
458 lvnames.update(lv_list)
459 return lvnames
460
462 """Compute the list of all Disks (recursively, including children).
463
464 """
465 def DiskAndAllChildren(disk):
466 """Returns a list containing the given disk and all of his children.
467
468 """
469 disks = [disk]
470 if disk.children:
471 for child_disk in disk.children:
472 disks.extend(DiskAndAllChildren(child_disk))
473 return disks
474
475 disks = []
476 for instance in self._config_data.instances.values():
477 for disk in instance.disks:
478 disks.extend(DiskAndAllChildren(disk))
479 return disks
480
482 """Compute the list of all NICs.
483
484 """
485 nics = []
486 for instance in self._config_data.instances.values():
487 nics.extend(instance.nics)
488 return nics
489
490 - def _AllIDs(self, include_temporary):
491 """Compute the list of all UUIDs and names we have.
492
493 @type include_temporary: boolean
494 @param include_temporary: whether to include the _temporary_ids set
495 @rtype: set
496 @return: a set of IDs
497
498 """
499 existing = set()
500 if include_temporary:
501 existing.update(self._temporary_ids.GetReserved())
502 existing.update(self._AllLVs())
503 existing.update(self._config_data.instances.keys())
504 existing.update(self._config_data.nodes.keys())
505 existing.update([i.uuid for i in self._AllUUIDObjects() if i.uuid])
506 return existing
507
509 """Generate an unique UUID.
510
511 This checks the current node, instances and disk names for
512 duplicates.
513
514 @rtype: string
515 @return: the unique id
516
517 """
518 existing = self._AllIDs(include_temporary=False)
519 return self._temporary_ids.Generate(existing, utils.NewUUID, ec_id)
520
521 @locking.ssynchronized(_config_lock, shared=1)
523 """Generate an unique ID.
524
525 This is just a wrapper over the unlocked version.
526
527 @type ec_id: string
528 @param ec_id: unique id for the job to reserve the id to
529
530 """
531 return self._GenerateUniqueID(ec_id)
532
534 """Return all MACs present in the config.
535
536 @rtype: list
537 @return: the list of all MACs
538
539 """
540 result = []
541 for instance in self._config_data.instances.values():
542 for nic in instance.nics:
543 result.append(nic.mac)
544
545 return result
546
548 """Return all DRBD secrets present in the config.
549
550 @rtype: list
551 @return: the list of all DRBD secrets
552
553 """
554 def helper(disk, result):
555 """Recursively gather secrets from this disk."""
556 if disk.dev_type == constants.DT_DRBD8:
557 result.append(disk.logical_id[5])
558 if disk.children:
559 for child in disk.children:
560 helper(child, result)
561
562 result = []
563 for instance in self._config_data.instances.values():
564 for disk in instance.disks:
565 helper(disk, result)
566
567 return result
568
570 """Compute duplicate disk IDs
571
572 @type disk: L{objects.Disk}
573 @param disk: the disk at which to start searching
574 @type l_ids: list
575 @param l_ids: list of current logical ids
576 @rtype: list
577 @return: a list of error messages
578
579 """
580 result = []
581 if disk.logical_id is not None:
582 if disk.logical_id in l_ids:
583 result.append("duplicate logical id %s" % str(disk.logical_id))
584 else:
585 l_ids.append(disk.logical_id)
586
587 if disk.children:
588 for child in disk.children:
589 result.extend(self._CheckDiskIDs(child, l_ids))
590 return result
591
593 """Verify function.
594
595 @rtype: list
596 @return: a list of error messages; a non-empty list signifies
597 configuration errors
598
599 """
600
601 result = []
602 seen_macs = []
603 ports = {}
604 data = self._config_data
605 cluster = data.cluster
606 seen_lids = []
607
608
609 if not cluster.enabled_hypervisors:
610 result.append("enabled hypervisors list doesn't have any entries")
611 invalid_hvs = set(cluster.enabled_hypervisors) - constants.HYPER_TYPES
612 if invalid_hvs:
613 result.append("enabled hypervisors contains invalid entries: %s" %
614 utils.CommaJoin(invalid_hvs))
615 missing_hvp = (set(cluster.enabled_hypervisors) -
616 set(cluster.hvparams.keys()))
617 if missing_hvp:
618 result.append("hypervisor parameters missing for the enabled"
619 " hypervisor(s) %s" % utils.CommaJoin(missing_hvp))
620
621 if not cluster.enabled_disk_templates:
622 result.append("enabled disk templates list doesn't have any entries")
623 invalid_disk_templates = set(cluster.enabled_disk_templates) \
624 - constants.DISK_TEMPLATES
625 if invalid_disk_templates:
626 result.append("enabled disk templates list contains invalid entries:"
627 " %s" % utils.CommaJoin(invalid_disk_templates))
628
629 if cluster.master_node not in data.nodes:
630 result.append("cluster has invalid primary node '%s'" %
631 cluster.master_node)
632
633 def _helper(owner, attr, value, template):
634 try:
635 utils.ForceDictType(value, template)
636 except errors.GenericError, err:
637 result.append("%s has invalid %s: %s" % (owner, attr, err))
638
639 def _helper_nic(owner, params):
640 try:
641 objects.NIC.CheckParameterSyntax(params)
642 except errors.ConfigurationError, err:
643 result.append("%s has invalid nicparams: %s" % (owner, err))
644
645 def _helper_ipolicy(owner, ipolicy, iscluster):
646 try:
647 objects.InstancePolicy.CheckParameterSyntax(ipolicy, iscluster)
648 except errors.ConfigurationError, err:
649 result.append("%s has invalid instance policy: %s" % (owner, err))
650 for key, value in ipolicy.items():
651 if key == constants.ISPECS_MINMAX:
652 for k in range(len(value)):
653 _helper_ispecs(owner, "ipolicy/%s[%s]" % (key, k), value[k])
654 elif key == constants.ISPECS_STD:
655 _helper(owner, "ipolicy/" + key, value,
656 constants.ISPECS_PARAMETER_TYPES)
657 else:
658
659 if key in constants.IPOLICY_PARAMETERS:
660 exp_type = float
661 else:
662 exp_type = list
663 if not isinstance(value, exp_type):
664 result.append("%s has invalid instance policy: for %s,"
665 " expecting %s, got %s" %
666 (owner, key, exp_type.__name__, type(value)))
667
668 def _helper_ispecs(owner, parentkey, params):
669 for (key, value) in params.items():
670 fullkey = "/".join([parentkey, key])
671 _helper(owner, fullkey, value, constants.ISPECS_PARAMETER_TYPES)
672
673
674 _helper("cluster", "beparams", cluster.SimpleFillBE({}),
675 constants.BES_PARAMETER_TYPES)
676 _helper("cluster", "nicparams", cluster.SimpleFillNIC({}),
677 constants.NICS_PARAMETER_TYPES)
678 _helper_nic("cluster", cluster.SimpleFillNIC({}))
679 _helper("cluster", "ndparams", cluster.SimpleFillND({}),
680 constants.NDS_PARAMETER_TYPES)
681 _helper_ipolicy("cluster", cluster.ipolicy, True)
682
683 if constants.DT_RBD in cluster.diskparams:
684 access = cluster.diskparams[constants.DT_RBD][constants.RBD_ACCESS]
685 if access not in constants.DISK_VALID_ACCESS_MODES:
686 result.append(
687 "Invalid value of '%s:%s': '%s' (expected one of %s)" % (
688 constants.DT_RBD, constants.RBD_ACCESS, access,
689 utils.CommaJoin(constants.DISK_VALID_ACCESS_MODES)
690 )
691 )
692
693
694 for instance_uuid in data.instances:
695 instance = data.instances[instance_uuid]
696 if instance.uuid != instance_uuid:
697 result.append("instance '%s' is indexed by wrong UUID '%s'" %
698 (instance.name, instance_uuid))
699 if instance.primary_node not in data.nodes:
700 result.append("instance '%s' has invalid primary node '%s'" %
701 (instance.name, instance.primary_node))
702 for snode in instance.secondary_nodes:
703 if snode not in data.nodes:
704 result.append("instance '%s' has invalid secondary node '%s'" %
705 (instance.name, snode))
706 for idx, nic in enumerate(instance.nics):
707 if nic.mac in seen_macs:
708 result.append("instance '%s' has NIC %d mac %s duplicate" %
709 (instance.name, idx, nic.mac))
710 else:
711 seen_macs.append(nic.mac)
712 if nic.nicparams:
713 filled = cluster.SimpleFillNIC(nic.nicparams)
714 owner = "instance %s nic %d" % (instance.name, idx)
715 _helper(owner, "nicparams",
716 filled, constants.NICS_PARAMETER_TYPES)
717 _helper_nic(owner, filled)
718
719
720 if not instance.disk_template in data.cluster.enabled_disk_templates:
721 result.append("instance '%s' uses the disabled disk template '%s'." %
722 (instance.name, instance.disk_template))
723
724
725 if instance.beparams:
726 _helper("instance %s" % instance.name, "beparams",
727 cluster.FillBE(instance), constants.BES_PARAMETER_TYPES)
728
729
730 for (idx, dsk) in enumerate(instance.disks):
731 if dsk.dev_type in constants.DTS_DRBD:
732 tcp_port = dsk.logical_id[2]
733 if tcp_port not in ports:
734 ports[tcp_port] = []
735 ports[tcp_port].append((instance.name, "drbd disk %s" % idx))
736
737 net_port = getattr(instance, "network_port", None)
738 if net_port is not None:
739 if net_port not in ports:
740 ports[net_port] = []
741 ports[net_port].append((instance.name, "network port"))
742
743
744 for idx, disk in enumerate(instance.disks):
745 result.extend(["instance '%s' disk %d error: %s" %
746 (instance.name, idx, msg) for msg in disk.Verify()])
747 result.extend(self._CheckDiskIDs(disk, seen_lids))
748
749 wrong_names = _CheckInstanceDiskIvNames(instance.disks)
750 if wrong_names:
751 tmp = "; ".join(("name of disk %s should be '%s', but is '%s'" %
752 (idx, exp_name, actual_name))
753 for (idx, exp_name, actual_name) in wrong_names)
754
755 result.append("Instance '%s' has wrongly named disks: %s" %
756 (instance.name, tmp))
757
758
759 for free_port in cluster.tcpudp_port_pool:
760 if free_port not in ports:
761 ports[free_port] = []
762 ports[free_port].append(("cluster", "port marked as free"))
763
764
765 keys = ports.keys()
766 keys.sort()
767 for pnum in keys:
768 pdata = ports[pnum]
769 if len(pdata) > 1:
770 txt = utils.CommaJoin(["%s/%s" % val for val in pdata])
771 result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))
772
773
774 if keys:
775 if keys[-1] > cluster.highest_used_port:
776 result.append("Highest used port mismatch, saved %s, computed %s" %
777 (cluster.highest_used_port, keys[-1]))
778
779 if not data.nodes[cluster.master_node].master_candidate:
780 result.append("Master node is not a master candidate")
781
782
783 mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
784 if mc_now < mc_max:
785 result.append("Not enough master candidates: actual %d, target %d" %
786 (mc_now, mc_max))
787
788
789 for node_uuid, node in data.nodes.items():
790 if node.uuid != node_uuid:
791 result.append("Node '%s' is indexed by wrong UUID '%s'" %
792 (node.name, node_uuid))
793 if [node.master_candidate, node.drained, node.offline].count(True) > 1:
794 result.append("Node %s state is invalid: master_candidate=%s,"
795 " drain=%s, offline=%s" %
796 (node.name, node.master_candidate, node.drained,
797 node.offline))
798 if node.group not in data.nodegroups:
799 result.append("Node '%s' has invalid group '%s'" %
800 (node.name, node.group))
801 else:
802 _helper("node %s" % node.name, "ndparams",
803 cluster.FillND(node, data.nodegroups[node.group]),
804 constants.NDS_PARAMETER_TYPES)
805 used_globals = constants.NDC_GLOBALS.intersection(node.ndparams)
806 if used_globals:
807 result.append("Node '%s' has some global parameters set: %s" %
808 (node.name, utils.CommaJoin(used_globals)))
809
810
811 nodegroups_names = set()
812 for nodegroup_uuid in data.nodegroups:
813 nodegroup = data.nodegroups[nodegroup_uuid]
814 if nodegroup.uuid != nodegroup_uuid:
815 result.append("node group '%s' (uuid: '%s') indexed by wrong uuid '%s'"
816 % (nodegroup.name, nodegroup.uuid, nodegroup_uuid))
817 if utils.UUID_RE.match(nodegroup.name.lower()):
818 result.append("node group '%s' (uuid: '%s') has uuid-like name" %
819 (nodegroup.name, nodegroup.uuid))
820 if nodegroup.name in nodegroups_names:
821 result.append("duplicate node group name '%s'" % nodegroup.name)
822 else:
823 nodegroups_names.add(nodegroup.name)
824 group_name = "group %s" % nodegroup.name
825 _helper_ipolicy(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy),
826 False)
827 if nodegroup.ndparams:
828 _helper(group_name, "ndparams",
829 cluster.SimpleFillND(nodegroup.ndparams),
830 constants.NDS_PARAMETER_TYPES)
831
832
833 _, duplicates = self._UnlockedComputeDRBDMap()
834 for node, minor, instance_a, instance_b in duplicates:
835 result.append("DRBD minor %d on node %s is assigned twice to instances"
836 " %s and %s" % (minor, node, instance_a, instance_b))
837
838
839 default_nicparams = cluster.nicparams[constants.PP_DEFAULT]
840 ips = {}
841
842 def _AddIpAddress(ip, name):
843 ips.setdefault(ip, []).append(name)
844
845 _AddIpAddress(cluster.master_ip, "cluster_ip")
846
847 for node in data.nodes.values():
848 _AddIpAddress(node.primary_ip, "node:%s/primary" % node.name)
849 if node.secondary_ip != node.primary_ip:
850 _AddIpAddress(node.secondary_ip, "node:%s/secondary" % node.name)
851
852 for instance in data.instances.values():
853 for idx, nic in enumerate(instance.nics):
854 if nic.ip is None:
855 continue
856
857 nicparams = objects.FillDict(default_nicparams, nic.nicparams)
858 nic_mode = nicparams[constants.NIC_MODE]
859 nic_link = nicparams[constants.NIC_LINK]
860
861 if nic_mode == constants.NIC_MODE_BRIDGED:
862 link = "bridge:%s" % nic_link
863 elif nic_mode == constants.NIC_MODE_ROUTED:
864 link = "route:%s" % nic_link
865 elif nic_mode == constants.NIC_MODE_OVS:
866 link = "ovs:%s" % nic_link
867 else:
868 raise errors.ProgrammerError("NIC mode '%s' not handled" % nic_mode)
869
870 _AddIpAddress("%s/%s/%s" % (link, nic.ip, nic.network),
871 "instance:%s/nic:%d" % (instance.name, idx))
872
873 for ip, owners in ips.items():
874 if len(owners) > 1:
875 result.append("IP address %s is used by multiple owners: %s" %
876 (ip, utils.CommaJoin(owners)))
877
878 return result
879
880 @locking.ssynchronized(_config_lock, shared=1)
882 """Verify function.
883
884 This is just a wrapper over L{_UnlockedVerifyConfig}.
885
886 @rtype: list
887 @return: a list of error messages; a non-empty list signifies
888 configuration errors
889
890 """
891 return self._UnlockedVerifyConfig()
892
893 @locking.ssynchronized(_config_lock)
895 """Adds a new port to the available port pool.
896
897 @warning: this method does not "flush" the configuration (via
898 L{_WriteConfig}); callers should do that themselves once the
899 configuration is stable
900
901 """
902 if not isinstance(port, int):
903 raise errors.ProgrammerError("Invalid type passed for port")
904
905 self._config_data.cluster.tcpudp_port_pool.add(port)
906
907 @locking.ssynchronized(_config_lock, shared=1)
909 """Returns a copy of the current port list.
910
911 """
912 return self._config_data.cluster.tcpudp_port_pool.copy()
913
914 @locking.ssynchronized(_config_lock)
916 """Allocate a port.
917
918 The port will be taken from the available port pool or from the
919 default port range (and in this case we increase
920 highest_used_port).
921
922 """
923
924 if self._config_data.cluster.tcpudp_port_pool:
925 port = self._config_data.cluster.tcpudp_port_pool.pop()
926 else:
927 port = self._config_data.cluster.highest_used_port + 1
928 if port >= constants.LAST_DRBD_PORT:
929 raise errors.ConfigurationError("The highest used port is greater"
930 " than %s. Aborting." %
931 constants.LAST_DRBD_PORT)
932 self._config_data.cluster.highest_used_port = port
933
934 self._WriteConfig()
935 return port
936
938 """Compute the used DRBD minor/nodes.
939
940 @rtype: (dict, list)
941 @return: dictionary of node_uuid: dict of minor: instance_uuid;
942 the returned dict will have all the nodes in it (even if with
943 an empty list), and a list of duplicates; if the duplicates
944 list is not empty, the configuration is corrupted and its caller
945 should raise an exception
946
947 """
948 def _AppendUsedMinors(get_node_name_fn, instance, disk, used):
949 duplicates = []
950 if disk.dev_type == constants.DT_DRBD8 and len(disk.logical_id) >= 5:
951 node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
952 for node_uuid, minor in ((node_a, minor_a), (node_b, minor_b)):
953 assert node_uuid in used, \
954 ("Node '%s' of instance '%s' not found in node list" %
955 (get_node_name_fn(node_uuid), instance.name))
956 if minor in used[node_uuid]:
957 duplicates.append((node_uuid, minor, instance.uuid,
958 used[node_uuid][minor]))
959 else:
960 used[node_uuid][minor] = instance.uuid
961 if disk.children:
962 for child in disk.children:
963 duplicates.extend(_AppendUsedMinors(get_node_name_fn, instance, child,
964 used))
965 return duplicates
966
967 duplicates = []
968 my_dict = dict((node_uuid, {}) for node_uuid in self._config_data.nodes)
969 for instance in self._config_data.instances.itervalues():
970 for disk in instance.disks:
971 duplicates.extend(_AppendUsedMinors(self._UnlockedGetNodeName,
972 instance, disk, my_dict))
973 for (node_uuid, minor), inst_uuid in self._temporary_drbds.iteritems():
974 if minor in my_dict[node_uuid] and my_dict[node_uuid][minor] != inst_uuid:
975 duplicates.append((node_uuid, minor, inst_uuid,
976 my_dict[node_uuid][minor]))
977 else:
978 my_dict[node_uuid][minor] = inst_uuid
979 return my_dict, duplicates
980
981 @locking.ssynchronized(_config_lock)
983 """Compute the used DRBD minor/nodes.
984
985 This is just a wrapper over L{_UnlockedComputeDRBDMap}.
986
987 @return: dictionary of node_uuid: dict of minor: instance_uuid;
988 the returned dict will have all the nodes in it (even if with
989 an empty list).
990
991 """
992 d_map, duplicates = self._UnlockedComputeDRBDMap()
993 if duplicates:
994 raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
995 str(duplicates))
996 return d_map
997
998 @locking.ssynchronized(_config_lock)
1000 """Allocate a drbd minor.
1001
1002 The free minor will be automatically computed from the existing
1003 devices. A node can be given multiple times in order to allocate
1004 multiple minors. The result is the list of minors, in the same
1005 order as the passed nodes.
1006
1007 @type inst_uuid: string
1008 @param inst_uuid: the instance for which we allocate minors
1009
1010 """
1011 assert isinstance(inst_uuid, basestring), \
1012 "Invalid argument '%s' passed to AllocateDRBDMinor" % inst_uuid
1013
1014 d_map, duplicates = self._UnlockedComputeDRBDMap()
1015 if duplicates:
1016 raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
1017 str(duplicates))
1018 result = []
1019 for nuuid in node_uuids:
1020 ndata = d_map[nuuid]
1021 if not ndata:
1022
1023 result.append(0)
1024 ndata[0] = inst_uuid
1025 self._temporary_drbds[(nuuid, 0)] = inst_uuid
1026 continue
1027 keys = ndata.keys()
1028 keys.sort()
1029 ffree = utils.FirstFree(keys)
1030 if ffree is None:
1031
1032
1033 minor = keys[-1] + 1
1034 else:
1035 minor = ffree
1036
1037 assert minor not in d_map[nuuid], \
1038 ("Attempt to reuse allocated DRBD minor %d on node %s,"
1039 " already allocated to instance %s" %
1040 (minor, nuuid, d_map[nuuid][minor]))
1041 ndata[minor] = inst_uuid
1042
1043 r_key = (nuuid, minor)
1044 assert r_key not in self._temporary_drbds, \
1045 ("Attempt to reuse reserved DRBD minor %d on node %s,"
1046 " reserved for instance %s" %
1047 (minor, nuuid, self._temporary_drbds[r_key]))
1048 self._temporary_drbds[r_key] = inst_uuid
1049 result.append(minor)
1050 logging.debug("Request to allocate drbd minors, input: %s, returning %s",
1051 node_uuids, result)
1052 return result
1053
1055 """Release temporary drbd minors allocated for a given instance.
1056
1057 @type inst_uuid: string
1058 @param inst_uuid: the instance for which temporary minors should be
1059 released
1060
1061 """
1062 assert isinstance(inst_uuid, basestring), \
1063 "Invalid argument passed to ReleaseDRBDMinors"
1064 for key, uuid in self._temporary_drbds.items():
1065 if uuid == inst_uuid:
1066 del self._temporary_drbds[key]
1067
1068 @locking.ssynchronized(_config_lock)
1070 """Release temporary drbd minors allocated for a given instance.
1071
1072 This should be called on the error paths, on the success paths
1073 it's automatically called by the ConfigWriter add and update
1074 functions.
1075
1076 This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}.
1077
1078 @type inst_uuid: string
1079 @param inst_uuid: the instance for which temporary minors should be
1080 released
1081
1082 """
1083 self._UnlockedReleaseDRBDMinors(inst_uuid)
1084
1085 @locking.ssynchronized(_config_lock, shared=1)
1087 """Get the configuration version.
1088
1089 @return: Config version
1090
1091 """
1092 return self._config_data.version
1093
1094 @locking.ssynchronized(_config_lock, shared=1)
1096 """Get cluster name.
1097
1098 @return: Cluster name
1099
1100 """
1101 return self._config_data.cluster.cluster_name
1102
1103 @locking.ssynchronized(_config_lock, shared=1)
1105 """Get the UUID of the master node for this cluster.
1106
1107 @return: Master node UUID
1108
1109 """
1110 return self._config_data.cluster.master_node
1111
1112 @locking.ssynchronized(_config_lock, shared=1)
1114 """Get the hostname of the master node for this cluster.
1115
1116 @return: Master node hostname
1117
1118 """
1119 return self._UnlockedGetNodeName(self._config_data.cluster.master_node)
1120
1121 @locking.ssynchronized(_config_lock, shared=1)
1123 """Get the master node information for this cluster.
1124
1125 @rtype: objects.Node
1126 @return: Master node L{objects.Node} object
1127
1128 """
1129 return self._UnlockedGetNodeInfo(self._config_data.cluster.master_node)
1130
1131 @locking.ssynchronized(_config_lock, shared=1)
1133 """Get the IP of the master node for this cluster.
1134
1135 @return: Master IP
1136
1137 """
1138 return self._config_data.cluster.master_ip
1139
1140 @locking.ssynchronized(_config_lock, shared=1)
1142 """Get the master network device for this cluster.
1143
1144 """
1145 return self._config_data.cluster.master_netdev
1146
1147 @locking.ssynchronized(_config_lock, shared=1)
1149 """Get the netmask of the master node for this cluster.
1150
1151 """
1152 return self._config_data.cluster.master_netmask
1153
1154 @locking.ssynchronized(_config_lock, shared=1)
1156 """Get flag representing whether to use the external master IP setup script.
1157
1158 """
1159 return self._config_data.cluster.use_external_mip_script
1160
1161 @locking.ssynchronized(_config_lock, shared=1)
1163 """Get the file storage dir for this cluster.
1164
1165 """
1166 return self._config_data.cluster.file_storage_dir
1167
1168 @locking.ssynchronized(_config_lock, shared=1)
1170 """Get the shared file storage dir for this cluster.
1171
1172 """
1173 return self._config_data.cluster.shared_file_storage_dir
1174
1175 @locking.ssynchronized(_config_lock, shared=1)
1177 """Get the hypervisor type for this cluster.
1178
1179 """
1180 return self._config_data.cluster.enabled_hypervisors[0]
1181
1182 @locking.ssynchronized(_config_lock, shared=1)
1184 """Return the rsa hostkey from the config.
1185
1186 @rtype: string
1187 @return: the rsa hostkey
1188
1189 """
1190 return self._config_data.cluster.rsahostkeypub
1191
1192 @locking.ssynchronized(_config_lock, shared=1)
1194 """Return the dsa hostkey from the config.
1195
1196 @rtype: string
1197 @return: the dsa hostkey
1198
1199 """
1200 return self._config_data.cluster.dsahostkeypub
1201
1202 @locking.ssynchronized(_config_lock, shared=1)
1204 """Get the default instance allocator for this cluster.
1205
1206 """
1207 return self._config_data.cluster.default_iallocator
1208
1209 @locking.ssynchronized(_config_lock, shared=1)
1211 """Get cluster primary ip family.
1212
1213 @return: primary ip family
1214
1215 """
1216 return self._config_data.cluster.primary_ip_family
1217
@locking.ssynchronized(_config_lock, shared=1)
def GetMasterNetworkParameters(self):
  """Collect the network parameters of the master node.

  @rtype: L{objects.MasterNetworkParameters}
  @return: network parameters of the master node

  """
  # Gather everything from the cluster object in one place
  cluster = self._config_data.cluster
  return objects.MasterNetworkParameters(
    uuid=cluster.master_node, ip=cluster.master_ip,
    netmask=cluster.master_netmask, netdev=cluster.master_netdev,
    ip_family=cluster.primary_ip_family)
1233
@locking.ssynchronized(_config_lock)
def AddNodeGroup(self, group, ec_id, check_uuid=True):
  """Add a node group to the configuration.

  This method calls group.UpgradeConfig() to fill any missing attributes
  according to their default values.

  @type group: L{objects.NodeGroup}
  @param group: the NodeGroup object to add
  @type ec_id: string
  @param ec_id: unique id for the job to use when creating a missing UUID
  @type check_uuid: bool
  @param check_uuid: add an UUID to the group if it doesn't have one or, if
    it does, ensure that it does not exist in the configuration already

  """
  # The actual work happens in the unlocked helper; here we only
  # serialize the change to disk afterwards.
  self._UnlockedAddNodeGroup(group, ec_id, check_uuid)
  self._WriteConfig()
1253
1282
@locking.ssynchronized(_config_lock)
def RemoveNodeGroup(self, group_uuid):
  """Drop a node group from the configuration.

  @type group_uuid: string
  @param group_uuid: the UUID of the node group to remove
  @raise errors.ConfigurationError: if the group is unknown

  """
  logging.info("Removing node group %s from configuration", group_uuid)

  if group_uuid not in self._config_data.nodegroups:
    raise errors.ConfigurationError("Unknown node group '%s'" % group_uuid)

  # The last remaining group must never be deleted
  assert len(self._config_data.nodegroups) != 1, \
    "Group '%s' is the only group, cannot be removed" % group_uuid

  del self._config_data.nodegroups[group_uuid]
  self._config_data.cluster.serial_no += 1
  self._WriteConfig()
1302
def _UnlockedLookupNodeGroup(self, target):
  """Lookup a node group's UUID.

  @type target: string or None
  @param target: group name or UUID or None to look for the default
  @rtype: string
  @return: nodegroup UUID
  @raises errors.OpPrereqError: when the target group cannot be found

  """
  if target is None:
    # No target given: only meaningful when there is exactly one group
    if len(self._config_data.nodegroups) != 1:
      raise errors.OpPrereqError("More than one node group exists. Target"
                                 " group must be specified explicitly.")
    else:
      # list(dict) yields the keys on both Python 2 and 3; the previous
      # dict.keys()[0] only works on Python 2 where keys() is a list
      return list(self._config_data.nodegroups)[0]
  if target in self._config_data.nodegroups:
    # Already a known UUID
    return target
  # Fall back to a name lookup
  for nodegroup in self._config_data.nodegroups.values():
    if nodegroup.name == target:
      return nodegroup.uuid
  raise errors.OpPrereqError("Node group '%s' not found" % target,
                             errors.ECODE_NOENT)
1326
@locking.ssynchronized(_config_lock, shared=1)
def LookupNodeGroup(self, target):
  """Lookup a node group's UUID (locked wrapper).

  This simply delegates to L{_UnlockedLookupNodeGroup} while holding
  the config lock in shared mode.

  @type target: string or None
  @param target: group name or UUID or None to look for the default
  @rtype: string
  @return: nodegroup UUID

  """
  return self._UnlockedLookupNodeGroup(target)
1340
def _UnlockedGetNodeGroup(self, uuid):
  """Fetch a node group object by UUID.

  @type uuid: string
  @param uuid: group UUID
  @rtype: L{objects.NodeGroup} or None
  @return: nodegroup object, or None if not found

  """
  # Return None (rather than raising) for unknown groups
  return self._config_data.nodegroups.get(uuid, None)
1354
@locking.ssynchronized(_config_lock, shared=1)
def GetNodeGroup(self, uuid):
  """Fetch a node group object by UUID (locked wrapper).

  @type uuid: string
  @param uuid: group UUID
  @rtype: L{objects.NodeGroup} or None
  @return: nodegroup object, or None if not found

  """
  return self._UnlockedGetNodeGroup(uuid)
1366
@locking.ssynchronized(_config_lock, shared=1)
def GetAllNodeGroupsInfo(self):
  """Return a copy of the mapping of all node groups.

  @rtype: dict
  @return: dict of (group UUID, group object) pairs

  """
  # Shallow-copy so callers cannot mutate the live configuration dict
  return dict(self._config_data.nodegroups)
1373
@locking.ssynchronized(_config_lock, shared=1)
def GetNodeGroupList(self):
  """Return the UUIDs of all node groups.

  @rtype: list
  @return: list of node group UUIDs

  """
  return self._config_data.nodegroups.keys()
1380
@locking.ssynchronized(_config_lock, shared=1)
def GetNodeGroupMembersByNodes(self, nodes):
  """Return all nodes sharing a node group with any of the given nodes.

  @param nodes: iterable of node UUIDs
  @rtype: frozenset
  @return: UUIDs of all members of the groups the given nodes belong to

  """
  group_of = lambda node_uuid: self._UnlockedGetNodeInfo(node_uuid).group
  members = set()
  for node_uuid in nodes:
    members.update(self._UnlockedGetNodeGroup(group_of(node_uuid)).members)
  return frozenset(members)
1391
@locking.ssynchronized(_config_lock, shared=1)
def GetMultiNodeGroupInfo(self, group_uuids):
  """Return the configuration of several node groups, preserving order.

  @param group_uuids: List of node group UUIDs
  @rtype: list
  @return: List of tuples of (group_uuid, group_info)

  """
  result = []
  for uuid in group_uuids:
    result.append((uuid, self._UnlockedGetNodeGroup(uuid)))
  return result
1402
1403 @locking.ssynchronized(_config_lock)
1436
def _EnsureUUID(self, item, ec_id):
  """Make sure the given object carries a valid, unique UUID.

  @param item: the instance or node to be checked
  @param ec_id: the execution context id for the uuid reservation

  """
  if item.uuid:
    # An existing UUID must not clash with any other (including reserved)
    self._CheckUniqueUUID(item, include_temporary=True)
  else:
    # No UUID yet: reserve a fresh one under the given execution context
    item.uuid = self._GenerateUniqueID(ec_id)
1448
def _CheckUniqueUUID(self, item, include_temporary):
  """Verify that the UUID of the given object is not already in use.

  @param item: the instance or node to be checked
  @param include_temporary: whether temporarily generated UUID's should be
    included in the check. If the UUID of the item to be checked is
    a temporarily generated one, this has to be C{False}.
  @raise errors.ConfigurationError: if the UUID is missing or duplicated

  """
  if not item.uuid:
    raise errors.ConfigurationError("'%s' must have an UUID" % (item.name,))
  existing = self._AllIDs(include_temporary=include_temporary)
  if item.uuid in existing:
    raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
                                    " in use" % (item.name, item.uuid))
1463
def _SetInstanceStatus(self, inst_uuid, status, disks_active):
  """Update an instance's admin state and/or disks-active flag.

  Passing C{None} for either value keeps the instance's current setting.
  The config is written out only if something actually changed.

  @param inst_uuid: UUID of the instance to update
  @param status: new admin state, or None to keep the current one
  @param disks_active: new disks_active flag, or None to keep it
  @raise errors.ConfigurationError: if the instance is unknown

  """
  if inst_uuid not in self._config_data.instances:
    raise errors.ConfigurationError("Unknown instance '%s'" %
                                    inst_uuid)
  instance = self._config_data.instances[inst_uuid]

  # None means "keep the current value"
  if status is None:
    status = instance.admin_state
  if disks_active is None:
    disks_active = instance.disks_active

  assert status in constants.ADMINST_ALL, \
    "Invalid status '%s' passed to SetInstanceStatus" % (status,)

  changed = (instance.admin_state != status or
             instance.disks_active != disks_active)
  if changed:
    instance.admin_state = status
    instance.disks_active = disks_active
    instance.serial_no += 1
    instance.mtime = time.time()
    self._WriteConfig()
1488
@locking.ssynchronized(_config_lock)
def MarkInstanceUp(self, inst_uuid):
  """Record an instance as administratively up.

  This also sets the instance disks active flag.

  @param inst_uuid: UUID of the instance

  """
  self._SetInstanceStatus(inst_uuid, constants.ADMINST_UP, True)
1497
1498 @locking.ssynchronized(_config_lock)
1506
@locking.ssynchronized(_config_lock)
def RemoveInstance(self, inst_uuid):
  """Delete an instance from the configuration.

  Releases the instance's network port back to the cluster pool and
  returns any committed NIC IPs to their networks.

  @param inst_uuid: UUID of the instance to remove
  @raise errors.ConfigurationError: if the instance is unknown

  """
  if inst_uuid not in self._config_data.instances:
    raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)

  # Return the instance's TCP/UDP port to the cluster-wide pool
  inst = self._config_data.instances[inst_uuid]
  network_port = getattr(inst, "network_port", None)
  if network_port is not None:
    self._config_data.cluster.tcpudp_port_pool.add(network_port)

  instance = self._UnlockedGetInstanceInfo(inst_uuid)

  # Give back any IP addresses committed to IP pools
  for nic in instance.nics:
    if nic.network and nic.ip:
      self._UnlockedCommitIp(constants.RELEASE_ACTION, nic.network, nic.ip)

  del self._config_data.instances[inst_uuid]
  self._config_data.cluster.serial_no += 1
  self._WriteConfig()
1532
@locking.ssynchronized(_config_lock)
def RenameInstance(self, inst_uuid, new_name):
  """Rename an instance.

  This needs to be done in ConfigWriter and not by RemoveInstance
  combined with AddInstance as only we can guarantee an atomic
  rename.

  @param inst_uuid: UUID of the instance to rename
  @param new_name: the new instance name
  @raise errors.ConfigurationError: if the instance is unknown

  """
  if inst_uuid not in self._config_data.instances:
    raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)

  inst = self._config_data.instances[inst_uuid]
  inst.name = new_name

  # File-based disks embed the instance name in their path, so their
  # logical_id must be rewritten as well.
  # (was: "for (_, disk) in enumerate(inst.disks)" -- the index was unused)
  for disk in inst.disks:
    if disk.dev_type in [constants.DT_FILE, constants.DT_SHARED_FILE]:
      # rename the file paths in logical id
      file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
      disk.logical_id = (disk.logical_id[0],
                         utils.PathJoin(file_storage_dir, inst.name,
                                        os.path.basename(disk.logical_id[1])))

  # Force update of ssconf files
  self._config_data.cluster.serial_no += 1

  self._WriteConfig()
1560
@locking.ssynchronized(_config_lock)
def MarkInstanceDown(self, inst_uuid):
  """Record an instance as administratively down.

  This does not touch the instance disks active flag, as shut down
  instances can still have active disks.

  @param inst_uuid: UUID of the instance

  """
  self._SetInstanceStatus(inst_uuid, constants.ADMINST_DOWN, None)
1570
@locking.ssynchronized(_config_lock)
def MarkInstanceDisksActive(self, inst_uuid):
  """Flag an instance's disks as active, keeping its admin state.

  @param inst_uuid: UUID of the instance

  """
  self._SetInstanceStatus(inst_uuid, None, True)
1577
@locking.ssynchronized(_config_lock)
def MarkInstanceDisksInactive(self, inst_uuid):
  """Flag an instance's disks as inactive, keeping its admin state.

  @param inst_uuid: UUID of the instance

  """
  self._SetInstanceStatus(inst_uuid, None, False)
1584
def _UnlockedGetInstanceList(self):
  """Return the UUIDs of all configured instances.

  This function is for internal use, when the config lock is already held.

  @return: list of instance UUIDs

  """
  return self._config_data.instances.keys()
1592
@locking.ssynchronized(_config_lock, shared=1)
def GetInstanceList(self):
  """Return the UUIDs of all configured instances (locked wrapper).

  @return: array of instances, ex. ['instance2-uuid', 'instance1-uuid']

  """
  return self._UnlockedGetInstanceList()
1601
def ExpandInstanceName(self, short_name):
  """Attempt to expand an incomplete instance name.

  @param short_name: (possibly partial) instance name to expand
  @rtype: tuple
  @return: (uuid, full name) of the matching instance, or (None, None)
    if there is no unambiguous match

  """
  # Locking is done in GetAllInstancesInfo
  all_insts = self.GetAllInstancesInfo().values()
  expanded_name = _MatchNameComponentIgnoreCase(
    short_name, [inst.name for inst in all_insts])

  if expanded_name is None:
    return (None, None)

  # The expanded name came from the list above, so exactly one match
  # exists.  A list comprehension works on both Python 2 and 3, unlike
  # the previous filter(...)[0], which indexes a filter object on Py3.
  inst = [n for n in all_insts if n.name == expanded_name][0]
  return (inst.uuid, inst.name)
1617
def _UnlockedGetInstanceInfo(self, inst_uuid):
  """Return the object describing one instance, or None.

  This function is for internal use, when the config lock is already held.

  @param inst_uuid: UUID of the instance
  @rtype: L{objects.Instance} or None

  """
  return self._config_data.instances.get(inst_uuid, None)
1628
@locking.ssynchronized(_config_lock, shared=1)
def GetInstanceInfo(self, inst_uuid):
  """Return the configuration object of one instance.

  It takes the information from the configuration file. Other information of
  an instance are taken from the live systems.

  @param inst_uuid: UUID of the instance
  @rtype: L{objects.Instance}
  @return: the instance object

  """
  return self._UnlockedGetInstanceInfo(inst_uuid)
1643
1644 @locking.ssynchronized(_config_lock, shared=1)
1662
1663 @locking.ssynchronized(_config_lock, shared=1)
1680
@locking.ssynchronized(_config_lock, shared=1)
def GetMultiInstanceInfo(self, inst_uuids):
  """Return the configuration of several instances, preserving order.

  @param inst_uuids: list of instance UUIDs
  @rtype: list
  @return: list of tuples (instance UUID, instance_info), where
    instance_info is what would GetInstanceInfo return for each UUID

  """
  result = []
  for uuid in inst_uuids:
    result.append((uuid, self._UnlockedGetInstanceInfo(uuid)))
  return result
1693
@locking.ssynchronized(_config_lock, shared=1)
def GetMultiInstanceInfoByName(self, inst_names):
  """Return the configuration of several instances looked up by name.

  @param inst_names: list of instance names
  @rtype: list
  @return: list of tuples (instance UUID, instance_info), keeping the
    order of C{inst_names}

  """
  return [(inst.uuid, inst)
          for inst in (self._UnlockedGetInstanceInfoByName(name)
                       for name in inst_names)]
1710
@locking.ssynchronized(_config_lock, shared=1)
def GetAllInstancesInfo(self):
  """Return the configuration of every instance.

  @rtype: dict
  @return: dict of (instance UUID, instance object) pairs

  """
  return self._UnlockedGetAllInstancesInfo()
1721
1726
@locking.ssynchronized(_config_lock, shared=1)
def GetInstancesInfoByFilter(self, filter_fn):
  """Return the instances matching a predicate.

  @type filter_fn: callable
  @param filter_fn: Filter function receiving instance object as parameter,
    returning boolean. Important: this function is called while the
    configuration locks is held. It must not do any complex work or call
    functions potentially leading to a deadlock. Ideally it doesn't call any
    other functions and just compares instance attributes.
  @rtype: dict
  @return: dict of (UUID, instance) pairs for which C{filter_fn} is true

  """
  matches = {}
  for (uuid, inst) in self._config_data.instances.items():
    if filter_fn(inst):
      matches[uuid] = inst
  return matches
1742
@locking.ssynchronized(_config_lock, shared=1)
def GetInstanceInfoByName(self, inst_name):
  """Return the L{objects.Instance} object for a named instance.

  @param inst_name: name of the instance to get information for
  @type inst_name: string
  @return: the corresponding L{objects.Instance} instance or None if no
    information is available

  """
  return self._UnlockedGetInstanceInfoByName(inst_name)
1754
1760
1766
@locking.ssynchronized(_config_lock, shared=1)
def GetInstanceName(self, inst_uuid):
  """Resolve an instance UUID to its name.

  @param inst_uuid: instance UUID to get name for
  @type inst_uuid: string
  @rtype: string
  @return: instance name

  """
  return self._UnlockedGetInstanceName(inst_uuid)
1778
@locking.ssynchronized(_config_lock, shared=1)
def GetInstanceNames(self, inst_uuids):
  """Resolve several instance UUIDs to their names.

  @param inst_uuids: list of instance UUIDs to get names for
  @type inst_uuids: list of strings
  @rtype: list of strings
  @return: list of instance names

  """
  return self._UnlockedGetInstanceNames(inst_uuids)
1790
1793
1794 @locking.ssynchronized(_config_lock)
1812
@locking.ssynchronized(_config_lock)
def RemoveNode(self, node_uuid):
  """Delete a node from the configuration.

  @param node_uuid: UUID of the node to remove
  @raise errors.ConfigurationError: if the node is unknown

  """
  logging.info("Removing node %s from configuration", node_uuid)

  if node_uuid not in self._config_data.nodes:
    raise errors.ConfigurationError("Unknown node '%s'" % node_uuid)

  # Detach the node from its group before dropping it
  self._UnlockedRemoveNodeFromGroup(self._config_data.nodes[node_uuid])
  del self._config_data.nodes[node_uuid]
  self._config_data.cluster.serial_no += 1
  self._WriteConfig()
1827
1843
def _UnlockedGetNodeInfo(self, node_uuid):
  """Return the object describing one node, or None.

  This function is for internal use, when the config lock is already
  held.

  @param node_uuid: the node UUID
  @rtype: L{objects.Node} or None
  @return: the node object

  """
  return self._config_data.nodes.get(node_uuid, None)
1860
@locking.ssynchronized(_config_lock, shared=1)
def GetNodeInfo(self, node_uuid):
  """Return the object describing one node (locked wrapper).

  This is just a locked wrapper over L{_UnlockedGetNodeInfo}.

  @param node_uuid: the node UUID
  @rtype: L{objects.Node}
  @return: the node object

  """
  return self._UnlockedGetNodeInfo(node_uuid)
1874
@locking.ssynchronized(_config_lock, shared=1)
def GetNodeInstances(self, node_uuid):
  """Split the cluster's instances by their relation to a node.

  @param node_uuid: the node UUID
  @rtype: (list, list)
  @return: a tuple with two lists: the primary and the secondary instances

  """
  primaries = []
  secondaries = []
  for inst in self._config_data.instances.values():
    if inst.primary_node == node_uuid:
      primaries.append(inst.uuid)
    # Note: an instance can appear in both lists only if the config is
    # inconsistent; normally primary and secondary nodes are distinct
    if node_uuid in inst.secondary_nodes:
      secondaries.append(inst.uuid)
  return (primaries, secondaries)
1893
@locking.ssynchronized(_config_lock, shared=1)
def GetNodeGroupInstances(self, uuid, primary_only=False):
  """Return the instances having a node in the given node group.

  @param uuid: Node group UUID
  @param primary_only: Whether to only consider primary nodes
  @rtype: frozenset
  @return: List of instance UUIDs in node group

  """
  # Which nodes of each instance should count towards group membership
  if primary_only:
    relevant_nodes = lambda inst: [inst.primary_node]
  else:
    relevant_nodes = lambda inst: inst.all_nodes

  found = set()
  for inst in self._config_data.instances.values():
    for node_uuid in relevant_nodes(inst):
      if self._UnlockedGetNodeInfo(node_uuid).group == uuid:
        found.add(inst.uuid)
  return frozenset(found)
1913
def _UnlockedGetHvparamsString(self, hvname):
  """Return the string representation of the list of hypervisor parameters
  of the given hypervisor.

  @type hvname: string
  @param hvname: name of a hypervisor
  @rtype: string
  @return: one "KEY=VALUE" pair per line, each line newline-terminated
  @see: C{GetHvparams}

  """
  hvparams = self._config_data.cluster.hvparams[hvname]
  # str.join instead of repeated "+=" -- avoids quadratic string building
  return "".join("%s=%s\n" % (key, hvparams[key]) for key in hvparams)
1926
@locking.ssynchronized(_config_lock, shared=1)
def GetHvparamsString(self, hvname):
  """Return the hypervisor parameters of the given hypervisor.

  @type hvname: string
  @param hvname: name of a hypervisor
  @rtype: string
  @return: string containing key-value-pairs, one pair on each line;
    format: KEY=VALUE

  """
  return self._UnlockedGetHvparamsString(hvname)
1939
def _UnlockedGetNodeList(self):
  """Return the UUIDs of all configured nodes.

  This function is for internal use, when the config lock is already
  held.

  @rtype: list

  """
  return self._config_data.nodes.keys()
1950
@locking.ssynchronized(_config_lock, shared=1)
def GetNodeList(self):
  """Return the UUIDs of all configured nodes (locked wrapper).

  @rtype: list

  """
  return self._UnlockedGetNodeList()
1957
1965
1966 @locking.ssynchronized(_config_lock, shared=1)
1972
1973 @locking.ssynchronized(_config_lock, shared=1)
1981
1982 @locking.ssynchronized(_config_lock, shared=1)
1990
1991 @locking.ssynchronized(_config_lock, shared=1)
1999
@locking.ssynchronized(_config_lock, shared=1)
def GetMultiNodeInfo(self, node_uuids):
  """Return the configuration of several nodes, preserving order.

  @param node_uuids: list of node UUIDs
  @rtype: list
  @return: list of tuples of (node, node_info), where node_info is
    what would GetNodeInfo return for the node, in the original order

  """
  result = []
  for uuid in node_uuids:
    result.append((uuid, self._UnlockedGetNodeInfo(uuid)))
  return result
2012
def _UnlockedGetAllNodesInfo(self):
  """Build a dict with the configuration of every node.

  @note: See L{GetAllNodesInfo}

  """
  return dict((node_uuid, self._UnlockedGetNodeInfo(node_uuid))
              for node_uuid in self._UnlockedGetNodeList())
2021
@locking.ssynchronized(_config_lock, shared=1)
def GetAllNodesInfo(self):
  """Return the configuration of every node (locked wrapper).

  @rtype: dict
  @return: dict of (node, node_info), where node_info is what
    would GetNodeInfo return for the node

  """
  return self._UnlockedGetAllNodesInfo()
2032
2038
@locking.ssynchronized(_config_lock, shared=1)
def GetNodeInfoByName(self, node_name):
  """Return the L{objects.Node} object for a named node.

  @param node_name: name of the node to get information for
  @type node_name: string
  @return: the corresponding L{objects.Node} instance or None if no
    information is available

  """
  return self._UnlockedGetNodeInfoByName(node_name)
2050
2061
@locking.ssynchronized(_config_lock, shared=1)
def GetNodeName(self, node_spec):
  """Resolve a node specifier to the node's name.

  @param node_spec: node to get names for
  @type node_spec: either node UUID or a L{objects.Node} object
  @rtype: string
  @return: node name

  """
  return self._UnlockedGetNodeName(node_spec)
2073
2076
@locking.ssynchronized(_config_lock, shared=1)
def GetNodeNames(self, node_specs):
  """Resolve several node specifiers to node names.

  @param node_specs: list of nodes to get names for
  @type node_specs: list of either node UUIDs or L{objects.Node} objects
  @rtype: list of strings
  @return: list of node names

  """
  return self._UnlockedGetNodeNames(node_specs)
2088
@locking.ssynchronized(_config_lock, shared=1)
def GetNodeGroupsFromNodes(self, node_uuids):
  """Return the set of groups the given nodes belong to.

  @type node_uuids: list of string
  @param node_uuids: List of node UUIDs
  @rtype: frozenset

  """
  groups = set()
  for uuid in node_uuids:
    groups.add(self._UnlockedGetNodeInfo(uuid).group)
  return frozenset(groups)
2100
def _UnlockedGetMasterCandidateStats(self, exceptions=None):
  """Count current, desired and possible master candidates.

  @type exceptions: list
  @param exceptions: if passed, list of nodes that should be ignored
  @rtype: tuple
  @return: tuple of (current, desired and possible, possible)

  """
  mc_now = 0
  mc_max = 0
  for node in self._config_data.nodes.values():
    if exceptions and node.uuid in exceptions:
      continue
    # Only healthy, master-capable nodes can possibly be candidates
    if node.master_capable and not (node.offline or node.drained):
      mc_max += 1
      if node.master_candidate:
        mc_now += 1
  # Desired size is capped by how many nodes are eligible at all
  mc_should = min(mc_max, self._config_data.cluster.candidate_pool_size)
  return (mc_now, mc_should, mc_max)
2120
@locking.ssynchronized(_config_lock, shared=1)
def GetMasterCandidateStats(self, exceptions=None):
  """Count current and maximum possible master candidates.

  This is just a wrapper over L{_UnlockedGetMasterCandidateStats}.

  @type exceptions: list
  @param exceptions: if passed, list of nodes that should be ignored
  @rtype: tuple
  @return: tuple of (current, max)

  """
  return self._UnlockedGetMasterCandidateStats(exceptions)
2134
@locking.ssynchronized(_config_lock)
def MaintainCandidatePool(self, exception_node_uuids):
  """Try to grow the candidate pool to the desired size.

  @type exception_node_uuids: list
  @param exception_node_uuids: if passed, list of nodes that should be ignored
  @rtype: list
  @return: list with the adjusted nodes (L{objects.Node} instances)

  """
  # _UnlockedGetMasterCandidateStats returns (current, desired, possible);
  # the second element is bound to "mc_max" here, i.e. the pool is grown
  # to the *desired* size, not the theoretical maximum.
  mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats(
    exception_node_uuids)
  mod_list = []
  if mc_now < mc_max:
    node_list = self._config_data.nodes.keys()
    # Shuffle so the promotion choice is not biased by dict ordering
    random.shuffle(node_list)
    for uuid in node_list:
      if mc_now >= mc_max:
        break
      node = self._config_data.nodes[uuid]
      # Skip nodes that are already candidates or are ineligible
      if (node.master_candidate or node.offline or node.drained or
          node.uuid in exception_node_uuids or not node.master_capable):
        continue
      mod_list.append(node)
      node.master_candidate = True
      node.serial_no += 1
      mc_now += 1
    if mc_now != mc_max:
      # this should not happen
      logging.warning("Warning: MaintainCandidatePool didn't manage to"
                      " fill the candidate pool (%d/%d)", mc_now, mc_max)
  if mod_list:
    # Only write the config if something was actually promoted
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  return mod_list
2171
def _UnlockedAddNodeToGroup(self, node_uuid, nodegroup_uuid):
  """Register a node as member of the given node group.

  Adding the node twice is a no-op.

  @param node_uuid: UUID of the node to add
  @param nodegroup_uuid: UUID of the target node group
  @raise errors.OpExecError: if the node group does not exist

  """
  if nodegroup_uuid not in self._config_data.nodegroups:
    # This can happen if a node group gets deleted between its lookup and
    # when we're adding the ips into it.
    raise errors.OpExecError("Unknown node group: %s" % nodegroup_uuid)

  members = self._config_data.nodegroups[nodegroup_uuid].members
  if node_uuid not in members:
    members.append(node_uuid)
2184
def _UnlockedRemoveNodeFromGroup(self, node):
  """Remove a given node from its group.

  @type node: L{objects.Node}
  @param node: the node to remove from its node group

  """
  nodegroup = node.group
  if nodegroup not in self._config_data.nodegroups:
    logging.warning("Warning: node '%s' has unknown node group '%s'"
                    " (while being removed from it)", node.uuid, nodegroup)
    # Bug fix: previously execution fell through to index the missing
    # group, raising KeyError right after warning about it
    return
  nodegroup_obj = self._config_data.nodegroups[nodegroup]
  if node.uuid not in nodegroup_obj.members:
    logging.warning("Warning: node '%s' not a member of its node group '%s'"
                    " (while being removed from it)", node.uuid, nodegroup)
  else:
    nodegroup_obj.members.remove(node.uuid)
2199
@locking.ssynchronized(_config_lock)
# NOTE(review): method header was lost in extraction; "AssignGroupNodes(self,
# mods)" reconstructed from the docstring and body -- confirm against upstream.
def AssignGroupNodes(self, mods):
  """Changes the group of a number of nodes.

  Works in two phases: first every requested move is validated against
  the current configuration (without modifying anything), then all moves
  are applied.  This way either all moves happen or none do.

  @type mods: list of tuples; (node name, new group UUID)
  @param mods: Node membership modifications

  """
  groups = self._config_data.nodegroups
  nodes = self._config_data.nodes

  resmod = []

  # Try to resolve UUIDs first
  for (node_uuid, new_group_uuid) in mods:
    try:
      node = nodes[node_uuid]
    except KeyError:
      raise errors.ConfigurationError("Unable to find node '%s'" % node_uuid)

    if node.group == new_group_uuid:
      # Node is being assigned to its current group
      logging.debug("Node '%s' was assigned to its current group (%s)",
                    node_uuid, node.group)
      continue

    # Try to find current group of node
    try:
      old_group = groups[node.group]
    except KeyError:
      raise errors.ConfigurationError("Unable to find old group '%s'" %
                                      node.group)

    # Try to find new group for node
    try:
      new_group = groups[new_group_uuid]
    except KeyError:
      raise errors.ConfigurationError("Unable to find new group '%s'" %
                                      new_group_uuid)

    assert node.uuid in old_group.members, \
      ("Inconsistent configuration: node '%s' not listed in members for its"
       " old group '%s'" % (node.uuid, old_group.uuid))
    assert node.uuid not in new_group.members, \
      ("Inconsistent configuration: node '%s' already listed in members for"
       " its new group '%s'" % (node.uuid, new_group.uuid))

    resmod.append((node, old_group, new_group))

  # Apply changes; everything was validated above
  for (node, old_group, new_group) in resmod:
    assert node.uuid != new_group.uuid and old_group.uuid != new_group.uuid, \
      "Assigning to current group is not possible"

    node.group = new_group.uuid

    # Update members of involved groups
    if node.uuid in old_group.members:
      old_group.members.remove(node.uuid)
    if node.uuid not in new_group.members:
      new_group.members.append(node.uuid)

  # Update timestamps and serials (only once per node/group object)
  now = time.time()
  for obj in frozenset(itertools.chain(*resmod)):
    obj.serial_no += 1
    obj.mtime = now

  # Force ssconf update
  self._config_data.cluster.serial_no += 1

  self._WriteConfig()
2272
def _BumpSerialNo(self):
  """Increment the config serial number and refresh its mtime.

  """
  self._config_data.serial_no += 1
  self._config_data.mtime = time.time()
2279
def _AllUUIDObjects(self):
  """Collect every configuration object carrying a uuid attribute.

  @return: list of instances, nodes, node groups, networks, disks, NICs
    and the cluster object itself

  """
  # Note: .values() returns lists here (Python 2), so "+" concatenates
  data = self._config_data
  return (data.instances.values() +
          data.nodes.values() +
          data.nodegroups.values() +
          data.networks.values() +
          self._AllDisks() +
          self._AllNICs() +
          [data.cluster])
2291
# NOTE(review): method header was lost in extraction; "_OpenConfig(self,
# accept_foreign)" reconstructed from the body's use of accept_foreign --
# confirm against upstream.
def _OpenConfig(self, accept_foreign):
  """Read the config data from disk.

  Loads, validates and upgrades the on-disk configuration, and records
  the file ID for later concurrent-modification detection.

  @param accept_foreign: whether a configuration naming another host as
    master may be opened

  """
  raw_data = utils.ReadFile(self._cfg_file)

  try:
    data_dict = serializer.Load(raw_data)
    # Make sure the configuration has the right version
    _ValidateConfig(data_dict)
    data = objects.ConfigData.FromDict(data_dict)
  except errors.ConfigVersionMismatch:
    # Version mismatches are reported as-is to the caller
    raise
  except Exception, err:
    # Anything else (parse errors, malformed dicts) becomes a
    # ConfigurationError
    raise errors.ConfigurationError(err)

  # Sanity check: the cluster section must be complete
  if (not hasattr(data, "cluster") or
      not hasattr(data.cluster, "rsahostkeypub")):
    raise errors.ConfigurationError("Incomplete configuration"
                                    " (missing cluster.rsahostkeypub)")

  if not data.cluster.master_node in data.nodes:
    msg = ("The configuration denotes node %s as master, but does not"
           " contain information about this node" %
           data.cluster.master_node)
    raise errors.ConfigurationError(msg)

  # Refuse to open another cluster's config unless explicitly allowed
  master_info = data.nodes[data.cluster.master_node]
  if master_info.name != self._my_hostname and not accept_foreign:
    msg = ("The configuration denotes node %s as master, while my"
           " hostname is %s; opening a foreign configuration is only"
           " possible in accept_foreign mode" %
           (master_info.name, self._my_hostname))
    raise errors.ConfigurationError(msg)

  self._config_data = data

  # reset the last serial as -1 so that the next write will cause
  # ssconf update
  self._last_cluster_serial = -1

  # Upgrade configuration if needed
  self._UpgradeConfig()

  # Remember the file ID so later writes can detect external modification
  self._cfg_id = utils.GetFileID(path=self._cfg_file)
2336
# NOTE(review): method header was lost in extraction; "_UpgradeConfig(self)"
# reconstructed from the docstring -- confirm against upstream.
def _UpgradeConfig(self):
  """Run any upgrade steps.

  This method performs both in-object upgrades and also update some data
  elements that need uniqueness across the whole configuration or interact
  with other objects.

  @warning: this function will call L{_WriteConfig()}, but also
    L{DropECReservations} so it needs to be called only from a
    "safe" place (the constructor). If one wanted to call it with
    the lock held, a DropECReservationUnlocked would need to be
    created first, to avoid causing deadlock.

  """
  # Keep a copy of the pristine config so we can detect whether the
  # upgrade actually changed anything
  oldconf = copy.deepcopy(self._config_data.ToDict())

  # In-object upgrades
  self._config_data.UpgradeConfig()

  # Fill in UUIDs missing from any object
  for item in self._AllUUIDObjects():
    if item.uuid is None:
      item.uuid = self._GenerateUniqueID(_UPGRADE_CONFIG_JID)
  # Very old configs have no node groups at all: create the default one
  if not self._config_data.nodegroups:
    default_nodegroup_name = constants.INITIAL_NODE_GROUP_NAME
    default_nodegroup = objects.NodeGroup(name=default_nodegroup_name,
                                          members=[])
    self._UnlockedAddNodeGroup(default_nodegroup, _UPGRADE_CONFIG_JID, True)
  # Make sure every node belongs to a group and is listed in it
  for node in self._config_data.nodes.values():
    if not node.group:
      node.group = self.LookupNodeGroup(None)
    # This is technically *not* an upgrade, but needs to be done both when
    # nodegroups are being added, and upon normally loading the config,
    # because the members list of a node group is discarded upon
    # serializing/deserializing the object.
    self._UnlockedAddNodeToGroup(node.uuid, node.group)

  modified = (oldconf != self._config_data.ToDict())
  if modified:
    self._WriteConfig()
    # This is ok even if it acquires the internal lock, as _UpgradeConfig is
    # called only from the constructor
    self.DropECReservations(_UPGRADE_CONFIG_JID)
  else:
    # Nothing changed: only verify and report inconsistencies
    config_errors = self._UnlockedVerifyConfig()
    if config_errors:
      errmsg = ("Loaded configuration data is not consistent: %s" %
                (utils.CommaJoin(config_errors)))
      logging.critical(errmsg)
2387
# NOTE(review): method header was lost in extraction; "_DistributeConfig(self,
# feedback_fn)" reconstructed from the body's use of feedback_fn -- confirm
# against upstream.
def _DistributeConfig(self, feedback_fn):
  """Distribute the configuration to the other nodes.

  Currently, this only copies the configuration file. In the future,
  it could be used to encapsulate the 2/3-phase update mechanism.

  @param feedback_fn: optional callable receiving error messages
  @rtype: bool
  @return: False if at least one copy failed, True otherwise

  """
  if self._offline:
    # Offline instances never push anything out
    return True

  bad = False

  node_list = []
  addr_list = []
  myhostname = self._my_hostname
  # we can skip checking whether _UnlockedGetNodeInfo returns None
  # since the node list comes from _UnlocketGetNodeList, and we are
  # called with the lock held, so no modifications should take place
  # in between
  for node_uuid in self._UnlockedGetNodeList():
    node_info = self._UnlockedGetNodeInfo(node_uuid)
    # Only master candidates (other than ourselves) receive the file
    if node_info.name == myhostname or not node_info.master_candidate:
      continue
    node_list.append(node_info.name)
    addr_list.append(node_info.primary_ip)

  # Upload in one RPC call to all targets; failures are collected,
  # logged and reported but do not abort the remaining uploads
  result = \
    self._GetRpc(addr_list).call_upload_file(node_list, self._cfg_file)
  for to_node, to_result in result.items():
    msg = to_result.fail_msg
    if msg:
      msg = ("Copy of file %s to node %s failed: %s" %
             (self._cfg_file, to_node, msg))
      logging.error(msg)

      if feedback_fn:
        feedback_fn(msg)

      bad = True

  return not bad
2430
def _WriteConfig(self, destination=None, feedback_fn=None):
  """Write the configuration data to persistent storage.

  Verifies the in-memory config, writes it atomically (detecting
  concurrent modification via the stored file ID), then distributes the
  file and, if the cluster serial changed, the ssconf files to the other
  nodes.

  @param destination: optional path to write to instead of the default
    config file
  @param feedback_fn: optional callable receiving warning/error messages
  @raise errors.ConfigurationError: if the on-disk file was modified
    since the last read/write

  """
  assert feedback_fn is None or callable(feedback_fn)

  # Warn on config errors, but we don't abort the save - it's
  # preserving the status quo anyway (the config was already loaded or
  # produced by a cluster operation)
  config_errors = self._UnlockedVerifyConfig()
  if config_errors:
    errmsg = ("Configuration data is not consistent: %s" %
              (utils.CommaJoin(config_errors)))
    logging.critical(errmsg)
    if feedback_fn:
      feedback_fn(errmsg)

  if destination is None:
    destination = self._cfg_file
  self._BumpSerialNo()
  txt = serializer.Dump(self._config_data.ToDict())

  getents = self._getents()
  try:
    # SafeWriteFile compares against self._cfg_id and raises LockError if
    # someone else modified the file since we last read/wrote it
    fd = utils.SafeWriteFile(destination, self._cfg_id, data=txt,
                             close=False, gid=getents.confd_gid, mode=0640)
  except errors.LockError:
    raise errors.ConfigurationError("The configuration file has been"
                                    " modified since the last write, cannot"
                                    " update")
  try:
    # Remember the new file ID for the next write
    self._cfg_id = utils.GetFileID(fd=fd)
  finally:
    os.close(fd)

  self.write_count += 1

  # and redistribute the config file to master candidates
  self._DistributeConfig(feedback_fn)

  # Write ssconf files on all nodes (including locally)
  if self._last_cluster_serial < self._config_data.cluster.serial_no:
    if not self._offline:
      result = self._GetRpc(None).call_write_ssconf_files(
        self._UnlockedGetNodeNames(self._UnlockedGetOnlineNodeList()),
        self._UnlockedGetSsconfValues())

      # ssconf failures are only warnings: the config itself is saved
      for nname, nresu in result.items():
        msg = nresu.fail_msg
        if msg:
          errmsg = ("Error while uploading ssconf files to"
                    " node %s: %s" % (nname, msg))
          logging.warning(errmsg)

          if feedback_fn:
            feedback_fn(errmsg)

    self._last_cluster_serial = self._config_data.cluster.serial_no
2490
def _GetAllHvparamsStrings(self, hypervisors):
  """Render the hvparams of all given hypervisors as strings.

  @type hypervisors: list of string
  @param hypervisors: list of hypervisor names
  @rtype: dict of strings
  @returns: dictionary mapping the hypervisor name to a string representation
    of the hypervisor's hvparams

  """
  return dict((hv, self._UnlockedGetHvparamsString(hv))
              for hv in hypervisors)
2505
@staticmethod
def _ExtendByAllHvparamsStrings(ssconf_values, all_hvparams):
  """Merge per-hypervisor parameter strings into the ssconf dict.

  Mutates C{ssconf_values} in place, adding one entry per hypervisor
  under the C{SS_HVPARAMS_PREF}-prefixed key, and returns it.

  @type ssconf_values: dict of strings
  @param ssconf_values: dictionary mapping ssconf_keys to strings
    representing the content of ssconf files
  @type all_hvparams: dict of strings
  @param all_hvparams: dictionary mapping hypervisor names to a string
    representation of their hvparams
  @rtype: same as ssconf_values
  @returns: the ssconf_values dictionary extended by hvparams

  """
  for hv, hvparams_str in all_hvparams.items():
    ssconf_values[constants.SS_HVPARAMS_PREF + hv] = hvparams_str
  return ssconf_values
2524
2526 """Return the values needed by ssconf.
2527
2528 @rtype: dict
2529 @return: a dictionary with keys the ssconf names and values their
2530 associated value
2531
2532 """
2533 fn = "\n".join
2534 instance_names = utils.NiceSort(
2535 [inst.name for inst in
2536 self._UnlockedGetAllInstancesInfo().values()])
2537 node_infos = self._UnlockedGetAllNodesInfo().values()
2538 node_names = [node.name for node in node_infos]
2539 node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip)
2540 for ninfo in node_infos]
2541 node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip)
2542 for ninfo in node_infos]
2543
2544 instance_data = fn(instance_names)
2545 off_data = fn(node.name for node in node_infos if node.offline)
2546 on_data = fn(node.name for node in node_infos if not node.offline)
2547 mc_data = fn(node.name for node in node_infos if node.master_candidate)
2548 mc_ips_data = fn(node.primary_ip for node in node_infos
2549 if node.master_candidate)
2550 node_data = fn(node_names)
2551 node_pri_ips_data = fn(node_pri_ips)
2552 node_snd_ips_data = fn(node_snd_ips)
2553
2554 cluster = self._config_data.cluster
2555 cluster_tags = fn(cluster.GetTags())
2556
2557 hypervisor_list = fn(cluster.enabled_hypervisors)
2558 all_hvparams = self._GetAllHvparamsStrings(constants.HYPER_TYPES)
2559
2560 uid_pool = uidpool.FormatUidPool(cluster.uid_pool, separator="\n")
2561
2562 nodegroups = ["%s %s" % (nodegroup.uuid, nodegroup.name) for nodegroup in
2563 self._config_data.nodegroups.values()]
2564 nodegroups_data = fn(utils.NiceSort(nodegroups))
2565 networks = ["%s %s" % (net.uuid, net.name) for net in
2566 self._config_data.networks.values()]
2567 networks_data = fn(utils.NiceSort(networks))
2568
2569 ssconf_values = {
2570 constants.SS_CLUSTER_NAME: cluster.cluster_name,
2571 constants.SS_CLUSTER_TAGS: cluster_tags,
2572 constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
2573 constants.SS_SHARED_FILE_STORAGE_DIR: cluster.shared_file_storage_dir,
2574 constants.SS_MASTER_CANDIDATES: mc_data,
2575 constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data,
2576 constants.SS_MASTER_IP: cluster.master_ip,
2577 constants.SS_MASTER_NETDEV: cluster.master_netdev,
2578 constants.SS_MASTER_NETMASK: str(cluster.master_netmask),
2579 constants.SS_MASTER_NODE: self._UnlockedGetNodeName(cluster.master_node),
2580 constants.SS_NODE_LIST: node_data,
2581 constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,
2582 constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data,
2583 constants.SS_OFFLINE_NODES: off_data,
2584 constants.SS_ONLINE_NODES: on_data,
2585 constants.SS_PRIMARY_IP_FAMILY: str(cluster.primary_ip_family),
2586 constants.SS_INSTANCE_LIST: instance_data,
2587 constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION,
2588 constants.SS_HYPERVISOR_LIST: hypervisor_list,
2589 constants.SS_MAINTAIN_NODE_HEALTH: str(cluster.maintain_node_health),
2590 constants.SS_UID_POOL: uid_pool,
2591 constants.SS_NODEGROUPS: nodegroups_data,
2592 constants.SS_NETWORKS: networks_data,
2593 }
2594 ssconf_values = self._ExtendByAllHvparamsStrings(ssconf_values,
2595 all_hvparams)
2596 bad_values = [(k, v) for k, v in ssconf_values.items()
2597 if not isinstance(v, (str, basestring))]
2598 if bad_values:
2599 err = utils.CommaJoin("%s=%s" % (k, v) for k, v in bad_values)
2600 raise errors.ConfigurationError("Some ssconf key(s) have non-string"
2601 " values: %s" % err)
2602 return ssconf_values
2603
2604 @locking.ssynchronized(_config_lock, shared=1)
2610
2611 @locking.ssynchronized(_config_lock, shared=1)
2613 """Return the volume group name.
2614
2615 """
2616 return self._config_data.cluster.volume_group_name
2617
2618 @locking.ssynchronized(_config_lock)
2620 """Set the volume group name.
2621
2622 """
2623 self._config_data.cluster.volume_group_name = vg_name
2624 self._config_data.cluster.serial_no += 1
2625 self._WriteConfig()
2626
2627 @locking.ssynchronized(_config_lock, shared=1)
2629 """Return DRBD usermode helper.
2630
2631 """
2632 return self._config_data.cluster.drbd_usermode_helper
2633
2634 @locking.ssynchronized(_config_lock)
2636 """Set DRBD usermode helper.
2637
2638 """
2639 self._config_data.cluster.drbd_usermode_helper = drbd_helper
2640 self._config_data.cluster.serial_no += 1
2641 self._WriteConfig()
2642
2643 @locking.ssynchronized(_config_lock, shared=1)
2645 """Return the mac prefix.
2646
2647 """
2648 return self._config_data.cluster.mac_prefix
2649
2650 @locking.ssynchronized(_config_lock, shared=1)
2652 """Returns information about the cluster
2653
2654 @rtype: L{objects.Cluster}
2655 @return: the cluster object
2656
2657 """
2658 return self._config_data.cluster
2659
2660 @locking.ssynchronized(_config_lock, shared=1)
2662 """Check if in there is at disk of the given type in the configuration.
2663
2664 """
2665 return self._config_data.HasAnyDiskOfType(dev_type)
2666
2667 @locking.ssynchronized(_config_lock)
2668 - def Update(self, target, feedback_fn, ec_id=None):
2669 """Notify function to be called after updates.
2670
2671 This function must be called when an object (as returned by
2672 GetInstanceInfo, GetNodeInfo, GetCluster) has been updated and the
2673 caller wants the modifications saved to the backing store. Note
2674 that all modified objects will be saved, but the target argument
2675 is the one the caller wants to ensure that it's saved.
2676
2677 @param target: an instance of either L{objects.Cluster},
2678 L{objects.Node} or L{objects.Instance} which is existing in
2679 the cluster
2680 @param feedback_fn: Callable feedback function
2681
2682 """
2683 if self._config_data is None:
2684 raise errors.ProgrammerError("Configuration file not read,"
2685 " cannot save.")
2686 update_serial = False
2687 if isinstance(target, objects.Cluster):
2688 test = target == self._config_data.cluster
2689 elif isinstance(target, objects.Node):
2690 test = target in self._config_data.nodes.values()
2691 update_serial = True
2692 elif isinstance(target, objects.Instance):
2693 test = target in self._config_data.instances.values()
2694 elif isinstance(target, objects.NodeGroup):
2695 test = target in self._config_data.nodegroups.values()
2696 elif isinstance(target, objects.Network):
2697 test = target in self._config_data.networks.values()
2698 else:
2699 raise errors.ProgrammerError("Invalid object type (%s) passed to"
2700 " ConfigWriter.Update" % type(target))
2701 if not test:
2702 raise errors.ConfigurationError("Configuration updated since object"
2703 " has been read or unknown object")
2704 target.serial_no += 1
2705 target.mtime = now = time.time()
2706
2707 if update_serial:
2708
2709 self._config_data.cluster.serial_no += 1
2710 self._config_data.cluster.mtime = now
2711
2712 if isinstance(target, objects.Instance):
2713 self._UnlockedReleaseDRBDMinors(target.uuid)
2714
2715 if ec_id is not None:
2716
2717 self._UnlockedCommitTemporaryIps(ec_id)
2718
2719 self._WriteConfig(feedback_fn=feedback_fn)
2720
2721 @locking.ssynchronized(_config_lock)
2723 """Drop per-execution-context reservations
2724
2725 """
2726 for rm in self._all_rms:
2727 rm.DropECReservations(ec_id)
2728
2729 @locking.ssynchronized(_config_lock, shared=1)
2731 """Get configuration info of all the networks.
2732
2733 """
2734 return dict(self._config_data.networks)
2735
2737 """Get the list of networks.
2738
2739 This function is for internal use, when the config lock is already held.
2740
2741 """
2742 return self._config_data.networks.keys()
2743
2744 @locking.ssynchronized(_config_lock, shared=1)
2746 """Get the list of networks.
2747
2748 @return: array of networks, ex. ["main", "vlan100", "200]
2749
2750 """
2751 return self._UnlockedGetNetworkList()
2752
2753 @locking.ssynchronized(_config_lock, shared=1)
2755 """Get a list of network names
2756
2757 """
2758 names = [net.name
2759 for net in self._config_data.networks.values()]
2760 return names
2761
2763 """Returns information about a network.
2764
2765 This function is for internal use, when the config lock is already held.
2766
2767 """
2768 if uuid not in self._config_data.networks:
2769 return None
2770
2771 return self._config_data.networks[uuid]
2772
2773 @locking.ssynchronized(_config_lock, shared=1)
2775 """Returns information about a network.
2776
2777 It takes the information from the configuration file.
2778
2779 @param uuid: UUID of the network
2780
2781 @rtype: L{objects.Network}
2782 @return: the network object
2783
2784 """
2785 return self._UnlockedGetNetwork(uuid)
2786
2787 @locking.ssynchronized(_config_lock)
2788 - def AddNetwork(self, net, ec_id, check_uuid=True):
2789 """Add a network to the configuration.
2790
2791 @type net: L{objects.Network}
2792 @param net: the Network object to add
2793 @type ec_id: string
2794 @param ec_id: unique id for the job to use when creating a missing UUID
2795
2796 """
2797 self._UnlockedAddNetwork(net, ec_id, check_uuid)
2798 self._WriteConfig()
2799
2801 """Add a network to the configuration.
2802
2803 """
2804 logging.info("Adding network %s to configuration", net.name)
2805
2806 if check_uuid:
2807 self._EnsureUUID(net, ec_id)
2808
2809 net.serial_no = 1
2810 net.ctime = net.mtime = time.time()
2811 self._config_data.networks[net.uuid] = net
2812 self._config_data.cluster.serial_no += 1
2813
2815 """Lookup a network's UUID.
2816
2817 @type target: string
2818 @param target: network name or UUID
2819 @rtype: string
2820 @return: network UUID
2821 @raises errors.OpPrereqError: when the target network cannot be found
2822
2823 """
2824 if target is None:
2825 return None
2826 if target in self._config_data.networks:
2827 return target
2828 for net in self._config_data.networks.values():
2829 if net.name == target:
2830 return net.uuid
2831 raise errors.OpPrereqError("Network '%s' not found" % target,
2832 errors.ECODE_NOENT)
2833
2834 @locking.ssynchronized(_config_lock, shared=1)
2836 """Lookup a network's UUID.
2837
2838 This function is just a wrapper over L{_UnlockedLookupNetwork}.
2839
2840 @type target: string
2841 @param target: network name or UUID
2842 @rtype: string
2843 @return: network UUID
2844
2845 """
2846 return self._UnlockedLookupNetwork(target)
2847
2848 @locking.ssynchronized(_config_lock)
2850 """Remove a network from the configuration.
2851
2852 @type network_uuid: string
2853 @param network_uuid: the UUID of the network to remove
2854
2855 """
2856 logging.info("Removing network %s from configuration", network_uuid)
2857
2858 if network_uuid not in self._config_data.networks:
2859 raise errors.ConfigurationError("Unknown network '%s'" % network_uuid)
2860
2861 del self._config_data.networks[network_uuid]
2862 self._config_data.cluster.serial_no += 1
2863 self._WriteConfig()
2864
2866 """Get the netparams (mode, link) of a network.
2867
2868 Get a network's netparams for a given node.
2869
2870 @type net_uuid: string
2871 @param net_uuid: network uuid
2872 @type node_uuid: string
2873 @param node_uuid: node UUID
2874 @rtype: dict or None
2875 @return: netparams
2876
2877 """
2878 node_info = self._UnlockedGetNodeInfo(node_uuid)
2879 nodegroup_info = self._UnlockedGetNodeGroup(node_info.group)
2880 netparams = nodegroup_info.networks.get(net_uuid, None)
2881
2882 return netparams
2883
2884 @locking.ssynchronized(_config_lock, shared=1)
2886 """Locking wrapper of _UnlockedGetGroupNetParams()
2887
2888 """
2889 return self._UnlockedGetGroupNetParams(net_uuid, node_uuid)
2890
2891 @locking.ssynchronized(_config_lock, shared=1)
2893 """Check IP uniqueness in nodegroup.
2894
2895 Check networks that are connected in the node's node group
2896 if ip is contained in any of them. Used when creating/adding
2897 a NIC to ensure uniqueness among nodegroups.
2898
2899 @type ip: string
2900 @param ip: ip address
2901 @type node_uuid: string
2902 @param node_uuid: node UUID
2903 @rtype: (string, dict) or (None, None)
2904 @return: (network name, netparams)
2905
2906 """
2907 if ip is None:
2908 return (None, None)
2909 node_info = self._UnlockedGetNodeInfo(node_uuid)
2910 nodegroup_info = self._UnlockedGetNodeGroup(node_info.group)
2911 for net_uuid in nodegroup_info.networks.keys():
2912 net_info = self._UnlockedGetNetwork(net_uuid)
2913 pool = network.AddressPool(net_info)
2914 if pool.Contains(ip):
2915 return (net_info.name, nodegroup_info.networks[net_uuid])
2916
2917 return (None, None)
2918