22 """Configuration management for Ganeti
23
24 This module provides the interface to the Ganeti cluster configuration.
25
26 The configuration data is stored on every node but is updated on the master
27 only. After each update, the master distributes the data to the other nodes.
28
29 Currently, the data storage format is JSON. YAML was slow and consuming too
30 much memory.
31
32 """


import copy
import os
import random
import logging
import time
import itertools

from ganeti import errors
from ganeti import locking
from ganeti import utils
from ganeti import constants
from ganeti import rpc
from ganeti import objects
from ganeti import serializer
from ganeti import uidpool
from ganeti import netutils
from ganeti import runtime
from ganeti import pathutils
from ganeti import network


_config_lock = locking.SharedLock("ConfigWriter")


_UPGRADE_CONFIG_JID = "jid-cfg-upgrade"


class TemporaryReservationManager(object):
  """A temporary resource reservation manager.

  This is used to reserve resources in a job, before using them, making sure
  other jobs cannot get them in the meantime.

  """
  def __init__(self):
    self._ec_reserved = {}

  def Reserved(self, resource):
    for holder_reserved in self._ec_reserved.values():
      if resource in holder_reserved:
        return True
    return False

  def Reserve(self, ec_id, resource):
    if self.Reserved(resource):
      raise errors.ReservationError("Duplicate reservation for resource '%s'"
                                    % str(resource))
    if ec_id not in self._ec_reserved:
      self._ec_reserved[ec_id] = set([resource])
    else:
      self._ec_reserved[ec_id].add(resource)

  def DropECReservations(self, ec_id):
    if ec_id in self._ec_reserved:
      del self._ec_reserved[ec_id]

  def GetReserved(self):
    all_reserved = set()
    for holder_reserved in self._ec_reserved.values():
      all_reserved.update(holder_reserved)
    return all_reserved

113 """ Used when you want to retrieve all reservations for a specific
114 execution context. E.g when commiting reserved IPs for a specific
115 network.
116
117 """
118 ec_reserved = set()
119 if ec_id in self._ec_reserved:
120 ec_reserved.update(self._ec_reserved[ec_id])
121 return ec_reserved
122
  def Generate(self, existing, generate_one_fn, ec_id):
    """Generate a new resource of this type.

    """
    assert callable(generate_one_fn)

    all_elems = self.GetReserved()
    all_elems.update(existing)
    retries = 64
    while retries > 0:
      new_resource = generate_one_fn()
      retries -= 1
      if new_resource is not None and new_resource not in all_elems:
        break
    else:
      raise errors.ConfigurationError("Not able to generate new resource"
                                      " (last tried: %s)" % new_resource)
    self.Reserve(ec_id, new_resource)
    return new_resource

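
# Illustrative sketch (not from the original module): how a job could use the
# reservation manager above. The execution context id "job-42" and the values
# are made up for the example.
#
#   trm = TemporaryReservationManager()
#   trm.Reserve("job-42", "aa:00:00:11:22:33")
#   trm.Reserved("aa:00:00:11:22:33")      # -> True
#   trm.GetECReserved("job-42")            # -> set(["aa:00:00:11:22:33"])
#   trm.Generate([], lambda: "aa:00:00:44:55:66", "job-42")
#   trm.DropECReservations("job-42")       # release everything held by job-42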
144 """Wrapper around L{utils.text.MatchNameComponent}.
145
146 """
147 return utils.MatchNameComponent(short_name, names, case_sensitive=False)
148
151 """Checks if instance's disks' C{iv_name} attributes are in order.
152
153 @type disks: list of L{objects.Disk}
154 @param disks: List of disks
155 @rtype: list of tuples; (int, string, string)
156 @return: List of wrongly named disks, each tuple contains disk index,
157 expected and actual name
158
159 """
160 result = []
161
162 for (idx, disk) in enumerate(disks):
163 exp_iv_name = "disk/%s" % idx
164 if disk.iv_name != exp_iv_name:
165 result.append((idx, exp_iv_name, disk.iv_name))
166
167 return result
168
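
# Illustrative sketch (assumption, not from the original module): given disks
# whose iv_name got out of sync, e.g. after removing a middle disk, the
# checker reports the expected names:
#
#   disks[0].iv_name == "disk/0"   # in order, not reported
#   disks[1].iv_name == "disk/2"   # stale name
#   _CheckInstanceDiskIvNames(disks) == [(1, "disk/1", "disk/2")]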
171 """The interface to the cluster configuration.
172
173 @ivar _temporary_lvs: reservation manager for temporary LVs
174 @ivar _all_rms: a list of all temporary reservation managers
175
176 """
206
208 """Returns RPC runner for configuration.
209
210 """
211 return rpc.ConfigRunner(self._context, address_list)
212
  def SetContext(self, context):
    """Sets Ganeti context.

    """
    self._context = context

  @staticmethod
  def IsCluster():
    """Check if the cluster is configured.

    """
    return os.path.exists(pathutils.CLUSTER_CONF_FILE)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNdParams(self, node):
    """Get the node params populated with cluster defaults.

    @type node: L{objects.Node}
    @param node: The node we want to know the params for
    @return: A dict with the filled in node params

    """
    nodegroup = self._UnlockedGetNodeGroup(node.group)
    return self._config_data.cluster.FillND(node, nodegroup)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceDiskParams(self, instance):
    """Get the disk params populated with inherit chain.

    @type instance: L{objects.Instance}
    @param instance: The instance we want to know the params for
    @return: A dict with the filled in disk params

    """
    node = self._UnlockedGetNodeInfo(instance.primary_node)
    nodegroup = self._UnlockedGetNodeGroup(node.group)
    return self._UnlockedGetGroupDiskParams(nodegroup)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetGroupDiskParams(self, group):
    """Get the disk params populated with inherit chain.

    @type group: L{objects.NodeGroup}
    @param group: The group we want to know the params for
    @return: A dict with the filled in disk params

    """
    return self._UnlockedGetGroupDiskParams(group)

264 """Get the disk params populated with inherit chain down to node-group.
265
266 @type group: L{objects.NodeGroup}
267 @param group: The group we want to know the params for
268 @return: A dict with the filled in disk params
269
270 """
271 return self._config_data.cluster.SimpleFillDP(group.diskparams)
272
274 """Return the network mac prefix if it exists or the cluster level default.
275
276 """
277 prefix = None
278 if net_uuid:
279 nobj = self._UnlockedGetNetwork(net_uuid)
280 if nobj.mac_prefix:
281 prefix = nobj.mac_prefix
282
283 return prefix
284
286 """Return a function that randomly generates a MAC suffic
287 and appends it to the given prefix. If prefix is not given get
288 the cluster level default.
289
290 """
291 if not prefix:
292 prefix = self._config_data.cluster.mac_prefix
293
294 def GenMac():
295 byte1 = random.randrange(0, 256)
296 byte2 = random.randrange(0, 256)
297 byte3 = random.randrange(0, 256)
298 mac = "%s:%02x:%02x:%02x" % (prefix, byte1, byte2, byte3)
299 return mac
300
301 return GenMac
302
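  # Illustrative sketch (not from the original module): the returned closure
  # keeps the prefix fixed and randomizes only the last three octets, e.g.
  #
  #   gen_mac = cfg._GenerateOneMAC("aa:00:00")
  #   gen_mac()   # -> "aa:00:00:3f:91:07" (random suffix)
  #   gen_mac()   # -> "aa:00:00:c2:18:5a"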
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateMAC(self, net_uuid, ec_id):
    """Generate a MAC for an instance.

    This should check the current instances for duplicates.

    """
    existing = self._AllMACs()
    prefix = self._UnlockedGetNetworkMACPrefix(net_uuid)
    gen_mac = self._GenerateOneMAC(prefix)
    return self._temporary_ids.Generate(existing, gen_mac, ec_id)

  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveMAC(self, mac, ec_id):
    """Reserve a MAC for an instance.

    This only checks instances managed by this cluster, it does not
    check for potential collisions elsewhere.

    """
    all_macs = self._AllMACs()
    if mac in all_macs:
      raise errors.ReservationError("mac already in use")
    else:
      self._temporary_macs.Reserve(ec_id, mac)

330 """Commit all reserved IP address to their respective pools
331
332 """
333 for action, address, net_uuid in self._temporary_ips.GetECReserved(ec_id):
334 self._UnlockedCommitIp(action, net_uuid, address)
335
  def _UnlockedCommitIp(self, action, net_uuid, address):
    """Commit a reserved IP address to an IP pool.

    The IP address is taken from the network's IP pool and marked as
    reserved/free.

    """
    nobj = self._UnlockedGetNetwork(net_uuid)
    pool = network.AddressPool(nobj)
    if action == constants.RESERVE_ACTION:
      pool.Reserve(address)
    elif action == constants.RELEASE_ACTION:
      pool.Release(address)

350 """Give a specific IP address back to an IP pool.
351
352 The IP address is returned to the IP pool designated by pool_id and marked
353 as reserved.
354
355 """
356 self._temporary_ips.Reserve(ec_id,
357 (constants.RELEASE_ACTION, address, net_uuid))
358
  @locking.ssynchronized(_config_lock, shared=1)
  def ReleaseIp(self, net_uuid, address, ec_id):
    """Give a specified IP address back to an IP pool.

    This is just a wrapper around L{_UnlockedReleaseIp}.

    """
    if net_uuid:
      self._UnlockedReleaseIp(net_uuid, address, ec_id)

  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateIp(self, net_uuid, ec_id):
    """Find a free IPv4 address for an instance.

    """
    nobj = self._UnlockedGetNetwork(net_uuid)
    pool = network.AddressPool(nobj)

    def gen_one():
      try:
        ip = pool.GenerateFree()
      except errors.AddressPoolError:
        raise errors.ReservationError("Cannot generate IP. Network is full")
      return (constants.RESERVE_ACTION, ip, net_uuid)

    _, address, _ = self._temporary_ips.Generate([], gen_one, ec_id)
    return address


  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveIp(self, net_uuid, address, ec_id):
    """Reserve a given IPv4 address for use by an instance.

    """
    if net_uuid:
      return self._UnlockedReserveIp(net_uuid, address, ec_id)

  @locking.ssynchronized(_config_lock, shared=1)
  def ReserveLV(self, lv_name, ec_id):
    """Reserve a VG/LV pair for an instance.

    @type lv_name: string
    @param lv_name: the logical volume name to reserve

    """
    all_lvs = self._AllLVs()
    if lv_name in all_lvs:
      raise errors.ReservationError("LV already in use")
    else:
      self._temporary_lvs.Reserve(ec_id, lv_name)

  @locking.ssynchronized(_config_lock, shared=1)
  def DropECReservations(self, ec_id):
    """Drop per-execution-context reservations.

    """
    for rm in self._all_rms:
      rm.DropECReservations(ec_id)

438 """Compute the list of all LVs.
439
440 """
441 lvnames = set()
442 for instance in self._config_data.instances.values():
443 node_data = instance.MapLVsByNode()
444 for lv_list in node_data.values():
445 lvnames.update(lv_list)
446 return lvnames
447
449 """Compute the list of all Disks (recursively, including children).
450
451 """
452 def DiskAndAllChildren(disk):
453 """Returns a list containing the given disk and all of his children.
454
455 """
456 disks = [disk]
457 if disk.children:
458 for child_disk in disk.children:
459 disks.extend(DiskAndAllChildren(child_disk))
460 return disks
461
462 disks = []
463 for instance in self._config_data.instances.values():
464 for disk in instance.disks:
465 disks.extend(DiskAndAllChildren(disk))
466 return disks
467
469 """Compute the list of all NICs.
470
471 """
472 nics = []
473 for instance in self._config_data.instances.values():
474 nics.extend(instance.nics)
475 return nics
476
  def _AllIDs(self, include_temporary):
    """Compute the list of all UUIDs and names we have.

    @type include_temporary: boolean
    @param include_temporary: whether to include the _temporary_ids set
    @rtype: set
    @return: a set of IDs

    """
    existing = set()
    if include_temporary:
      existing.update(self._temporary_ids.GetReserved())
    existing.update(self._AllLVs())
    existing.update(self._config_data.instances.keys())
    existing.update(self._config_data.nodes.keys())
    existing.update([i.uuid for i in self._AllUUIDObjects() if i.uuid])
    return existing

496 """Generate an unique UUID.
497
498 This checks the current node, instances and disk names for
499 duplicates.
500
501 @rtype: string
502 @return: the unique id
503
504 """
505 existing = self._AllIDs(include_temporary=False)
506 return self._temporary_ids.Generate(existing, utils.NewUUID, ec_id)
507
  @locking.ssynchronized(_config_lock, shared=1)
  def GenerateUniqueID(self, ec_id):
    """Generate a unique ID.

    This is just a wrapper over the unlocked version.

    @type ec_id: string
    @param ec_id: unique id for the job to reserve the id to

    """
    return self._GenerateUniqueID(ec_id)

521 """Return all MACs present in the config.
522
523 @rtype: list
524 @return: the list of all MACs
525
526 """
527 result = []
528 for instance in self._config_data.instances.values():
529 for nic in instance.nics:
530 result.append(nic.mac)
531
532 return result
533
535 """Return all DRBD secrets present in the config.
536
537 @rtype: list
538 @return: the list of all DRBD secrets
539
540 """
541 def helper(disk, result):
542 """Recursively gather secrets from this disk."""
543 if disk.dev_type == constants.DT_DRBD8:
544 result.append(disk.logical_id[5])
545 if disk.children:
546 for child in disk.children:
547 helper(child, result)
548
549 result = []
550 for instance in self._config_data.instances.values():
551 for disk in instance.disks:
552 helper(disk, result)
553
554 return result
555
557 """Compute duplicate disk IDs
558
559 @type disk: L{objects.Disk}
560 @param disk: the disk at which to start searching
561 @type l_ids: list
562 @param l_ids: list of current logical ids
563 @type p_ids: list
564 @param p_ids: list of current physical ids
565 @rtype: list
566 @return: a list of error messages
567
568 """
569 result = []
570 if disk.logical_id is not None:
571 if disk.logical_id in l_ids:
572 result.append("duplicate logical id %s" % str(disk.logical_id))
573 else:
574 l_ids.append(disk.logical_id)
575 if disk.physical_id is not None:
576 if disk.physical_id in p_ids:
577 result.append("duplicate physical id %s" % str(disk.physical_id))
578 else:
579 p_ids.append(disk.physical_id)
580
581 if disk.children:
582 for child in disk.children:
583 result.extend(self._CheckDiskIDs(child, l_ids, p_ids))
584 return result
585
587 """Verify function.
588
589 @rtype: list
590 @return: a list of error messages; a non-empty list signifies
591 configuration errors
592
593 """
594
595 result = []
596 seen_macs = []
597 ports = {}
598 data = self._config_data
599 cluster = data.cluster
600 seen_lids = []
601 seen_pids = []
602
603
604 if not cluster.enabled_hypervisors:
605 result.append("enabled hypervisors list doesn't have any entries")
606 invalid_hvs = set(cluster.enabled_hypervisors) - constants.HYPER_TYPES
607 if invalid_hvs:
608 result.append("enabled hypervisors contains invalid entries: %s" %
609 utils.CommaJoin(invalid_hvs))
610 missing_hvp = (set(cluster.enabled_hypervisors) -
611 set(cluster.hvparams.keys()))
612 if missing_hvp:
613 result.append("hypervisor parameters missing for the enabled"
614 " hypervisor(s) %s" % utils.CommaJoin(missing_hvp))
615
616 if not cluster.enabled_disk_templates:
617 result.append("enabled disk templates list doesn't have any entries")
618 invalid_disk_templates = set(cluster.enabled_disk_templates) \
619 - constants.DISK_TEMPLATES
620 if invalid_disk_templates:
621 result.append("enabled disk templates list contains invalid entries:"
622 " %s" % utils.CommaJoin(invalid_disk_templates))
623
624 if cluster.master_node not in data.nodes:
625 result.append("cluster has invalid primary node '%s'" %
626 cluster.master_node)
627
628 def _helper(owner, attr, value, template):
629 try:
630 utils.ForceDictType(value, template)
631 except errors.GenericError, err:
632 result.append("%s has invalid %s: %s" % (owner, attr, err))
633
634 def _helper_nic(owner, params):
635 try:
636 objects.NIC.CheckParameterSyntax(params)
637 except errors.ConfigurationError, err:
638 result.append("%s has invalid nicparams: %s" % (owner, err))
639
640 def _helper_ipolicy(owner, ipolicy, iscluster):
641 try:
642 objects.InstancePolicy.CheckParameterSyntax(ipolicy, iscluster)
643 except errors.ConfigurationError, err:
644 result.append("%s has invalid instance policy: %s" % (owner, err))
645 for key, value in ipolicy.items():
646 if key == constants.ISPECS_MINMAX:
647 for k in range(len(value)):
648 _helper_ispecs(owner, "ipolicy/%s[%s]" % (key, k), value[k])
649 elif key == constants.ISPECS_STD:
650 _helper(owner, "ipolicy/" + key, value,
651 constants.ISPECS_PARAMETER_TYPES)
652 else:
653
654 if key in constants.IPOLICY_PARAMETERS:
655 exp_type = float
656 else:
657 exp_type = list
658 if not isinstance(value, exp_type):
659 result.append("%s has invalid instance policy: for %s,"
660 " expecting %s, got %s" %
661 (owner, key, exp_type.__name__, type(value)))
662
663 def _helper_ispecs(owner, parentkey, params):
664 for (key, value) in params.items():
665 fullkey = "/".join([parentkey, key])
666 _helper(owner, fullkey, value, constants.ISPECS_PARAMETER_TYPES)
667
668
669 _helper("cluster", "beparams", cluster.SimpleFillBE({}),
670 constants.BES_PARAMETER_TYPES)
671 _helper("cluster", "nicparams", cluster.SimpleFillNIC({}),
672 constants.NICS_PARAMETER_TYPES)
673 _helper_nic("cluster", cluster.SimpleFillNIC({}))
674 _helper("cluster", "ndparams", cluster.SimpleFillND({}),
675 constants.NDS_PARAMETER_TYPES)
676 _helper_ipolicy("cluster", cluster.ipolicy, True)
677
678
679 for instance_uuid in data.instances:
680 instance = data.instances[instance_uuid]
681 if instance.uuid != instance_uuid:
682 result.append("instance '%s' is indexed by wrong UUID '%s'" %
683 (instance.name, instance_uuid))
684 if instance.primary_node not in data.nodes:
685 result.append("instance '%s' has invalid primary node '%s'" %
686 (instance.name, instance.primary_node))
687 for snode in instance.secondary_nodes:
688 if snode not in data.nodes:
689 result.append("instance '%s' has invalid secondary node '%s'" %
690 (instance.name, snode))
691 for idx, nic in enumerate(instance.nics):
692 if nic.mac in seen_macs:
693 result.append("instance '%s' has NIC %d mac %s duplicate" %
694 (instance.name, idx, nic.mac))
695 else:
696 seen_macs.append(nic.mac)
697 if nic.nicparams:
698 filled = cluster.SimpleFillNIC(nic.nicparams)
699 owner = "instance %s nic %d" % (instance.name, idx)
700 _helper(owner, "nicparams",
701 filled, constants.NICS_PARAMETER_TYPES)
702 _helper_nic(owner, filled)
703
704
705 if not instance.disk_template in data.cluster.enabled_disk_templates:
706 result.append("instance '%s' uses the disabled disk template '%s'." %
707 (instance.name, instance.disk_template))
708
709
710 if instance.beparams:
711 _helper("instance %s" % instance.name, "beparams",
712 cluster.FillBE(instance), constants.BES_PARAMETER_TYPES)
713
714
715 for (idx, dsk) in enumerate(instance.disks):
716 if dsk.dev_type in constants.DTS_DRBD:
717 tcp_port = dsk.logical_id[2]
718 if tcp_port not in ports:
719 ports[tcp_port] = []
720 ports[tcp_port].append((instance.name, "drbd disk %s" % idx))
721
722 net_port = getattr(instance, "network_port", None)
723 if net_port is not None:
724 if net_port not in ports:
725 ports[net_port] = []
726 ports[net_port].append((instance.name, "network port"))
727
728
729 for idx, disk in enumerate(instance.disks):
730 result.extend(["instance '%s' disk %d error: %s" %
731 (instance.name, idx, msg) for msg in disk.Verify()])
732 result.extend(self._CheckDiskIDs(disk, seen_lids, seen_pids))
733
734 wrong_names = _CheckInstanceDiskIvNames(instance.disks)
735 if wrong_names:
736 tmp = "; ".join(("name of disk %s should be '%s', but is '%s'" %
737 (idx, exp_name, actual_name))
738 for (idx, exp_name, actual_name) in wrong_names)
739
740 result.append("Instance '%s' has wrongly named disks: %s" %
741 (instance.name, tmp))
742
743
744 for free_port in cluster.tcpudp_port_pool:
745 if free_port not in ports:
746 ports[free_port] = []
747 ports[free_port].append(("cluster", "port marked as free"))
748
749
750 keys = ports.keys()
751 keys.sort()
752 for pnum in keys:
753 pdata = ports[pnum]
754 if len(pdata) > 1:
755 txt = utils.CommaJoin(["%s/%s" % val for val in pdata])
756 result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))
757
758
759 if keys:
760 if keys[-1] > cluster.highest_used_port:
761 result.append("Highest used port mismatch, saved %s, computed %s" %
762 (cluster.highest_used_port, keys[-1]))
763
764 if not data.nodes[cluster.master_node].master_candidate:
765 result.append("Master node is not a master candidate")
766
767
768 mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
769 if mc_now < mc_max:
770 result.append("Not enough master candidates: actual %d, target %d" %
771 (mc_now, mc_max))
772
773
774 for node_uuid, node in data.nodes.items():
775 if node.uuid != node_uuid:
776 result.append("Node '%s' is indexed by wrong UUID '%s'" %
777 (node.name, node_uuid))
778 if [node.master_candidate, node.drained, node.offline].count(True) > 1:
779 result.append("Node %s state is invalid: master_candidate=%s,"
780 " drain=%s, offline=%s" %
781 (node.name, node.master_candidate, node.drained,
782 node.offline))
783 if node.group not in data.nodegroups:
784 result.append("Node '%s' has invalid group '%s'" %
785 (node.name, node.group))
786 else:
787 _helper("node %s" % node.name, "ndparams",
788 cluster.FillND(node, data.nodegroups[node.group]),
789 constants.NDS_PARAMETER_TYPES)
790 used_globals = constants.NDC_GLOBALS.intersection(node.ndparams)
791 if used_globals:
792 result.append("Node '%s' has some global parameters set: %s" %
793 (node.name, utils.CommaJoin(used_globals)))
794
795
796 nodegroups_names = set()
797 for nodegroup_uuid in data.nodegroups:
798 nodegroup = data.nodegroups[nodegroup_uuid]
799 if nodegroup.uuid != nodegroup_uuid:
800 result.append("node group '%s' (uuid: '%s') indexed by wrong uuid '%s'"
801 % (nodegroup.name, nodegroup.uuid, nodegroup_uuid))
802 if utils.UUID_RE.match(nodegroup.name.lower()):
803 result.append("node group '%s' (uuid: '%s') has uuid-like name" %
804 (nodegroup.name, nodegroup.uuid))
805 if nodegroup.name in nodegroups_names:
806 result.append("duplicate node group name '%s'" % nodegroup.name)
807 else:
808 nodegroups_names.add(nodegroup.name)
809 group_name = "group %s" % nodegroup.name
810 _helper_ipolicy(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy),
811 False)
812 if nodegroup.ndparams:
813 _helper(group_name, "ndparams",
814 cluster.SimpleFillND(nodegroup.ndparams),
815 constants.NDS_PARAMETER_TYPES)
816
817
818 _, duplicates = self._UnlockedComputeDRBDMap()
819 for node, minor, instance_a, instance_b in duplicates:
820 result.append("DRBD minor %d on node %s is assigned twice to instances"
821 " %s and %s" % (minor, node, instance_a, instance_b))
822
823
824 default_nicparams = cluster.nicparams[constants.PP_DEFAULT]
825 ips = {}
826
827 def _AddIpAddress(ip, name):
828 ips.setdefault(ip, []).append(name)
829
830 _AddIpAddress(cluster.master_ip, "cluster_ip")
831
832 for node in data.nodes.values():
833 _AddIpAddress(node.primary_ip, "node:%s/primary" % node.name)
834 if node.secondary_ip != node.primary_ip:
835 _AddIpAddress(node.secondary_ip, "node:%s/secondary" % node.name)
836
837 for instance in data.instances.values():
838 for idx, nic in enumerate(instance.nics):
839 if nic.ip is None:
840 continue
841
842 nicparams = objects.FillDict(default_nicparams, nic.nicparams)
843 nic_mode = nicparams[constants.NIC_MODE]
844 nic_link = nicparams[constants.NIC_LINK]
845
846 if nic_mode == constants.NIC_MODE_BRIDGED:
847 link = "bridge:%s" % nic_link
848 elif nic_mode == constants.NIC_MODE_ROUTED:
849 link = "route:%s" % nic_link
850 else:
851 raise errors.ProgrammerError("NIC mode '%s' not handled" % nic_mode)
852
853 _AddIpAddress("%s/%s/%s" % (link, nic.ip, nic.network),
854 "instance:%s/nic:%d" % (instance.name, idx))
855
856 for ip, owners in ips.items():
857 if len(owners) > 1:
858 result.append("IP address %s is used by multiple owners: %s" %
859 (ip, utils.CommaJoin(owners)))
860
861 return result
862
  @locking.ssynchronized(_config_lock, shared=1)
  def VerifyConfig(self):
    """Verify function.

    This is just a wrapper over L{_UnlockedVerifyConfig}.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    return self._UnlockedVerifyConfig()

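  # Illustrative sketch (not from the original module): callers treat a
  # non-empty result from VerifyConfig as a corrupt configuration, e.g.
  #
  #   errs = cfg.VerifyConfig()
  #   if errs:
  #     raise errors.ConfigurationError("Configuration errors: %s" %
  #                                     utils.CommaJoin(errs))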
877 """Convert the unique ID to the ID needed on the target nodes.
878
879 This is used only for drbd, which needs ip/port configuration.
880
881 The routine descends down and updates its children also, because
882 this helps when the only the top device is passed to the remote
883 node.
884
885 This function is for internal use, when the config lock is already held.
886
887 """
888 if disk.children:
889 for child in disk.children:
890 self._UnlockedSetDiskID(child, node_uuid)
891
892 if disk.logical_id is None and disk.physical_id is not None:
893 return
894 if disk.dev_type == constants.DT_DRBD8:
895 pnode, snode, port, pminor, sminor, secret = disk.logical_id
896 if node_uuid not in (pnode, snode):
897 raise errors.ConfigurationError("DRBD device not knowing node %s" %
898 node_uuid)
899 pnode_info = self._UnlockedGetNodeInfo(pnode)
900 snode_info = self._UnlockedGetNodeInfo(snode)
901 if pnode_info is None or snode_info is None:
902 raise errors.ConfigurationError("Can't find primary or secondary node"
903 " for %s" % str(disk))
904 p_data = (pnode_info.secondary_ip, port)
905 s_data = (snode_info.secondary_ip, port)
906 if pnode == node_uuid:
907 disk.physical_id = p_data + s_data + (pminor, secret)
908 else:
909 disk.physical_id = s_data + p_data + (sminor, secret)
910 else:
911 disk.physical_id = disk.logical_id
912 return
913
  @locking.ssynchronized(_config_lock)
  def SetDiskID(self, disk, node_uuid):
    """Convert the unique ID to the ID needed on the target nodes.

    This is used only for drbd, which needs ip/port configuration.

    The routine descends down and updates its children also, because
    this helps when only the top device is passed to the remote
    node.

    """
    return self._UnlockedSetDiskID(disk, node_uuid)

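  # Illustrative sketch (not from the original module): for a DRBD8 disk with
  # logical_id (pnode, snode, port, pminor, sminor, secret), SetDiskID fills
  # physical_id as seen from node_uuid, local endpoint first, using each
  # node's secondary_ip:
  #
  #   on pnode: (pnode_ip, port, snode_ip, port, pminor, secret)
  #   on snode: (snode_ip, port, pnode_ip, port, sminor, secret)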
  @locking.ssynchronized(_config_lock)
  def AddTcpUdpPort(self, port):
    """Adds a new port to the available port pool.

    @warning: this method does not "flush" the configuration (via
        L{_WriteConfig}); callers should do that themselves once the
        configuration is stable

    """
    if not isinstance(port, int):
      raise errors.ProgrammerError("Invalid type passed for port")

    self._config_data.cluster.tcpudp_port_pool.add(port)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetPortList(self):
    """Returns a copy of the current port list.

    """
    return self._config_data.cluster.tcpudp_port_pool.copy()

  @locking.ssynchronized(_config_lock)
  def AllocatePort(self):
    """Allocate a port.

    The port will be taken from the available port pool or from the
    default port range (and in this case we increase
    highest_used_port).

    """
    # If there are TCP/IP ports configured, we use them first.
    if self._config_data.cluster.tcpudp_port_pool:
      port = self._config_data.cluster.tcpudp_port_pool.pop()
    else:
      port = self._config_data.cluster.highest_used_port + 1
      if port >= constants.LAST_DRBD_PORT:
        raise errors.ConfigurationError("The highest used port is greater"
                                        " than %s. Aborting." %
                                        constants.LAST_DRBD_PORT)
      self._config_data.cluster.highest_used_port = port

    self._WriteConfig()
    return port

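  # Illustrative sketch (not from the original module): AllocatePort prefers
  # recycled ports and falls back to extending the used range, e.g. with
  # tcpudp_port_pool == set([11005]) and highest_used_port == 11017:
  #
  #   cfg.AllocatePort()   # -> 11005 (taken from the pool)
  #   cfg.AllocatePort()   # -> 11018 (pool empty, range extended)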
972 """Compute the used DRBD minor/nodes.
973
974 @rtype: (dict, list)
975 @return: dictionary of node_uuid: dict of minor: instance_uuid;
976 the returned dict will have all the nodes in it (even if with
977 an empty list), and a list of duplicates; if the duplicates
978 list is not empty, the configuration is corrupted and its caller
979 should raise an exception
980
981 """
982 def _AppendUsedMinors(get_node_name_fn, instance, disk, used):
983 duplicates = []
984 if disk.dev_type == constants.DT_DRBD8 and len(disk.logical_id) >= 5:
985 node_a, node_b, _, minor_a, minor_b = disk.logical_id[:5]
986 for node_uuid, minor in ((node_a, minor_a), (node_b, minor_b)):
987 assert node_uuid in used, \
988 ("Node '%s' of instance '%s' not found in node list" %
989 (get_node_name_fn(node_uuid), instance.name))
990 if minor in used[node_uuid]:
991 duplicates.append((node_uuid, minor, instance.uuid,
992 used[node_uuid][minor]))
993 else:
994 used[node_uuid][minor] = instance.uuid
995 if disk.children:
996 for child in disk.children:
997 duplicates.extend(_AppendUsedMinors(get_node_name_fn, instance, child,
998 used))
999 return duplicates
1000
1001 duplicates = []
1002 my_dict = dict((node_uuid, {}) for node_uuid in self._config_data.nodes)
1003 for instance in self._config_data.instances.itervalues():
1004 for disk in instance.disks:
1005 duplicates.extend(_AppendUsedMinors(self._UnlockedGetNodeName,
1006 instance, disk, my_dict))
1007 for (node_uuid, minor), inst_uuid in self._temporary_drbds.iteritems():
1008 if minor in my_dict[node_uuid] and my_dict[node_uuid][minor] != inst_uuid:
1009 duplicates.append((node_uuid, minor, inst_uuid,
1010 my_dict[node_uuid][minor]))
1011 else:
1012 my_dict[node_uuid][minor] = inst_uuid
1013 return my_dict, duplicates
1014
  @locking.ssynchronized(_config_lock)
  def ComputeDRBDMap(self):
    """Compute the used DRBD minor/nodes.

    This is just a wrapper over L{_UnlockedComputeDRBDMap}.

    @return: dictionary of node_uuid: dict of minor: instance_uuid;
        the returned dict will have all the nodes in it (even if with
        an empty list).

    """
    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    return d_map

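  # Illustrative sketch (not from the original module): shape of the value
  # returned by ComputeDRBDMap for a two-node cluster where instance
  # "inst1-uuid" uses minor 0 on both nodes:
  #
  #   {"node1-uuid": {0: "inst1-uuid"},
  #    "node2-uuid": {0: "inst1-uuid"}}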
  @locking.ssynchronized(_config_lock)
  def AllocateDRBDMinor(self, node_uuids, inst_uuid):
    """Allocate a drbd minor.

    The free minor will be automatically computed from the existing
    devices. A node can be given multiple times in order to allocate
    multiple minors. The result is the list of minors, in the same
    order as the passed nodes.

    @type inst_uuid: string
    @param inst_uuid: the instance for which we allocate minors

    """
    assert isinstance(inst_uuid, basestring), \
      "Invalid argument '%s' passed to AllocateDRBDMinor" % inst_uuid

    d_map, duplicates = self._UnlockedComputeDRBDMap()
    if duplicates:
      raise errors.ConfigurationError("Duplicate DRBD ports detected: %s" %
                                      str(duplicates))
    result = []
    for nuuid in node_uuids:
      ndata = d_map[nuuid]
      if not ndata:
        # no minors used, we can start at 0
        result.append(0)
        ndata[0] = inst_uuid
        self._temporary_drbds[(nuuid, 0)] = inst_uuid
        continue
      keys = ndata.keys()
      keys.sort()
      ffree = utils.FirstFree(keys)
      if ffree is None:
        # return the next minor
        # TODO: implement high-limit check
        minor = keys[-1] + 1
      else:
        minor = ffree
      # double-check minor against current instances
      assert minor not in d_map[nuuid], \
        ("Attempt to reuse allocated DRBD minor %d on node %s,"
         " already allocated to instance %s" %
         (minor, nuuid, d_map[nuuid][minor]))
      ndata[minor] = inst_uuid
      # double-check minor against reservation
      r_key = (nuuid, minor)
      assert r_key not in self._temporary_drbds, \
        ("Attempt to reuse reserved DRBD minor %d on node %s,"
         " reserved for instance %s" %
         (minor, nuuid, self._temporary_drbds[r_key]))
      self._temporary_drbds[r_key] = inst_uuid
      result.append(minor)
    logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                  node_uuids, result)
    return result

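  # Illustrative sketch (not from the original module): with minors 0, 1 and 3
  # in use on "node1-uuid", the first free minor (2) is returned, and asking
  # for the same node twice yields two distinct minors:
  #
  #   cfg.AllocateDRBDMinor(["node1-uuid", "node1-uuid"], "inst9-uuid")
  #   # -> [2, 4]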
1089 """Release temporary drbd minors allocated for a given instance.
1090
1091 @type inst_uuid: string
1092 @param inst_uuid: the instance for which temporary minors should be
1093 released
1094
1095 """
1096 assert isinstance(inst_uuid, basestring), \
1097 "Invalid argument passed to ReleaseDRBDMinors"
1098 for key, uuid in self._temporary_drbds.items():
1099 if uuid == inst_uuid:
1100 del self._temporary_drbds[key]
1101
  @locking.ssynchronized(_config_lock)
  def ReleaseDRBDMinors(self, inst_uuid):
    """Release temporary drbd minors allocated for a given instance.

    This should be called on the error paths, on the success paths
    it's automatically called by the ConfigWriter add and update
    functions.

    This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}.

    @type inst_uuid: string
    @param inst_uuid: the instance for which temporary minors should be
                      released

    """
    self._UnlockedReleaseDRBDMinors(inst_uuid)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetConfigVersion(self):
    """Get the configuration version.

    @return: Config version

    """
    return self._config_data.version

  @locking.ssynchronized(_config_lock, shared=1)
  def GetClusterName(self):
    """Get cluster name.

    @return: Cluster name

    """
    return self._config_data.cluster.cluster_name

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNode(self):
    """Get the UUID of the master node for this cluster.

    @return: Master node UUID

    """
    return self._config_data.cluster.master_node

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNodeName(self):
    """Get the hostname of the master node for this cluster.

    @return: Master node hostname

    """
    return self._UnlockedGetNodeName(self._config_data.cluster.master_node)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterIP(self):
    """Get the IP of the master node for this cluster.

    @return: Master IP

    """
    return self._config_data.cluster.master_ip

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetdev(self):
    """Get the master network device for this cluster.

    """
    return self._config_data.cluster.master_netdev

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetmask(self):
    """Get the netmask of the master node for this cluster.

    """
    return self._config_data.cluster.master_netmask

  @locking.ssynchronized(_config_lock, shared=1)
  def GetUseExternalMipScript(self):
    """Get flag representing whether to use the external master IP setup
    script.

    """
    return self._config_data.cluster.use_external_mip_script

  @locking.ssynchronized(_config_lock, shared=1)
  def GetFileStorageDir(self):
    """Get the file storage dir for this cluster.

    """
    return self._config_data.cluster.file_storage_dir

  @locking.ssynchronized(_config_lock, shared=1)
  def GetSharedFileStorageDir(self):
    """Get the shared file storage dir for this cluster.

    """
    return self._config_data.cluster.shared_file_storage_dir

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHypervisorType(self):
    """Get the hypervisor type for this cluster.

    """
    return self._config_data.cluster.enabled_hypervisors[0]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetHostKey(self):
    """Return the rsa hostkey from the config.

    @rtype: string
    @return: the rsa hostkey

    """
    return self._config_data.cluster.rsahostkeypub

  @locking.ssynchronized(_config_lock, shared=1)
  def GetDsaHostKey(self):
    """Return the dsa hostkey from the config.

    @rtype: string
    @return: the dsa hostkey

    """
    return self._config_data.cluster.dsahostkeypub

  @locking.ssynchronized(_config_lock, shared=1)
  def GetDefaultIAllocator(self):
    """Get the default instance allocator for this cluster.

    """
    return self._config_data.cluster.default_iallocator

  @locking.ssynchronized(_config_lock, shared=1)
  def GetPrimaryIPFamily(self):
    """Get cluster primary ip family.

    @return: primary ip family

    """
    return self._config_data.cluster.primary_ip_family

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterNetworkParameters(self):
    """Get network parameters of the master node.

    @rtype: L{objects.MasterNetworkParameters}
    @return: network parameters of the master node

    """
    cluster = self._config_data.cluster
    result = objects.MasterNetworkParameters(
      uuid=cluster.master_node, ip=cluster.master_ip,
      netmask=cluster.master_netmask, netdev=cluster.master_netdev,
      ip_family=cluster.primary_ip_family)

    return result

  @locking.ssynchronized(_config_lock)
  def AddNodeGroup(self, group, ec_id, check_uuid=True):
    """Add a node group to the configuration.

    This method calls group.UpgradeConfig() to fill any missing attributes
    according to their default values.

    @type group: L{objects.NodeGroup}
    @param group: the NodeGroup object to add
    @type ec_id: string
    @param ec_id: unique id for the job to use when creating a missing UUID
    @type check_uuid: bool
    @param check_uuid: add a UUID to the group if it doesn't have one or, if
        it does, ensure that it does not exist in the
        configuration already

    """
    self._UnlockedAddNodeGroup(group, ec_id, check_uuid)
    self._WriteConfig()


  @locking.ssynchronized(_config_lock)
  def RemoveNodeGroup(self, group_uuid):
    """Remove a node group from the configuration.

    @type group_uuid: string
    @param group_uuid: the UUID of the node group to remove

    """
    logging.info("Removing node group %s from configuration", group_uuid)

    if group_uuid not in self._config_data.nodegroups:
      raise errors.ConfigurationError("Unknown node group '%s'" % group_uuid)

    assert len(self._config_data.nodegroups) != 1, \
      "Group '%s' is the only group, cannot be removed" % group_uuid

    del self._config_data.nodegroups[group_uuid]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

1328 """Lookup a node group's UUID.
1329
1330 @type target: string or None
1331 @param target: group name or UUID or None to look for the default
1332 @rtype: string
1333 @return: nodegroup UUID
1334 @raises errors.OpPrereqError: when the target group cannot be found
1335
1336 """
1337 if target is None:
1338 if len(self._config_data.nodegroups) != 1:
1339 raise errors.OpPrereqError("More than one node group exists. Target"
1340 " group must be specified explicitly.")
1341 else:
1342 return self._config_data.nodegroups.keys()[0]
1343 if target in self._config_data.nodegroups:
1344 return target
1345 for nodegroup in self._config_data.nodegroups.values():
1346 if nodegroup.name == target:
1347 return nodegroup.uuid
1348 raise errors.OpPrereqError("Node group '%s' not found" % target,
1349 errors.ECODE_NOENT)
1350
  @locking.ssynchronized(_config_lock, shared=1)
  def LookupNodeGroup(self, target):
    """Lookup a node group's UUID.

    This function is just a wrapper over L{_UnlockedLookupNodeGroup}.

    @type target: string or None
    @param target: group name or UUID or None to look for the default
    @rtype: string
    @return: nodegroup UUID

    """
    return self._UnlockedLookupNodeGroup(target)

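  # Illustrative sketch (not from the original module): LookupNodeGroup
  # accepts a name, a UUID, or None (the default group on single-group
  # clusters); the names shown are made up:
  #
  #   cfg.LookupNodeGroup("group1")       # -> "group1-uuid"
  #   cfg.LookupNodeGroup("group1-uuid")  # -> "group1-uuid"
  #   cfg.LookupNodeGroup(None)           # -> the sole group's UUID, or
  #                                       #    OpPrereqError with >1 group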
1366 """Lookup a node group.
1367
1368 @type uuid: string
1369 @param uuid: group UUID
1370 @rtype: L{objects.NodeGroup} or None
1371 @return: nodegroup object, or None if not found
1372
1373 """
1374 if uuid not in self._config_data.nodegroups:
1375 return None
1376
1377 return self._config_data.nodegroups[uuid]
1378
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroup(self, uuid):
    """Lookup a node group.

    @type uuid: string
    @param uuid: group UUID
    @rtype: L{objects.NodeGroup} or None
    @return: nodegroup object, or None if not found

    """
    return self._UnlockedGetNodeGroup(uuid)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodeGroupsInfo(self):
    """Get the configuration of all node groups.

    """
    return dict(self._config_data.nodegroups)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroupList(self):
    """Get a list of node groups.

    """
    return self._config_data.nodegroups.keys()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroupMembersByNodes(self, nodes):
    """Get nodes which are member in the same nodegroups as the given nodes.

    """
    ngfn = lambda node_uuid: self._UnlockedGetNodeInfo(node_uuid).group
    return frozenset(member_uuid
                     for node_uuid in nodes
                     for member_uuid in
                       self._UnlockedGetNodeGroup(ngfn(node_uuid)).members)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMultiNodeGroupInfo(self, group_uuids):
    """Get the configuration of multiple node groups.

    @param group_uuids: List of node group UUIDs
    @rtype: list
    @return: List of tuples of (group_uuid, group_info)

    """
    return [(uuid, self._UnlockedGetNodeGroup(uuid)) for uuid in group_uuids]

  @locking.ssynchronized(_config_lock)
  def AddInstance(self, instance, ec_id):
    """Add an instance to the config.

    This should be used after creating a new instance.

    @type instance: L{objects.Instance}
    @param instance: the instance object

    """
    if not isinstance(instance, objects.Instance):
      raise errors.ProgrammerError("Invalid type passed to AddInstance")

    all_macs = self._AllMACs()
    for nic in instance.nics:
      if nic.mac in all_macs:
        raise errors.ConfigurationError("Cannot add instance %s:"
                                        " MAC address '%s' already in use." %
                                        (instance.name, nic.mac))

    self._CheckUniqueUUID(instance, include_temporary=False)

    instance.serial_no = 1
    instance.ctime = instance.mtime = time.time()
    self._config_data.instances[instance.uuid] = instance
    self._config_data.cluster.serial_no += 1
    self._UnlockedReleaseDRBDMinors(instance.uuid)
    self._UnlockedCommitTemporaryIps(ec_id)
    self._WriteConfig()

1462 """Ensures a given object has a valid UUID.
1463
1464 @param item: the instance or node to be checked
1465 @param ec_id: the execution context id for the uuid reservation
1466
1467 """
1468 if not item.uuid:
1469 item.uuid = self._GenerateUniqueID(ec_id)
1470 else:
1471 self._CheckUniqueUUID(item, include_temporary=True)
1472
1474 """Checks that the UUID of the given object is unique.
1475
1476 @param item: the instance or node to be checked
1477 @param include_temporary: whether temporarily generated UUID's should be
1478 included in the check. If the UUID of the item to be checked is
1479 a temporarily generated one, this has to be C{False}.
1480
1481 """
1482 if not item.uuid:
1483 raise errors.ConfigurationError("'%s' must have an UUID" % (item.name,))
1484 if item.uuid in self._AllIDs(include_temporary=include_temporary):
1485 raise errors.ConfigurationError("Cannot add '%s': UUID %s already"
1486 " in use" % (item.name, item.uuid))
1487
1489 """Set the instance's status to a given value.
1490
1491 """
1492 if inst_uuid not in self._config_data.instances:
1493 raise errors.ConfigurationError("Unknown instance '%s'" %
1494 inst_uuid)
1495 instance = self._config_data.instances[inst_uuid]
1496
1497 if status is None:
1498 status = instance.admin_state
1499 if disks_active is None:
1500 disks_active = instance.disks_active
1501
1502 assert status in constants.ADMINST_ALL, \
1503 "Invalid status '%s' passed to SetInstanceStatus" % (status,)
1504
1505 if instance.admin_state != status or \
1506 instance.disks_active != disks_active:
1507 instance.admin_state = status
1508 instance.disks_active = disks_active
1509 instance.serial_no += 1
1510 instance.mtime = time.time()
1511 self._WriteConfig()
1512
  @locking.ssynchronized(_config_lock)
  def MarkInstanceUp(self, inst_uuid):
    """Mark the instance status to up in the config.

    This also sets the instance disks active flag.

    """
    self._SetInstanceStatus(inst_uuid, constants.ADMINST_UP, True)

  @locking.ssynchronized(_config_lock)
  def MarkInstanceOffline(self, inst_uuid):
    """Mark the instance as offline in the config.

    This also clears the instance disks active flag.

    """
    self._SetInstanceStatus(inst_uuid, constants.ADMINST_OFFLINE, False)

  @locking.ssynchronized(_config_lock)
  def RemoveInstance(self, inst_uuid):
    """Remove the instance from the configuration.

    """
    if inst_uuid not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)

    # If a network port has been allocated to the instance,
    # return it to the pool of free ports.
    inst = self._config_data.instances[inst_uuid]
    network_port = getattr(inst, "network_port", None)
    if network_port is not None:
      self._config_data.cluster.tcpudp_port_pool.add(network_port)

    instance = self._UnlockedGetInstanceInfo(inst_uuid)

    for nic in instance.nics:
      if nic.network and nic.ip:
        # Return all IP addresses to the respective address pools
        self._UnlockedCommitIp(constants.RELEASE_ACTION, nic.network, nic.ip)

    del self._config_data.instances[inst_uuid]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RenameInstance(self, inst_uuid, new_name):
    """Rename an instance.

    This needs to be done in ConfigWriter and not by RemoveInstance
    combined with AddInstance as only we can guarantee an atomic
    rename.

    """
    if inst_uuid not in self._config_data.instances:
      raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)

    inst = self._config_data.instances[inst_uuid]
    inst.name = new_name

    for disk in inst.disks:
      if disk.dev_type in [constants.DT_FILE, constants.DT_SHARED_FILE]:
        # rename the file paths in logical and physical id
        file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
        disk.logical_id = (disk.logical_id[0],
                           utils.PathJoin(file_storage_dir, inst.name,
                                          os.path.basename(disk.logical_id[1])))
        disk.physical_id = disk.logical_id

    # Force update of ssconf files
    self._config_data.cluster.serial_no += 1

    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def MarkInstanceDown(self, inst_uuid):
    """Mark the status of an instance to down in the configuration.

    This does not touch the instance disks active flag, as shut down instances
    can still have active disks.

    """
    self._SetInstanceStatus(inst_uuid, constants.ADMINST_DOWN, None)

  @locking.ssynchronized(_config_lock)
  def MarkInstanceDisksActive(self, inst_uuid):
    """Mark the status of instance disks active.

    """
    self._SetInstanceStatus(inst_uuid, None, True)

  @locking.ssynchronized(_config_lock)
  def MarkInstanceDisksInactive(self, inst_uuid):
    """Mark the status of instance disks inactive.

    """
    self._SetInstanceStatus(inst_uuid, None, False)

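  # Illustrative sketch (not from the original module): the status wrappers
  # above map to (admin_state, disks_active) pairs passed to
  # _SetInstanceStatus; None leaves a field unchanged:
  #
  #   MarkInstanceUp            -> (ADMINST_UP, True)
  #   MarkInstanceDown          -> (ADMINST_DOWN, unchanged)
  #   MarkInstanceDisksActive   -> (unchanged, True)
  #   MarkInstanceDisksInactive -> (unchanged, False)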
1611 """Get the list of instances.
1612
1613 This function is for internal use, when the config lock is already held.
1614
1615 """
1616 return self._config_data.instances.keys()
1617
  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceList(self):
    """Get the list of instances.

    @return: array of instances, ex. ['instance2-uuid', 'instance1-uuid']

    """
    return self._UnlockedGetInstanceList()

1628 """Attempt to expand an incomplete instance name.
1629
1630 """
1631
1632 all_insts = self.GetAllInstancesInfo().values()
1633 expanded_name = _MatchNameComponentIgnoreCase(
1634 short_name, [inst.name for inst in all_insts])
1635
1636 if expanded_name is not None:
1637
1638 inst = (filter(lambda n: n.name == expanded_name, all_insts)[0])
1639 return (inst.uuid, inst.name)
1640 else:
1641 return (None, None)
1642
1644 """Returns information about an instance.
1645
1646 This function is for internal use, when the config lock is already held.
1647
1648 """
1649 if inst_uuid not in self._config_data.instances:
1650 return None
1651
1652 return self._config_data.instances[inst_uuid]
1653
  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceInfo(self, inst_uuid):
    """Returns information about an instance.

    It takes the information from the configuration file. Other information
    of an instance is taken from the live systems.

    @param inst_uuid: UUID of the instance

    @rtype: L{objects.Instance}
    @return: the instance object

    """
    return self._UnlockedGetInstanceInfo(inst_uuid)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceNodeGroups(self, inst_uuid, primary_only=False):
    """Returns set of node group UUIDs for instance's nodes.

    @rtype: frozenset

    """
    instance = self._UnlockedGetInstanceInfo(inst_uuid)
    if not instance:
      raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)

    if primary_only:
      nodes = [instance.primary_node]
    else:
      nodes = instance.all_nodes

    return frozenset(self._UnlockedGetNodeInfo(node_uuid).group
                     for node_uuid in nodes)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceNetworks(self, inst_uuid):
    """Returns set of network UUIDs for instance's nics.

    @rtype: frozenset

    """
    instance = self._UnlockedGetInstanceInfo(inst_uuid)
    if not instance:
      raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)

    networks = set()
    for nic in instance.nics:
      if nic.network:
        networks.add(nic.network)

    return frozenset(networks)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMultiInstanceInfo(self, inst_uuids):
    """Get the configuration of multiple instances.

    @param inst_uuids: list of instance UUIDs
    @rtype: list
    @return: list of tuples (instance UUID, instance_info), where
        instance_info is what would GetInstanceInfo return for the
        node, while keeping the original order

    """
    return [(uuid, self._UnlockedGetInstanceInfo(uuid)) for uuid in inst_uuids]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMultiInstanceInfoByName(self, inst_names):
    """Get the configuration of multiple instances.

    @param inst_names: list of instance names
    @rtype: list
    @return: list of tuples (instance, instance_info), where
        instance_info is what would GetInstanceInfo return for the
        node, while keeping the original order

    """
    result = []
    for name in inst_names:
      instance = self._UnlockedGetInstanceInfoByName(name)
      result.append((instance.uuid, instance))
    return result

  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllInstancesInfo(self):
    """Get the configuration of all instances.

    @rtype: dict
    @return: dict of (instance, instance_info), where instance_info is what
        would GetInstanceInfo return for the node

    """
    return self._UnlockedGetAllInstancesInfo()

  def _UnlockedGetAllInstancesInfo(self):
    my_dict = dict([(inst_uuid, self._UnlockedGetInstanceInfo(inst_uuid))
                    for inst_uuid in self._UnlockedGetInstanceList()])
    return my_dict

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstancesInfoByFilter(self, filter_fn):
    """Get instance configuration with a filter.

    @type filter_fn: callable
    @param filter_fn: Filter function receiving instance object as parameter,
        returning boolean. Important: this function is called while the
        configuration lock is held. It must not do any complex work or call
        functions potentially leading to a deadlock. Ideally it doesn't call
        any other functions and just compares instance attributes.

    """
    return dict((uuid, inst)
                for (uuid, inst) in self._config_data.instances.items()
                if filter_fn(inst))

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceInfoByName(self, inst_name):
    """Get the L{objects.Instance} object for a named instance.

    @param inst_name: name of the instance to get information for
    @type inst_name: string
    @return: the corresponding L{objects.Instance} instance or None if no
        information is available

    """
    return self._UnlockedGetInstanceInfoByName(inst_name)

  def _UnlockedGetInstanceInfoByName(self, inst_name):
    for inst in self._UnlockedGetAllInstancesInfo().values():
      if inst.name == inst_name:
        return inst
    return None

  def _UnlockedGetInstanceName(self, inst_uuid):
    inst_info = self._UnlockedGetInstanceInfo(inst_uuid)
    if inst_info is None:
      raise errors.ConfigurationError("Instance '%s' not found" % inst_uuid)
    return inst_info.name

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceName(self, inst_uuid):
    """Gets the instance name for the passed instance.

    @param inst_uuid: instance UUID to get name for
    @type inst_uuid: string
    @rtype: string
    @return: instance name

    """
    return self._UnlockedGetInstanceName(inst_uuid)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetInstanceNames(self, inst_uuids):
    """Gets the instance names for the passed list of instances.

    @param inst_uuids: list of instance UUIDs to get names for
    @type inst_uuids: list of strings
    @rtype: list of strings
    @return: list of instance names

    """
    return self._UnlockedGetInstanceNames(inst_uuids)

  def _UnlockedGetInstanceNames(self, inst_uuids):
    return [self._UnlockedGetInstanceName(uuid) for uuid in inst_uuids]

  @locking.ssynchronized(_config_lock)
  def AddNode(self, node, ec_id):
    """Add a node to the configuration.

    @type node: L{objects.Node}
    @param node: a Node instance

    """
    logging.info("Adding node %s to configuration", node.name)

    self._EnsureUUID(node, ec_id)

    node.serial_no = 1
    node.ctime = node.mtime = time.time()
    self._UnlockedAddNodeToGroup(node.uuid, node.group)
    self._config_data.nodes[node.uuid] = node
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  @locking.ssynchronized(_config_lock)
  def RemoveNode(self, node_uuid):
    """Remove a node from the configuration.

    """
    logging.info("Removing node %s from configuration", node_uuid)

    if node_uuid not in self._config_data.nodes:
      raise errors.ConfigurationError("Unknown node '%s'" % node_uuid)

    self._UnlockedRemoveNodeFromGroup(self._config_data.nodes[node_uuid])
    del self._config_data.nodes[node_uuid]
    self._config_data.cluster.serial_no += 1
    self._WriteConfig()

  def ExpandNodeName(self, short_name):
    """Attempt to expand an incomplete node name into a node UUID.

    """
    # Locking is done in L{ConfigWriter.GetAllNodesInfo}
    all_nodes = self.GetAllNodesInfo().values()
    expanded_name = _MatchNameComponentIgnoreCase(
      short_name, [node.name for node in all_nodes])

    if expanded_name is not None:
      # there has to be exactly one node with that name
      node = (filter(lambda n: n.name == expanded_name, all_nodes)[0])
      return (node.uuid, node.name)
    else:
      return (None, None)

1870 """Get the configuration of a node, as stored in the config.
1871
1872 This function is for internal use, when the config lock is already
1873 held.
1874
1875 @param node_uuid: the node UUID
1876
1877 @rtype: L{objects.Node}
1878 @return: the node object
1879
1880 """
1881 if node_uuid not in self._config_data.nodes:
1882 return None
1883
1884 return self._config_data.nodes[node_uuid]
1885
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInfo(self, node_uuid):
    """Get the configuration of a node, as stored in the config.

    This is just a locked wrapper over L{_UnlockedGetNodeInfo}.

    @param node_uuid: the node UUID

    @rtype: L{objects.Node}
    @return: the node object

    """
    return self._UnlockedGetNodeInfo(node_uuid)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInstances(self, node_uuid):
    """Get the instances of a node, as stored in the config.

    @param node_uuid: the node UUID

    @rtype: (list, list)
    @return: a tuple with two lists: the primary and the secondary instances

    """
    pri = []
    sec = []
    for inst in self._config_data.instances.values():
      if inst.primary_node == node_uuid:
        pri.append(inst.uuid)
      if node_uuid in inst.secondary_nodes:
        sec.append(inst.uuid)
    return (pri, sec)

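  # Illustrative sketch (not from the original module): an instance counts as
  # primary on its primary_node and as secondary everywhere else it has disks:
  #
  #   pri, sec = cfg.GetNodeInstances("node2-uuid")
  #   # pri == ["inst1-uuid"]  (instances whose primary_node is node2)
  #   # sec == ["inst2-uuid"]  (instances with node2 among secondary_nodes)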
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroupInstances(self, uuid, primary_only=False):
    """Get the instances of a node group.

    @param uuid: Node group UUID
    @param primary_only: Whether to only consider primary nodes
    @rtype: frozenset
    @return: List of instance UUIDs in node group

    """
    if primary_only:
      nodes_fn = lambda inst: [inst.primary_node]
    else:
      nodes_fn = lambda inst: inst.all_nodes

    return frozenset(inst.uuid
                     for inst in self._config_data.instances.values()
                     for node_uuid in nodes_fn(inst)
                     if self._UnlockedGetNodeInfo(node_uuid).group == uuid)

1940 """Return the string representation of the list of hyervisor parameters of
1941 the given hypervisor.
1942
1943 @see: C{GetHvparams}
1944
1945 """
1946 result = ""
1947 hvparams = self._config_data.cluster.hvparams[hvname]
1948 for key in hvparams:
1949 result += "%s=%s\n" % (key, hvparams[key])
1950 return result
1951
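  # Illustrative sketch (not from the original module): sample output for a
  # hypothetical "xen-pvm" entry with two parameters:
  #
  #   cfg._UnlockedGetHvparamsString("xen-pvm")
  #   # -> "kernel_path=/boot/vmlinuz\nroot_path=/dev/xvda1\n"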
  @locking.ssynchronized(_config_lock, shared=1)
  def GetHvparamsString(self, hvname):
    """Return the hypervisor parameters of the given hypervisor.

    @type hvname: string
    @param hvname: name of a hypervisor
    @rtype: string
    @return: string containing key-value-pairs, one pair on each line;
        format: KEY=VALUE

    """
    return self._UnlockedGetHvparamsString(hvname)

1966 """Return the list of nodes which are in the configuration.
1967
1968 This function is for internal use, when the config lock is already
1969 held.
1970
1971 @rtype: list
1972
1973 """
1974 return self._config_data.nodes.keys()
1975
  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeList(self):
    """Return the list of nodes which are in the configuration.

    """
    return self._UnlockedGetNodeList()

  def _UnlockedGetOnlineNodeList(self):
    all_nodes = [self._UnlockedGetNodeInfo(node)
                 for node in self._UnlockedGetNodeList()]
    return [node.uuid for node in all_nodes if not node.offline]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetOnlineNodeList(self):
    """Return the list of nodes which are online.

    """
    return self._UnlockedGetOnlineNodeList()

  @locking.ssynchronized(_config_lock, shared=1)
  def GetVmCapableNodeList(self):
    """Return the list of nodes which are vm capable.

    """
    all_nodes = [self._UnlockedGetNodeInfo(node)
                 for node in self._UnlockedGetNodeList()]
    return [node.uuid for node in all_nodes if node.vm_capable]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNonVmCapableNodeList(self):
    """Return the list of nodes which are not vm capable.

    """
    all_nodes = [self._UnlockedGetNodeInfo(node)
                 for node in self._UnlockedGetNodeList()]
    return [node.uuid for node in all_nodes if not node.vm_capable]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetMultiNodeInfo(self, node_uuids):
    """Get the configuration of multiple nodes.

    @param node_uuids: list of node UUIDs
    @rtype: list
    @return: list of tuples of (node, node_info), where node_info is
        what would GetNodeInfo return for the node, in the original
        order

    """
    return [(uuid, self._UnlockedGetNodeInfo(uuid)) for uuid in node_uuids]

2030 """Gets configuration of all nodes.
2031
2032 @note: See L{GetAllNodesInfo}
2033
2034 """
2035 return dict([(node_uuid, self._UnlockedGetNodeInfo(node_uuid))
2036 for node_uuid in self._UnlockedGetNodeList()])
2037
  @locking.ssynchronized(_config_lock, shared=1)
  def GetAllNodesInfo(self):
    """Get the configuration of all nodes.

    @rtype: dict
    @return: dict of (node, node_info), where node_info is what
        would GetNodeInfo return for the node

    """
    return self._UnlockedGetAllNodesInfo()

  def _UnlockedGetNodeInfoByName(self, node_name):
    for node in self._UnlockedGetAllNodesInfo().values():
      if node.name == node_name:
        return node
    return None

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeInfoByName(self, node_name):
    """Get the L{objects.Node} object for a named node.

    @param node_name: name of the node to get information for
    @type node_name: string
    @return: the corresponding L{objects.Node} instance or None if no
        information is available

    """
    return self._UnlockedGetNodeInfoByName(node_name)

  def _UnlockedGetNodeName(self, node_spec):
    if isinstance(node_spec, objects.Node):
      return node_spec.name
    elif isinstance(node_spec, basestring):
      node_info = self._UnlockedGetNodeInfo(node_spec)
      if node_info is None:
        raise errors.ConfigurationError("No node with UUID '%s' found" %
                                        node_spec)
      return node_info.name
    else:
      raise errors.ProgrammerError("Can't handle node spec '%s'" % node_spec)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeName(self, node_spec):
    """Gets the node name for the passed node.

    @param node_spec: node to get names for
    @type node_spec: either node UUID or a L{objects.Node} object
    @rtype: string
    @return: node name

    """
    return self._UnlockedGetNodeName(node_spec)

  def _UnlockedGetNodeNames(self, node_specs):
    return [self._UnlockedGetNodeName(node_spec) for node_spec in node_specs]

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeNames(self, node_specs):
    """Gets the node names for the passed list of nodes.

    @param node_specs: list of nodes to get names for
    @type node_specs: list of either node UUIDs or L{objects.Node} objects
    @rtype: list of strings
    @return: list of node names

    """
    return self._UnlockedGetNodeNames(node_specs)

  @locking.ssynchronized(_config_lock, shared=1)
  def GetNodeGroupsFromNodes(self, node_uuids):
    """Returns groups for a list of nodes.

    @type node_uuids: list of string
    @param node_uuids: List of node UUIDs
    @rtype: frozenset

    """
    return frozenset(self._UnlockedGetNodeInfo(uuid).group
                     for uuid in node_uuids)

2118 """Get the number of current and maximum desired and possible candidates.
2119
2120 @type exceptions: list
2121 @param exceptions: if passed, list of nodes that should be ignored
2122 @rtype: tuple
2123 @return: tuple of (current, desired and possible, possible)
2124
2125 """
2126 mc_now = mc_should = mc_max = 0
2127 for node in self._config_data.nodes.values():
2128 if exceptions and node.uuid in exceptions:
2129 continue
2130 if not (node.offline or node.drained) and node.master_capable:
2131 mc_max += 1
2132 if node.master_candidate:
2133 mc_now += 1
2134 mc_should = min(mc_max, self._config_data.cluster.candidate_pool_size)
2135 return (mc_now, mc_should, mc_max)
2136
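  # Illustrative worked example (not from the original module): on a cluster
  # with 5 nodes, of which 1 is offline and 1 is not master_capable, with
  # candidate_pool_size == 10 and 2 current master candidates:
  #
  #   mc_max    = 3                # online, undrained, master-capable nodes
  #   mc_should = min(3, 10) = 3
  #   mc_now    = 2
  #   -> (2, 3, 3), so one more master candidate is needed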
  @locking.ssynchronized(_config_lock, shared=1)
  def GetMasterCandidateStats(self, exceptions=None):
    """Get the number of current and maximum possible candidates.

    This is just a wrapper over L{_UnlockedGetMasterCandidateStats}.

    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored
    @rtype: tuple
    @return: tuple of (current, max)

    """
    return self._UnlockedGetMasterCandidateStats(exceptions)

  @locking.ssynchronized(_config_lock)
  def MaintainCandidatePool(self, exception_node_uuids):
    """Try to grow the candidate pool to the desired size.

    @type exception_node_uuids: list
    @param exception_node_uuids: if passed, list of nodes that should be
                                 ignored
    @rtype: list
    @return: list with the adjusted nodes (L{objects.Node} instances)

    """
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats(
                          exception_node_uuids)
    mod_list = []
    if mc_now < mc_max:
      node_list = self._config_data.nodes.keys()
      random.shuffle(node_list)
      for uuid in node_list:
        if mc_now >= mc_max:
          break
        node = self._config_data.nodes[uuid]
        if (node.master_candidate or node.offline or node.drained or
            node.uuid in exception_node_uuids or not node.master_capable):
          continue
        mod_list.append(node)
        node.master_candidate = True
        node.serial_no += 1
        mc_now += 1
      if mc_now != mc_max:
        # this should not happen
        logging.warning("Warning: MaintainCandidatePool didn't manage to"
                        " fill the candidate pool (%d/%d)", mc_now, mc_max)
      if mod_list:
        self._config_data.cluster.serial_no += 1
        self._WriteConfig()

    return mod_list

2189 """Add a given node to the specified group.
2190
2191 """
2192 if nodegroup_uuid not in self._config_data.nodegroups:
2193
2194
2195
2196
2197 raise errors.OpExecError("Unknown node group: %s" % nodegroup_uuid)
2198 if node_uuid not in self._config_data.nodegroups[nodegroup_uuid].members:
2199 self._config_data.nodegroups[nodegroup_uuid].members.append(node_uuid)
2200
2202 """Remove a given node from its group.
2203
2204 """
2205 nodegroup = node.group
2206 if nodegroup not in self._config_data.nodegroups:
2207 logging.warning("Warning: node '%s' has unknown node group '%s'"
2208 " (while being removed from it)", node.uuid, nodegroup)
2209 nodegroup_obj = self._config_data.nodegroups[nodegroup]
2210 if node.uuid not in nodegroup_obj.members:
2211 logging.warning("Warning: node '%s' not a member of its node group '%s'"
2212 " (while being removed from it)", node.uuid, nodegroup)
2213 else:
2214 nodegroup_obj.members.remove(node.uuid)
2215
2216 @locking.ssynchronized(_config_lock)
2218 """Changes the group of a number of nodes.
2219
2220 @type mods: list of tuples; (node name, new group UUID)
2221 @param mods: Node membership modifications
2222
2223 """
2224 groups = self._config_data.nodegroups
2225 nodes = self._config_data.nodes
2226
2227 resmod = []
2228
2229
2230 for (node_uuid, new_group_uuid) in mods:
2231 try:
2232 node = nodes[node_uuid]
2233 except KeyError:
2234 raise errors.ConfigurationError("Unable to find node '%s'" % node_uuid)
2235
2236 if node.group == new_group_uuid:
2237
2238 logging.debug("Node '%s' was assigned to its current group (%s)",
2239 node_uuid, node.group)
2240 continue
2241
2242 # Try to find current group of node
2243 try:
2244 old_group = groups[node.group]
2245 except KeyError:
2246 raise errors.ConfigurationError("Unable to find old group '%s'" %
2247 node.group)
2248
2249 # Try to find new group for node
2250 try:
2251 new_group = groups[new_group_uuid]
2252 except KeyError:
2253 raise errors.ConfigurationError("Unable to find new group '%s'" %
2254 new_group_uuid)
2255
2256 assert node.uuid in old_group.members, \
2257 ("Inconsistent configuration: node '%s' not listed in members for its"
2258 " old group '%s'" % (node.uuid, old_group.uuid))
2259 assert node.uuid not in new_group.members, \
2260 ("Inconsistent configuration: node '%s' already listed in members for"
2261 " its new group '%s'" % (node.uuid, new_group.uuid))
2262
2263 resmod.append((node, old_group, new_group))
2264
2265 # Second pass: apply changes
2266 for (node, old_group, new_group) in resmod:
2267 assert old_group.uuid != new_group.uuid, \
2268 "Assigning to current group is not possible"
2269
2270 node.group = new_group.uuid
2271
2272 # Update members of involved groups
2273 if node.uuid in old_group.members:
2274 old_group.members.remove(node.uuid)
2275 if node.uuid not in new_group.members:
2276 new_group.members.append(node.uuid)
2277
2278 # Update timestamps and serials of all modified objects, once each
2279 now = time.time()
2280 for obj in frozenset(itertools.chain(*resmod)):
2281 obj.serial_no += 1
2282 obj.mtime = now
2283
2284 # Force ssconf update
2285 self._config_data.cluster.serial_no += 1
2286
2287 self._WriteConfig()
2288
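# Editor's example (hypothetical UUIDs): moving two nodes into one group in
# a single call; the first pass validates every entry before the second
# pass touches any membership list, so a bad entry aborts the whole call.
#
#   cfg.AssignGroupNodes([
#     ("node1-uuid", "group-a-uuid"),
#     ("node2-uuid", "group-a-uuid"),
#     ])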
2290 """Bump up the serial number of the config.
2291
2292 """
2293 self._config_data.serial_no += 1
2294 self._config_data.mtime = time.time()
2295
2297 """Returns all objects with uuid attributes.
2298
2299 """
2300 return (self._config_data.instances.values() +
2301 self._config_data.nodes.values() +
2302 self._config_data.nodegroups.values() +
2303 self._config_data.networks.values() +
2304 self._AllDisks() +
2305 self._AllNICs() +
2306 [self._config_data.cluster])
2307
2309 """Read the config data from disk.
2310
2311 """
2312 raw_data = utils.ReadFile(self._cfg_file)
2313
2314 try:
2315 data = objects.ConfigData.FromDict(serializer.Load(raw_data))
2316 except Exception, err:
2317 raise errors.ConfigurationError(err)
2318
2319 # Make sure the configuration has the right version
2320 _ValidateConfig(data)
2321
2322 if (not hasattr(data, "cluster") or
2323 not hasattr(data.cluster, "rsahostkeypub")):
2324 raise errors.ConfigurationError("Incomplete configuration"
2325 " (missing cluster.rsahostkeypub)")
2326
2327 if data.cluster.master_node not in data.nodes:
2328 msg = ("The configuration denotes node %s as master, but does not"
2329 " contain information about this node" %
2330 data.cluster.master_node)
2331 raise errors.ConfigurationError(msg)
2332
2333 master_info = data.nodes[data.cluster.master_node]
2334 if master_info.name != self._my_hostname and not accept_foreign:
2335 msg = ("The configuration denotes node %s as master, while my"
2336 " hostname is %s; opening a foreign configuration is only"
2337 " possible in accept_foreign mode" %
2338 (master_info.name, self._my_hostname))
2339 raise errors.ConfigurationError(msg)
2340
2341 self._config_data = data
2342
2343 # Reset the last serial so that the next write triggers an ssconf update
2344 self._last_cluster_serial = -1
2345
2346 # Upgrade configuration if needed
2347 self._UpgradeConfig()
2348
2349 self._cfg_id = utils.GetFileID(path=self._cfg_file)
2350
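# Editor's sketch of the sanity checks performed by _OpenConfig, written as
# a standalone predicate (hypothetical helper mirroring the logic above):
#
#   def _config_is_sane(data, my_hostname, accept_foreign):
#     return (hasattr(data, "cluster") and
#             hasattr(data.cluster, "rsahostkeypub") and
#             data.cluster.master_node in data.nodes and
#             (accept_foreign or
#              data.nodes[data.cluster.master_node].name == my_hostname))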
2352 """Run any upgrade steps.
2353
2354 This method performs both in-object upgrades and also updates some data
2355 elements that need uniqueness across the whole configuration or interact
2356 with other objects.
2357
2358 @warning: this function will call L{_WriteConfig()}, but also
2359 L{DropECReservations} so it needs to be called only from a
2360 "safe" place (the constructor). If one wanted to call it with
2361 the lock held, a DropECReservationUnlocked would need to be
2362 created first, to avoid causing deadlock.
2363
2364 """
2365 # Snapshot the configuration as a dict, so that after the in-place
2366 # upgrades below we can detect whether anything actually changed
2367 oldconf = copy.deepcopy(self._config_data.ToDict())
2368
2369
2370 self._config_data.UpgradeConfig()
2371
2372 for item in self._AllUUIDObjects():
2373 if item.uuid is None:
2374 item.uuid = self._GenerateUniqueID(_UPGRADE_CONFIG_JID)
2375 if not self._config_data.nodegroups:
2376 default_nodegroup_name = constants.INITIAL_NODE_GROUP_NAME
2377 default_nodegroup = objects.NodeGroup(name=default_nodegroup_name,
2378 members=[])
2379 self._UnlockedAddNodeGroup(default_nodegroup, _UPGRADE_CONFIG_JID, True)
2380 for node in self._config_data.nodes.values():
2381 if not node.group:
2382 node.group = self.LookupNodeGroup(None)
2383
2384 # Not strictly an upgrade, but the group membership lists of older
2385 # configurations may be incomplete, and the ssconf files need them,
2386 # so make sure every node is listed in its group
2387 self._UnlockedAddNodeToGroup(node.uuid, node.group)
2388
2389 modified = (oldconf != self._config_data.ToDict())
2390 if modified:
2391 self._WriteConfig()
2392 # This is ok even if it acquires the internal lock, as _UpgradeConfig
2393 # is only called at config init time, without the lock held
2394 self.DropECReservations(_UPGRADE_CONFIG_JID)
2395 else:
2396 config_errors = self._UnlockedVerifyConfig()
2397 if config_errors:
2398 errmsg = ("Loaded configuration data is not consistent: %s" %
2399 (utils.CommaJoin(config_errors)))
2400 logging.critical(errmsg)
2401
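# Editor's sketch of the change-detection idiom used by _UpgradeConfig:
# snapshot the serialized form first and only write when it differs
# afterwards (names below are hypothetical).
#
#   oldconf = copy.deepcopy(config.ToDict())
#   run_upgrade_steps(config)          # in-place modifications
#   if oldconf != config.ToDict():
#     write_out(config)                # persist only on actual change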
2403 """Distribute the configuration to the other nodes.
2404
2405 Currently, this only copies the configuration file. In the future,
2406 it could be used to encapsulate the 2/3-phase update mechanism.
2407
2408 """
2409 if self._offline:
2410 return True
2411
2412 bad = False
2413
2414 node_list = []
2415 addr_list = []
2416 myhostname = self._my_hostname
2417
2418 # We can skip checking whether _UnlockedGetNodeInfo returns None, since
2419 # the node list comes from _UnlockedGetNodeList and we hold the config
2420 # lock, so no modifications can happen in between
2421 for node_uuid in self._UnlockedGetNodeList():
2422 node_info = self._UnlockedGetNodeInfo(node_uuid)
2423 if node_info.name == myhostname or not node_info.master_candidate:
2424 continue
2425 node_list.append(node_info.name)
2426 addr_list.append(node_info.primary_ip)
2427
2428
2429 result = \
2430 self._GetRpc(addr_list).call_upload_file(node_list, self._cfg_file)
2431 for to_node, to_result in result.items():
2432 msg = to_result.fail_msg
2433 if msg:
2434 msg = ("Copy of file %s to node %s failed: %s" %
2435 (self._cfg_file, to_node, msg))
2436 logging.error(msg)
2437
2438 if feedback_fn:
2439 feedback_fn(msg)
2440
2441 bad = True
2442
2443 return not bad
2444
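# Editor's note: distribution is best-effort - a failed upload to one
# master candidate is logged (and reported via feedback_fn, if given) but
# does not undo the local write. Sketch of the result handling pattern:
#
#   result = rpc_runner.call_upload_file(node_list, path)  # assumed API
#   ok = all(not res.fail_msg for res in result.values())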
2445 def _WriteConfig(self, destination=None, feedback_fn=None):
2446 """Write the configuration data to persistent storage.
2447
2448 """
2449 assert feedback_fn is None or callable(feedback_fn)
2450
2451 # Warn on config errors, but don't abort the save - the configuration
2452 # has already been modified in memory and we can't revert; the best we
2453 # can do is to warn the user and save as is, leaving recovery to the
2454 # user
2455 config_errors = self._UnlockedVerifyConfig()
2456 if config_errors:
2457 errmsg = ("Configuration data is not consistent: %s" %
2458 (utils.CommaJoin(config_errors)))
2459 logging.critical(errmsg)
2460 if feedback_fn:
2461 feedback_fn(errmsg)
2462
2463 if destination is None:
2464 destination = self._cfg_file
2465 self._BumpSerialNo()
2466 txt = serializer.Dump(self._config_data.ToDict())
2467
2468 getents = self._getents()
2469 try:
2470 fd = utils.SafeWriteFile(destination, self._cfg_id, data=txt,
2471 close=False, gid=getents.confd_gid, mode=0640)
2472 except errors.LockError:
2473 raise errors.ConfigurationError("The configuration file has been"
2474 " modified since the last write, cannot"
2475 " update")
2476 try:
2477 self._cfg_id = utils.GetFileID(fd=fd)
2478 finally:
2479 os.close(fd)
2480
2481 self.write_count += 1
2482
2483 # ... and redistribute the config file to master candidates
2484 self._DistributeConfig(feedback_fn)
2485
2486 # Write ssconf files on all nodes (including locally)
2487 if self._last_cluster_serial < self._config_data.cluster.serial_no:
2488 if not self._offline:
2489 result = self._GetRpc(None).call_write_ssconf_files(
2490 self._UnlockedGetNodeNames(self._UnlockedGetOnlineNodeList()),
2491 self._UnlockedGetSsconfValues())
2492
2493 for nname, nresu in result.items():
2494 msg = nresu.fail_msg
2495 if msg:
2496 errmsg = ("Error while uploading ssconf files to"
2497 " node %s: %s" % (nname, msg))
2498 logging.warning(errmsg)
2499
2500 if feedback_fn:
2501 feedback_fn(errmsg)
2502
2503 self._last_cluster_serial = self._config_data.cluster.serial_no
2504
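# Editor's sketch of the optimistic-concurrency scheme used above: the file
# ID recorded when the config was read must still match at write time;
# utils.SafeWriteFile raises errors.LockError otherwise.
#
#   cfg_id = utils.GetFileID(path=cfg_file)            # remembered at read
#   ...
#   utils.SafeWriteFile(cfg_file, cfg_id, data=txt)    # checked at write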
2506 """Get the hvparams of all given hypervisors from the config.
2507
2508 @type hypervisors: list of string
2509 @param hypervisors: list of hypervisor names
2510 @rtype: dict of strings
2511 @returns: dictionary mapping the hypervisor name to a string representation
2512 of the hypervisor's hvparams
2513
2514 """
2515 hvparams = {}
2516 for hv in hypervisors:
2517 hvparams[hv] = self._UnlockedGetHvparamsString(hv)
2518 return hvparams
2519
2520 @staticmethod
2522 """Extends the ssconf_values dictionary by hvparams.
2523
2524 @type ssconf_values: dict of strings
2525 @param ssconf_values: dictionary mapping ssconf_keys to strings
2526 representing the content of ssconf files
2527 @type all_hvparams: dict of strings
2528 @param all_hvparams: dictionary mapping hypervisor names to a string
2529 representation of their hvparams
2530 @rtype: same as ssconf_values
2531 @returns: the ssconf_values dictionary extended by hvparams
2532
2533 """
2534 for hv in all_hvparams:
2535 ssconf_key = constants.SS_HVPARAMS_PREF + hv
2536 ssconf_values[ssconf_key] = all_hvparams[hv]
2537 return ssconf_values
2538
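# Editor's example of how the two hvparams helpers above combine
# (hypothetical values; both are internal helpers of this class):
#
#   all_hvparams = cfg._GetAllHvparamsStrings(["kvm"])
#   ssconf = cfg._ExtendByAllHvparamsStrings({}, all_hvparams)
#   # ssconf now maps constants.SS_HVPARAMS_PREF + "kvm" to the string
#   # representation of the KVM hvparams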
2540 """Return the values needed by ssconf.
2541
2542 @rtype: dict
2543 @return: a dictionary with keys the ssconf names and values their
2544 associated value
2545
2546 """
2547 fn = "\n".join
2548 instance_names = utils.NiceSort(
2549 [inst.name for inst in
2550 self._UnlockedGetAllInstancesInfo().values()])
2551 node_infos = self._UnlockedGetAllNodesInfo().values()
2552 node_names = [node.name for node in node_infos]
2553 node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip)
2554 for ninfo in node_infos]
2555 node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip)
2556 for ninfo in node_infos]
2557
2558 instance_data = fn(instance_names)
2559 off_data = fn(node.name for node in node_infos if node.offline)
2560 on_data = fn(node.name for node in node_infos if not node.offline)
2561 mc_data = fn(node.name for node in node_infos if node.master_candidate)
2562 mc_ips_data = fn(node.primary_ip for node in node_infos
2563 if node.master_candidate)
2564 node_data = fn(node_names)
2565 node_pri_ips_data = fn(node_pri_ips)
2566 node_snd_ips_data = fn(node_snd_ips)
2567
2568 cluster = self._config_data.cluster
2569 cluster_tags = fn(cluster.GetTags())
2570
2571 hypervisor_list = fn(cluster.enabled_hypervisors)
2572 all_hvparams = self._GetAllHvparamsStrings(constants.HYPER_TYPES)
2573
2574 uid_pool = uidpool.FormatUidPool(cluster.uid_pool, separator="\n")
2575
2576 nodegroups = ["%s %s" % (nodegroup.uuid, nodegroup.name) for nodegroup in
2577 self._config_data.nodegroups.values()]
2578 nodegroups_data = fn(utils.NiceSort(nodegroups))
2579 networks = ["%s %s" % (net.uuid, net.name) for net in
2580 self._config_data.networks.values()]
2581 networks_data = fn(utils.NiceSort(networks))
2582
2583 ssconf_values = {
2584 constants.SS_CLUSTER_NAME: cluster.cluster_name,
2585 constants.SS_CLUSTER_TAGS: cluster_tags,
2586 constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
2587 constants.SS_SHARED_FILE_STORAGE_DIR: cluster.shared_file_storage_dir,
2588 constants.SS_MASTER_CANDIDATES: mc_data,
2589 constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data,
2590 constants.SS_MASTER_IP: cluster.master_ip,
2591 constants.SS_MASTER_NETDEV: cluster.master_netdev,
2592 constants.SS_MASTER_NETMASK: str(cluster.master_netmask),
2593 constants.SS_MASTER_NODE: self._UnlockedGetNodeName(cluster.master_node),
2594 constants.SS_NODE_LIST: node_data,
2595 constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,
2596 constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data,
2597 constants.SS_OFFLINE_NODES: off_data,
2598 constants.SS_ONLINE_NODES: on_data,
2599 constants.SS_PRIMARY_IP_FAMILY: str(cluster.primary_ip_family),
2600 constants.SS_INSTANCE_LIST: instance_data,
2601 constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION,
2602 constants.SS_HYPERVISOR_LIST: hypervisor_list,
2603 constants.SS_MAINTAIN_NODE_HEALTH: str(cluster.maintain_node_health),
2604 constants.SS_UID_POOL: uid_pool,
2605 constants.SS_NODEGROUPS: nodegroups_data,
2606 constants.SS_NETWORKS: networks_data,
2607 }
2608 ssconf_values = self._ExtendByAllHvparamsStrings(ssconf_values,
2609 all_hvparams)
2610 bad_values = [(k, v) for k, v in ssconf_values.items()
2611 if not isinstance(v, basestring)]
2612 if bad_values:
2613 err = utils.CommaJoin("%s=%s" % (k, v) for k, v in bad_values)
2614 raise errors.ConfigurationError("Some ssconf key(s) have non-string"
2615 " values: %s" % err)
2616 return ssconf_values
2617
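# Editor's note: every ssconf value is a single string; list-valued entries
# are newline-joined, so consumers split them back, e.g.:
#
#   values = cfg.GetSsconfValues()
#   online_nodes = values[constants.SS_ONLINE_NODES].splitlines()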
2618 @locking.ssynchronized(_config_lock, shared=1)
2619 def GetSsconfValues(self):
2620 """Locking wrapper around L{_UnlockedGetSsconfValues}.
2621
2622 """
2623 return self._UnlockedGetSsconfValues()
2624
2625 @locking.ssynchronized(_config_lock, shared=1)
2627 """Return the volume group name.
2628
2629 """
2630 return self._config_data.cluster.volume_group_name
2631
2632 @locking.ssynchronized(_config_lock)
2634 """Set the volume group name.
2635
2636 """
2637 self._config_data.cluster.volume_group_name = vg_name
2638 self._config_data.cluster.serial_no += 1
2639 self._WriteConfig()
2640
2641 @locking.ssynchronized(_config_lock, shared=1)
2643 """Return DRBD usermode helper.
2644
2645 """
2646 return self._config_data.cluster.drbd_usermode_helper
2647
2648 @locking.ssynchronized(_config_lock)
2650 """Set DRBD usermode helper.
2651
2652 """
2653 self._config_data.cluster.drbd_usermode_helper = drbd_helper
2654 self._config_data.cluster.serial_no += 1
2655 self._WriteConfig()
2656
2657 @locking.ssynchronized(_config_lock, shared=1)
2659 """Return the mac prefix.
2660
2661 """
2662 return self._config_data.cluster.mac_prefix
2663
2664 @locking.ssynchronized(_config_lock, shared=1)
2666 """Returns information about the cluster
2667
2668 @rtype: L{objects.Cluster}
2669 @return: the cluster object
2670
2671 """
2672 return self._config_data.cluster
2673
2674 @locking.ssynchronized(_config_lock, shared=1)
2676 """Check if in there is at disk of the given type in the configuration.
2677
2678 """
2679 return self._config_data.HasAnyDiskOfType(dev_type)
2680
2681 @locking.ssynchronized(_config_lock)
2682 def Update(self, target, feedback_fn, ec_id=None):
2683 """Notify function to be called after updates.
2684
2685 This function must be called when an object (as returned by
2686 GetInstanceInfo, GetNodeInfo, GetCluster) has been updated and the
2687 caller wants the modifications saved to the backing store. Note
2688 that all modified objects will be saved, but the target argument
2689 is the one the caller wants to ensure is saved.
2690
2691 @param target: an instance of either L{objects.Cluster},
2692 L{objects.Node} or L{objects.Instance} which is existing in
2693 the cluster
2694 @param feedback_fn: Callable feedback function
2695
2696 """
2697 if self._config_data is None:
2698 raise errors.ProgrammerError("Configuration file not read,"
2699 " cannot save.")
2700 update_serial = False
2701 if isinstance(target, objects.Cluster):
2702 test = target == self._config_data.cluster
2703 elif isinstance(target, objects.Node):
2704 test = target in self._config_data.nodes.values()
2705 update_serial = True
2706 elif isinstance(target, objects.Instance):
2707 test = target in self._config_data.instances.values()
2708 elif isinstance(target, objects.NodeGroup):
2709 test = target in self._config_data.nodegroups.values()
2710 elif isinstance(target, objects.Network):
2711 test = target in self._config_data.networks.values()
2712 else:
2713 raise errors.ProgrammerError("Invalid object type (%s) passed to"
2714 " ConfigWriter.Update" % type(target))
2715 if not test:
2716 raise errors.ConfigurationError("Configuration updated since object"
2717 " has been read or unknown object")
2718 target.serial_no += 1
2719 target.mtime = now = time.time()
2720
2721 if update_serial:
2722 # For node updates, we need to increase the cluster serial too
2723 self._config_data.cluster.serial_no += 1
2724 self._config_data.cluster.mtime = now
2725
2726 if isinstance(target, objects.Instance):
2727 self._UnlockedReleaseDRBDMinors(target.uuid)
2728
2729 if ec_id is not None:
2730 # Commit all IP addresses reserved by this execution context
2731 self._UnlockedCommitTemporaryIps(ec_id)
2732
2733 self._WriteConfig(feedback_fn=feedback_fn)
2734
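# Editor's example of the read-modify-Update cycle (names hypothetical):
#
#   node = cfg.GetNodeInfo(node_uuid)    # fetch the object
#   node.powered = False                 # modify it in place
#   cfg.Update(node, feedback_fn)        # bump serials and write the config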
2735 @locking.ssynchronized(_config_lock)
2737 """Drop per-execution-context reservations
2738
2739 """
2740 for rm in self._all_rms:
2741 rm.DropECReservations(ec_id)
2742
2743 @locking.ssynchronized(_config_lock, shared=1)
2745 """Get configuration info of all the networks.
2746
2747 """
2748 return dict(self._config_data.networks)
2749
2751 """Get the list of networks.
2752
2753 This function is for internal use, when the config lock is already held.
2754
2755 """
2756 return self._config_data.networks.keys()
2757
2758 @locking.ssynchronized(_config_lock, shared=1)
2760 """Get the list of networks.
2761
2762 @return: list of networks, e.g. ["main", "vlan100", "200"]
2763
2764 """
2765 return self._UnlockedGetNetworkList()
2766
2767 @locking.ssynchronized(_config_lock, shared=1)
2769 """Get a list of network names
2770
2771 """
2772 names = [net.name
2773 for net in self._config_data.networks.values()]
2774 return names
2775
2777 """Returns information about a network.
2778
2779 This function is for internal use, when the config lock is already held.
2780
2781 """
2782 if uuid not in self._config_data.networks:
2783 return None
2784
2785 return self._config_data.networks[uuid]
2786
2787 @locking.ssynchronized(_config_lock, shared=1)
2789 """Returns information about a network.
2790
2791 It takes the information from the configuration file.
2792
2793 @param uuid: UUID of the network
2794
2795 @rtype: L{objects.Network}
2796 @return: the network object
2797
2798 """
2799 return self._UnlockedGetNetwork(uuid)
2800
2801 @locking.ssynchronized(_config_lock)
2802 def AddNetwork(self, net, ec_id, check_uuid=True):
2803 """Add a network to the configuration.
2804
2805 @type net: L{objects.Network}
2806 @param net: the Network object to add
2807 @type ec_id: string
2808 @param ec_id: unique id for the job to use when creating a missing UUID
2809
2810 """
2811 self._UnlockedAddNetwork(net, ec_id, check_uuid)
2812 self._WriteConfig()
2813
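# Editor's example (hypothetical values): registering a new network; a
# missing UUID is generated from the execution context id.
#
#   net = objects.Network(name="vlan100", network="192.0.2.0/24")
#   cfg.AddNetwork(net, ec_id)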
2815 """Add a network to the configuration.
2816
2817 """
2818 logging.info("Adding network %s to configuration", net.name)
2819
2820 if check_uuid:
2821 self._EnsureUUID(net, ec_id)
2822
2823 net.serial_no = 1
2824 net.ctime = net.mtime = time.time()
2825 self._config_data.networks[net.uuid] = net
2826 self._config_data.cluster.serial_no += 1
2827
2829 """Lookup a network's UUID.
2830
2831 @type target: string
2832 @param target: network name or UUID
2833 @rtype: string
2834 @return: network UUID
2835 @raises errors.OpPrereqError: when the target network cannot be found
2836
2837 """
2838 if target is None:
2839 return None
2840 if target in self._config_data.networks:
2841 return target
2842 for net in self._config_data.networks.values():
2843 if net.name == target:
2844 return net.uuid
2845 raise errors.OpPrereqError("Network '%s' not found" % target,
2846 errors.ECODE_NOENT)
2847
2848 @locking.ssynchronized(_config_lock, shared=1)
2850 """Lookup a network's UUID.
2851
2852 This function is just a wrapper over L{_UnlockedLookupNetwork}.
2853
2854 @type target: string
2855 @param target: network name or UUID
2856 @rtype: string
2857 @return: network UUID
2858
2859 """
2860 return self._UnlockedLookupNetwork(target)
2861
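# Editor's note: lookup accepts either a network name or a UUID and always
# returns the UUID, so it is idempotent on UUIDs (hypothetical values):
#
#   uuid = cfg.LookupNetwork("vlan100")
#   assert uuid == cfg.LookupNetwork(uuid)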
2862 @locking.ssynchronized(_config_lock)
2864 """Remove a network from the configuration.
2865
2866 @type network_uuid: string
2867 @param network_uuid: the UUID of the network to remove
2868
2869 """
2870 logging.info("Removing network %s from configuration", network_uuid)
2871
2872 if network_uuid not in self._config_data.networks:
2873 raise errors.ConfigurationError("Unknown network '%s'" % network_uuid)
2874
2875 del self._config_data.networks[network_uuid]
2876 self._config_data.cluster.serial_no += 1
2877 self._WriteConfig()
2878
2880 """Get the netparams (mode, link) of a network.
2881
2882 Get a network's netparams for a given node.
2883
2884 @type net_uuid: string
2885 @param net_uuid: network uuid
2886 @type node_uuid: string
2887 @param node_uuid: node UUID
2888 @rtype: dict or None
2889 @return: netparams
2890
2891 """
2892 node_info = self._UnlockedGetNodeInfo(node_uuid)
2893 nodegroup_info = self._UnlockedGetNodeGroup(node_info.group)
2894 netparams = nodegroup_info.networks.get(net_uuid, None)
2895
2896 return netparams
2897
2898 @locking.ssynchronized(_config_lock, shared=1)
2900 """Locking wrapper of _UnlockedGetGroupNetParams()
2901
2902 """
2903 return self._UnlockedGetGroupNetParams(net_uuid, node_uuid)
2904
2905 @locking.ssynchronized(_config_lock, shared=1)
2907 """Check IP uniqueness in nodegroup.
2908
2909 Checks the networks connected to the node's node group to see whether
2910 the given IP address is contained in any of them. Used when creating
2911 or adding a NIC to ensure uniqueness across node groups.
2912
2913 @type ip: string
2914 @param ip: ip address
2915 @type node_uuid: string
2916 @param node_uuid: node UUID
2917 @rtype: (string, dict) or (None, None)
2918 @return: (network name, netparams)
2919
2920 """
2921 if ip is None:
2922 return (None, None)
2923 node_info = self._UnlockedGetNodeInfo(node_uuid)
2924 nodegroup_info = self._UnlockedGetNodeGroup(node_info.group)
2925 for net_uuid in nodegroup_info.networks.keys():
2926 net_info = self._UnlockedGetNetwork(net_uuid)
2927 pool = network.AddressPool(net_info)
2928 if pool.Contains(ip):
2929 return (net_info.name, nodegroup_info.networks[net_uuid])
2930
2931 return (None, None)
2932
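# Editor's example (hypothetical values): checking whether an IP is already
# covered by a network connected to the node's group before adding a NIC.
#
#   (net_name, netparams) = cfg.CheckIPInNodeGroup("192.0.2.10", node_uuid)
#   if net_name is not None:
#     # the IP belongs to net_name; netparams holds its mode/link settings
#     ...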