31 """Logical units dealing with the cluster."""
32
33 import copy
34 import itertools
35 import logging
36 import operator
37 import os
38 import re
39 import time
40
41 from ganeti import compat
42 from ganeti import constants
43 from ganeti import errors
44 from ganeti import hypervisor
45 from ganeti import locking
46 from ganeti import masterd
47 from ganeti import netutils
48 from ganeti import objects
49 from ganeti import opcodes
50 from ganeti import pathutils
51 from ganeti import query
52 import ganeti.rpc.node as rpc
53 from ganeti import runtime
54 from ganeti import ssh
55 from ganeti import uidpool
56 from ganeti import utils
57 from ganeti import vcluster
58
59 from ganeti.cmdlib.base import NoHooksLU, QueryBase, LogicalUnit, \
60 ResultWithJobs
61 from ganeti.cmdlib.common import ShareAll, RunPostHook, \
62 ComputeAncillaryFiles, RedistributeAncillaryFiles, UploadHelper, \
63 GetWantedInstances, MergeAndVerifyHvState, MergeAndVerifyDiskState, \
64 GetUpdatedIPolicy, ComputeNewInstanceViolations, GetUpdatedParams, \
65 CheckOSParams, CheckHVParams, AdjustCandidatePool, CheckNodePVs, \
66 ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob, \
67 CheckIpolicyVsDiskTemplates, CheckDiskAccessModeValidity, \
68 CheckDiskAccessModeConsistency, GetClientCertDigest, \
69 AddInstanceCommunicationNetworkOp, ConnectInstanceCommunicationNetworkOp, \
70 CheckImageValidity, CheckDiskAccessModeConsistency, EnsureKvmdOnNodes
71
72 import ganeti.masterd.instance
76 """Renew the cluster's crypto tokens.
77
78 """
79
80 _MAX_NUM_RETRIES = 3
81 REQ_BGL = False
82
89
91 """Check prerequisites.
92
93 This checks whether the cluster is empty.
94
95 Any errors are signaled by raising errors.OpPrereqError.
96
97 """
98 self._ssh_renewal_suppressed = \
99 not self.cfg.GetClusterInfo().modify_ssh_setup and self.op.ssh_keys
100
102 """Renews the nodes' SSL certificates.
103
104 Note that most of this operation is done in gnt_cluster.py, this LU only
105 takes care of the renewal of the client SSL certificates.
106
107 """
108 master_uuid = self.cfg.GetMasterNode()
109 cluster = self.cfg.GetClusterInfo()
110
111 logging.debug("Renewing the master's SSL node certificate."
112 " Master's UUID: %s.", master_uuid)
113
114
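    # Collect the new client certificate digests of all nodes, starting
    # with the master's own certificate.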
    digest_map = {}
    master_digest = utils.GetCertificateDigest(
        cert_filename=pathutils.NODED_CLIENT_CERT_FILE)
    digest_map[master_uuid] = master_digest
    logging.debug("Adding the master's SSL node certificate digest to the"
                  " configuration. Master's UUID: %s, Digest: %s",
                  master_uuid, master_digest)

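    # Fetch the client certificate digests of the other online nodes,
    # retrying up to _MAX_NUM_RETRIES times per node to tolerate transient
    # RPC failures.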
    node_errors = {}
    nodes = self.cfg.GetAllNodesInfo()
    logging.debug("Renewing non-master nodes' node certificates.")
    for (node_uuid, node_info) in nodes.items():
      if node_info.offline:
        logging.info("* Skipping offline node %s", node_info.name)
        continue
      if node_uuid != master_uuid:
        logging.debug("Adding certificate digest of node '%s'.", node_uuid)
        last_exception = None
        for i in range(self._MAX_NUM_RETRIES):
          try:
            if node_info.master_candidate:
              node_digest = GetClientCertDigest(self, node_uuid)
              digest_map[node_uuid] = node_digest
              logging.debug("Added the node's certificate to candidate"
                            " certificate list. Current list: %s.",
                            str(cluster.candidate_certs))
            break
          except errors.OpExecError as e:
            last_exception = e
            logging.error("Could not fetch a non-master node's SSL node"
                          " certificate at attempt no. %s. The node's UUID"
                          " is %s, and the error was: %s.",
                          str(i), node_uuid, e)
        else:
          if last_exception:
            node_errors[node_uuid] = last_exception

    if node_errors:
      msg = ("Some nodes' SSL client certificates could not be fetched."
             " Please make sure those nodes are reachable and rerun"
             " the operation. The affected nodes and their errors are:\n")
      for uuid, e in node_errors.items():
        msg += "Node %s: %s\n" % (uuid, e)
      feedback_fn(msg)

    self.cfg.SetCandidateCerts(digest_map)

  def _RenewSshKeys(self):
    """Renew all nodes' SSH keys.

    """
    master_uuid = self.cfg.GetMasterNode()

    nodes = self.cfg.GetAllNodesInfo()
    nodes_uuid_names = [(node_uuid, node_info.name) for (node_uuid, node_info)
                        in nodes.items() if not node_info.offline]
    node_names = [name for (_, name) in nodes_uuid_names]
    node_uuids = [uuid for (uuid, _) in nodes_uuid_names]
    potential_master_candidates = self.cfg.GetPotentialMasterCandidates()
    master_candidate_uuids = self.cfg.GetMasterCandidateUuids()
    result = self.rpc.call_node_ssh_keys_renew(
        [master_uuid],
        node_uuids, node_names,
        master_candidate_uuids,
        potential_master_candidates)
    result[master_uuid].Raise("Could not renew the SSH keys of all nodes")

  def Exec(self, feedback_fn):
    if self.op.node_certificates:
      feedback_fn("Renewing Node SSL certificates")
      self._RenewNodeSslCertificates(feedback_fn)
    if self.op.ssh_keys and not self._ssh_renewal_suppressed:
      feedback_fn("Renewing SSH keys")
      self._RenewSshKeys()
    elif self._ssh_renewal_suppressed:
      feedback_fn("Cannot renew SSH keys if the cluster is configured to not"
                  " modify the SSH setup.")


class LUClusterActivateMasterIp(NoHooksLU):
  """Activate the master IP on the master node.

  """
  def Exec(self, feedback_fn):
    """Activate the master IP.

    """
    master_params = self.cfg.GetMasterNetworkParameters()
    ems = self.cfg.GetUseExternalMipScript()
    result = self.rpc.call_node_activate_master_ip(master_params.uuid,
                                                   master_params, ems)
    result.Raise("Could not activate the master IP")


class LUClusterDeactivateMasterIp(NoHooksLU):
  """Deactivate the master IP on the master node.

  """
  def Exec(self, feedback_fn):
    """Deactivate the master IP.

    """
    master_params = self.cfg.GetMasterNetworkParameters()
    ems = self.cfg.GetUseExternalMipScript()
    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
                                                     master_params, ems)
    result.Raise("Could not deactivate the master IP")
225 """Return configuration values.
226
227 """
228 REQ_BGL = False
229
231 self.cq = ClusterQuery(None, self.op.output_fields, False)
232
  def ExpandNames(self):
    self.cq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.cq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    result = self.cq.OldStyleQuery(self)

    assert len(result) == 1

    return result[0]
248 """Logical unit for destroying the cluster.
249
250 """
251 HPATH = "cluster-destroy"
252 HTYPE = constants.HTYPE_CLUSTER
253
254
255
256
257 clusterHasBeenDestroyed = False
258
260 """Build hooks env.
261
262 """
263 return {
264 "OP_TARGET": self.cfg.GetClusterName(),
265 }
266
268 """Build hooks nodes.
269
270 """
271 return ([], [])

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):


class LUClusterPostInit(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER

  def CheckArguments(self):
    self.master_uuid = self.cfg.GetMasterNode()
    self.master_ndparams = self.cfg.GetNdParams(self.cfg.GetMasterNodeInfo())

    if (self.master_ndparams[constants.ND_OVS] and not
        self.master_ndparams.get(constants.ND_OVS_LINK, None)):
      self.LogInfo("No physical interface for OpenvSwitch was given."
                   " OpenvSwitch will not have an outside connection. This"
                   " might not be what you want.")

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([], [self.cfg.GetMasterNode()])

  def Exec(self, feedback_fn):
    """Create and configure Open vSwitch.

    """
    if self.master_ndparams[constants.ND_OVS]:
      result = self.rpc.call_node_configure_ovs(
          self.master_uuid,
          self.master_ndparams[constants.ND_OVS_NAME],
          self.master_ndparams.get(constants.ND_OVS_LINK, None))
      result.Raise("Could not successfully configure Open vSwitch")

    return True


class LUClusterQuery(NoHooksLU):
  """Query cluster configuration.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    os_hvp = {}

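    # Filter the per-OS hypervisor parameters down to the enabled
    # hypervisors.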
    for os_name, hv_dict in cluster.os_hvp.items():
      os_hvp[os_name] = {}
      for hv_name, hv_params in hv_dict.items():
        if hv_name in cluster.enabled_hypervisors:
          os_hvp[os_name][hv_name] = hv_params

    primary_ip_version = constants.IP4_VERSION
    if cluster.primary_ip_family == netutils.IP6Address.family:
      primary_ip_version = constants.IP6_VERSION

    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": max(constants.OS_API_VERSIONS),
      "export_version": constants.EXPORT_VERSION,
      "vcs_version": constants.VCS_VERSION,
      "architecture": runtime.GetArchInfo(),
      "name": cluster.cluster_name,
      "master": self.cfg.GetMasterNodeName(),
      "default_hypervisor": cluster.primary_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
                        for hypervisor_name in cluster.enabled_hypervisors]),
      "os_hvp": os_hvp,
      "beparams": cluster.beparams,
      "osparams": cluster.osparams,
      "ipolicy": cluster.ipolicy,
      "nicparams": cluster.nicparams,
      "ndparams": cluster.ndparams,
      "diskparams": cluster.diskparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "max_running_jobs": cluster.max_running_jobs,
      "max_tracked_jobs": cluster.max_tracked_jobs,
      "mac_prefix": cluster.mac_prefix,
      "master_netdev": cluster.master_netdev,
      "master_netmask": cluster.master_netmask,
      "use_external_mip_script": cluster.use_external_mip_script,
      "volume_group_name": cluster.volume_group_name,
      "drbd_usermode_helper": cluster.drbd_usermode_helper,
      "file_storage_dir": cluster.file_storage_dir,
      "shared_file_storage_dir": cluster.shared_file_storage_dir,
      "maintain_node_health": cluster.maintain_node_health,
      "ctime": cluster.ctime,
      "mtime": cluster.mtime,
      "uuid": cluster.uuid,
      "tags": list(cluster.GetTags()),
      "uid_pool": cluster.uid_pool,
      "default_iallocator": cluster.default_iallocator,
      "default_iallocator_params": cluster.default_iallocator_params,
      "reserved_lvs": cluster.reserved_lvs,
      "primary_ip_version": primary_ip_version,
      "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
      "hidden_os": cluster.hidden_os,
      "blacklisted_os": cluster.blacklisted_os,
      "enabled_disk_templates": cluster.enabled_disk_templates,
      "install_image": cluster.install_image,
      "instance_communication_network": cluster.instance_communication_network,
      "compression_tools": cluster.compression_tools,
      "enabled_user_shutdown": cluster.enabled_user_shutdown,
      }

    return result


class LUClusterRedistConf(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks = ShareAll()

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
    RedistributeAncillaryFiles(self)
522 """Rename the cluster.
523
524 """
525 HPATH = "cluster-rename"
526 HTYPE = constants.HTYPE_CLUSTER
527
529 """Build hooks env.
530
531 """
532 return {
533 "OP_TARGET": self.cfg.GetClusterName(),
534 "NEW_NAME": self.op.name,
535 }
536
542
565
  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    new_ip = self.ip

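    # Shut down the master IP while the cluster name and IP are changed.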
    master_params = self.cfg.GetMasterNetworkParameters()
    ems = self.cfg.GetUseExternalMipScript()
    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
                                                     master_params, ems)
    result.Raise("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = new_ip
      self.cfg.Update(cluster, feedback_fn)

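      # Update the known_hosts file with the new cluster name and
      # distribute it to all online nodes.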
      ssh.WriteKnownHostsFile(self.cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetOnlineNodeList()
      try:
        node_list.remove(master_params.uuid)
      except ValueError:
        pass
      UploadHelper(self, node_list, pathutils.SSH_KNOWN_HOSTS_FILE)
    finally:
      master_params.ip = new_ip
      result = self.rpc.call_node_activate_master_ip(master_params.uuid,
                                                     master_params, ems)
      result.Warn("Could not re-enable the master role on the master,"
                  " please restart manually", self.LogWarning)

    return clustername


class LUClusterRepairDiskSizes(NoHooksLU):
  """Verifies the sizes of the cluster's disks.

  """
  REQ_BGL = False

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and self.wanted_names is not None:
      self._LockInstancesNodes(primary_only=True, level=level)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)

    self.wanted_instances = \
      map(compat.snd, self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8, where the children can end up with a
    smaller disk size than the parent.

    @param disk: an L{ganeti.objects.Disk} object

    """
    if disk.dev_type == constants.DT_DRBD8:
      assert disk.children, "Empty children for DRBD8?"
      fchild = disk.children[0]
      mismatch = fchild.size < disk.size
      if mismatch:
        self.LogInfo("Child disk has size %d, parent %d, fixing",
                     fchild.size, disk.size)
        fchild.size = disk.size

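      # Recurse on the data child only, not on the metadata device.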
      return self._EnsureChildSizes(fchild) or mismatch
    else:
      return False

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    """
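    # Group the disks by the primary node of their instance, so that the
    # dimensions can be queried with one RPC per node.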
    per_node_disks = {}
    for instance in self.wanted_instances:
      pnode = instance.primary_node
      if pnode not in per_node_disks:
        per_node_disks[pnode] = []
      for idx, disk in enumerate(self.cfg.GetInstanceDisks(instance.uuid)):
        per_node_disks[pnode].append((instance, idx, disk))

    assert not (frozenset(per_node_disks.keys()) -
                frozenset(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning correct locks"
    assert not self.owned_locks(locking.LEVEL_NODE)

    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg,
                                               per_node_disks.keys())

    changed = []
    for node_uuid, dskl in per_node_disks.items():
      if not dskl:
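        # No disks on this node: nothing to check.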
        continue

      newl = [([v[2].Copy()], v[0]) for v in dskl]
      node_name = self.cfg.GetNodeName(node_uuid)
      result = self.rpc.call_blockdev_getdimensions(node_uuid, newl)
      if result.fail_msg:
        self.LogWarning("Failure in blockdev_getdimensions call to node"
                        " %s, ignoring", node_name)
        continue
      if len(result.payload) != len(dskl):
        logging.warning("Invalid result from node %s: len(dskl)=%d,"
                        " result.payload=%s", node_name, len(dskl),
                        result.payload)
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node_name)
        continue
      for ((instance, idx, disk), dimensions) in zip(dskl, result.payload):
        if dimensions is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(dimensions, (tuple, list)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " dimension information, ignoring", idx,
                          instance.name)
          continue
        (size, spindles) = dimensions
        if not isinstance(size, int):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
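        # The reported size is in bytes; disk sizes are tracked in MiB.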
        size = size >> 20
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(disk, feedback_fn)
          changed.append((instance.name, idx, "size", size))
        if es_flags[node_uuid]:
          if spindles is None:
            self.LogWarning("Disk %d of instance %s did not return valid"
                            " spindles information, ignoring", idx,
                            instance.name)
          elif disk.spindles is None or disk.spindles != spindles:
            self.LogInfo("Disk %d of instance %s has mismatched spindles,"
                         " correcting: recorded %s, actual %s",
                         idx, instance.name, disk.spindles, spindles)
            disk.spindles = spindles
            self.cfg.Update(disk, feedback_fn)
            changed.append((instance.name, idx, "spindles", disk.spindles))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(disk, feedback_fn)
          changed.append((instance.name, idx, "size", disk.size))
    return changed


def CheckFileBasedStoragePathVsEnabledDiskTemplates(
    logging_warn_fn, file_storage_dir, enabled_disk_templates,
    file_disk_template):
  """Checks whether the given file-based storage directory is acceptable.

  Note: This function is public, because it is also used in bootstrap.py.

  @type logging_warn_fn: function
  @param logging_warn_fn: function which accepts a string and logs it
  @type file_storage_dir: string
  @param file_storage_dir: the directory to be used for file-based instances
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: the list of enabled disk templates
  @type file_disk_template: string
  @param file_disk_template: the file-based disk template for which the
      path should be checked

  """
  assert (file_disk_template in utils.storage.GetDiskTemplatesOfStorageTypes(
      constants.ST_FILE, constants.ST_SHARED_FILE, constants.ST_GLUSTER
  ))

  file_storage_enabled = file_disk_template in enabled_disk_templates
  if file_storage_dir is not None:
    if file_storage_dir == "":
      if file_storage_enabled:
        raise errors.OpPrereqError(
            "Unsetting the '%s' storage directory while having '%s' storage"
            " enabled is not permitted." %
            (file_disk_template, file_disk_template),
            errors.ECODE_INVAL)
    else:
      if not file_storage_enabled:
        logging_warn_fn(
            "Specified a %s storage directory, although %s storage is not"
            " enabled." % (file_disk_template, file_disk_template))
  else:
    raise errors.ProgrammerError("Received %s storage dir with value"
                                 " 'None'." % file_disk_template)


def CheckFileStoragePathVsEnabledDiskTemplates(
    logging_warn_fn, file_storage_dir, enabled_disk_templates):
  """Checks whether the given file storage directory is acceptable.

  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}

  """
  CheckFileBasedStoragePathVsEnabledDiskTemplates(
      logging_warn_fn, file_storage_dir, enabled_disk_templates,
      constants.DT_FILE)


def CheckSharedFileStoragePathVsEnabledDiskTemplates(
    logging_warn_fn, file_storage_dir, enabled_disk_templates):
  """Checks whether the given shared file storage directory is acceptable.

  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}

  """
  CheckFileBasedStoragePathVsEnabledDiskTemplates(
      logging_warn_fn, file_storage_dir, enabled_disk_templates,
      constants.DT_SHARED_FILE)
881 """Change the parameters of the cluster.
882
883 """
884 HPATH = "cluster-modify"
885 HTYPE = constants.HTYPE_CLUSTER
886 REQ_BGL = False
887
921
933
935 """Build hooks env.
936
937 """
938 return {
939 "OP_TARGET": self.cfg.GetClusterName(),
940 "NEW_VG_NAME": self.op.vg_name,
941 }
942
944 """Build hooks nodes.
945
946 """
947 mn = self.cfg.GetMasterNode()
948 return ([mn], [mn])
949
  def _CheckVgName(self, node_uuids, enabled_disk_templates,
                   new_enabled_disk_templates):
    """Check the consistency of the VG name on all nodes and, in case it
    gets unset, whether there are instances still using it.

    """
    lvm_is_enabled = utils.IsLvmEnabled(enabled_disk_templates)
    lvm_gets_enabled = utils.LvmGetsEnabled(enabled_disk_templates,
                                            new_enabled_disk_templates)
    current_vg_name = self.cfg.GetVGName()

    if self.op.vg_name == '':
      if lvm_is_enabled:
        raise errors.OpPrereqError("Cannot unset volume group if lvm-based"
                                   " disk templates are or get enabled.",
                                   errors.ECODE_INVAL)

    if self.op.vg_name is None:
      if current_vg_name is None and lvm_is_enabled:
        raise errors.OpPrereqError("Please specify a volume group when"
                                   " enabling lvm-based disk-templates.",
                                   errors.ECODE_INVAL)

    if self.op.vg_name is not None and not self.op.vg_name:
      if self.cfg.DisksOfType(constants.DT_PLAIN):
        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
                                   " instances exist", errors.ECODE_INVAL)

    if (self.op.vg_name is not None and lvm_is_enabled) or \
        (self.cfg.GetVGName() is not None and lvm_gets_enabled):
      self._CheckVgNameOnNodes(node_uuids)

  @staticmethod
  def _GetDiskTemplateSetsInner(op_enabled_disk_templates,
                                old_enabled_disk_templates):
    """Computes three sets of disk templates.

    @see: C{_GetDiskTemplateSets} for more details.

    """
    enabled_disk_templates = None
    new_enabled_disk_templates = []
    disabled_disk_templates = []
    if op_enabled_disk_templates:
      enabled_disk_templates = op_enabled_disk_templates
      new_enabled_disk_templates = \
        list(set(enabled_disk_templates)
             - set(old_enabled_disk_templates))
      disabled_disk_templates = \
        list(set(old_enabled_disk_templates)
             - set(enabled_disk_templates))
    else:
      enabled_disk_templates = old_enabled_disk_templates
    return (enabled_disk_templates, new_enabled_disk_templates,
            disabled_disk_templates)

  def _GetDiskTemplateSets(self, cluster):
    """Computes three sets of disk templates.

    The three sets are:
      - disk templates that will be enabled after this operation (no matter
        if they were enabled before or not)
      - disk templates that get enabled by this operation (thus haven't been
        enabled before)
      - disk templates that get disabled by this operation

    """
    return self._GetDiskTemplateSetsInner(self.op.enabled_disk_templates,
                                          cluster.enabled_disk_templates)

  def _CheckIpolicy(self, cluster, enabled_disk_templates):
    """Checks the ipolicy.

    @type cluster: C{objects.Cluster}
    @param cluster: the cluster's configuration
    @type enabled_disk_templates: list of string
    @param enabled_disk_templates: list of (possibly newly) enabled disk
        templates

    """
    if self.op.ipolicy:
      self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
                                           group_policy=False)

      CheckIpolicyVsDiskTemplates(self.new_ipolicy,
                                  enabled_disk_templates)

      all_instances = self.cfg.GetAllInstancesInfo().values()
      violations = set()
      for group in self.cfg.GetAllNodeGroupsInfo().values():
        instances = frozenset(
          [inst for inst in all_instances
           if compat.any(nuuid in group.members
                         for nuuid in self.cfg.GetInstanceNodes(inst.uuid))])
        new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
        ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
        new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
                                           self.cfg)
        if new:
          violations.update(new)

      if violations:
        self.LogWarning("After the ipolicy change the following instances"
                        " violate it: %s",
                        utils.CommaJoin(utils.NiceSort(violations)))
    else:
      CheckIpolicyVsDiskTemplates(cluster.ipolicy,
                                  enabled_disk_templates)

1082 """Checks whether the set DRBD helper actually exists on the nodes.
1083
1084 @type drbd_helper: string
1085 @param drbd_helper: path of the drbd usermode helper binary
1086 @type node_uuids: list of strings
1087 @param node_uuids: list of node UUIDs to check for the helper
1088
1089 """
1090
1091 helpers = self.rpc.call_drbd_helper(node_uuids)
1092 for (_, ninfo) in self.cfg.GetMultiNodeInfo(node_uuids):
1093 if ninfo.offline:
1094 self.LogInfo("Not checking drbd helper on offline node %s",
1095 ninfo.name)
1096 continue
1097 msg = helpers[ninfo.uuid].fail_msg
1098 if msg:
1099 raise errors.OpPrereqError("Error checking drbd helper on node"
1100 " '%s': %s" % (ninfo.name, msg),
1101 errors.ECODE_ENVIRON)
1102 node_helper = helpers[ninfo.uuid].payload
1103 if node_helper != drbd_helper:
1104 raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
1105 (ninfo.name, node_helper),
1106 errors.ECODE_ENVIRON)

  def _CheckDrbdHelper(self, node_uuids, drbd_enabled, drbd_gets_enabled):
    """Check the DRBD usermode helper.

    @type node_uuids: list of strings
    @param node_uuids: a list of nodes' UUIDs
    @type drbd_enabled: boolean
    @param drbd_enabled: whether DRBD will be enabled after this operation
      (no matter if it was disabled before or not)
    @type drbd_gets_enabled: boolean
    @param drbd_gets_enabled: true if DRBD was disabled before this
      operation, but will be enabled afterwards

    """
    if self.op.drbd_helper == '':
      if drbd_enabled:
        raise errors.OpPrereqError("Cannot disable drbd helper while"
                                   " DRBD is enabled.", errors.ECODE_STATE)
      if self.cfg.DisksOfType(constants.DT_DRBD8):
        raise errors.OpPrereqError("Cannot disable drbd helper while"
                                   " drbd-based instances exist",
                                   errors.ECODE_INVAL)

    else:
      if self.op.drbd_helper is not None and drbd_enabled:
        self._CheckDrbdHelperOnNodes(self.op.drbd_helper, node_uuids)
      else:
        if drbd_gets_enabled:
          current_drbd_helper = self.cfg.GetClusterInfo().drbd_usermode_helper
          if current_drbd_helper is not None:
            self._CheckDrbdHelperOnNodes(current_drbd_helper, node_uuids)
          else:
            raise errors.OpPrereqError("Cannot enable DRBD without a"
                                       " DRBD usermode helper set.",
                                       errors.ECODE_STATE)

  def _CheckInstancesOfDisabledDiskTemplates(
      self, disabled_disk_templates):
    """Check whether we try to disable a disk template that is in use.

    @type disabled_disk_templates: list of string
    @param disabled_disk_templates: list of disk templates that are going to
      be disabled by this operation

    """
    for disk_template in disabled_disk_templates:
      disks_with_type = self.cfg.DisksOfType(disk_template)
      if disks_with_type:
        disk_desc = []
        for disk in disks_with_type:
          instance_uuid = self.cfg.GetInstanceForDisk(disk.uuid)
          instance = self.cfg.GetInstanceInfo(instance_uuid)
          if instance:
            instance_desc = "on " + instance.name
          else:
            instance_desc = "detached"
          disk_desc.append("%s (%s)" % (disk, instance_desc))
        raise errors.OpPrereqError(
            "Cannot disable disk template '%s', because there is at least one"
            " disk using it:\n * %s" % (disk_template,
                                        "\n * ".join(disk_desc)),
            errors.ECODE_STATE)
    if constants.DT_DISKLESS in disabled_disk_templates:
      instances = self.cfg.GetAllInstancesInfo()
      for inst in instances.values():
        if not inst.disks:
          raise errors.OpPrereqError(
              "Cannot disable disk template 'diskless', because there is at"
              " least one instance using it:\n * %s" % inst.name,
              errors.ECODE_STATE)

  @staticmethod
  def _CheckInstanceCommunicationNetwork(network, warning_fn):
    """Check whether an existing network is configured for instance
    communication.

    Checks whether an existing network is configured with the
    parameters that are advisable for instance communication, and
    otherwise issues security warnings.

    @type network: L{ganeti.objects.Network}
    @param network: L{ganeti.objects.Network} object whose
                    configuration is being checked
    @type warning_fn: function
    @param warning_fn: function used to print warnings
    @rtype: None
    @return: None

    """
    def _MaybeWarn(err, val, default):
      if val != default:
        warning_fn("Supplied instance communication network '%s' %s '%s',"
                   " this might pose a security risk (default is '%s').",
                   network.name, err, val, default)

    if network.network is None:
      raise errors.OpPrereqError("Supplied instance communication network '%s'"
                                 " must have an IPv4 network address." %
                                 network.name, errors.ECODE_INVAL)

    _MaybeWarn("has an IPv4 gateway", network.gateway, None)
    _MaybeWarn("has a non-standard IPv4 network address", network.network,
               constants.INSTANCE_COMMUNICATION_NETWORK4)
    _MaybeWarn("has an IPv6 gateway", network.gateway6, None)
    _MaybeWarn("has a non-standard IPv6 network address", network.network6,
               constants.INSTANCE_COMMUNICATION_NETWORK6)
    _MaybeWarn("has a non-standard MAC prefix", network.mac_prefix,
               constants.INSTANCE_COMMUNICATION_MAC_PREFIX)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given parameters don't conflict and that the
    given volume group is valid.

    """
    node_uuids = self.owned_locks(locking.LEVEL_NODE)
    self.cluster = cluster = self.cfg.GetClusterInfo()

    vm_capable_node_uuids = [node.uuid
                             for node in self.cfg.GetAllNodesInfo().values()
                             if node.uuid in node_uuids and node.vm_capable]

    (enabled_disk_templates, new_enabled_disk_templates,
     disabled_disk_templates) = self._GetDiskTemplateSets(cluster)
    self._CheckInstancesOfDisabledDiskTemplates(disabled_disk_templates)

    self._CheckVgName(vm_capable_node_uuids, enabled_disk_templates,
                      new_enabled_disk_templates)

    if self.op.file_storage_dir is not None:
      CheckFileStoragePathVsEnabledDiskTemplates(
          self.LogWarning, self.op.file_storage_dir, enabled_disk_templates)

    if self.op.shared_file_storage_dir is not None:
      CheckSharedFileStoragePathVsEnabledDiskTemplates(
          self.LogWarning, self.op.shared_file_storage_dir,
          enabled_disk_templates)

    drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
    drbd_gets_enabled = constants.DT_DRBD8 in new_enabled_disk_templates
    self._CheckDrbdHelper(vm_capable_node_uuids,
                          drbd_enabled, drbd_gets_enabled)

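    # Validate the changed parameter groups against their type constraints
    # before merging them into the cluster defaults.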
    if self.op.beparams:
      objects.UpgradeBeParams(self.op.beparams)
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)

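      # An empty oob_program value means: reset it to the cluster default.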
      if self.new_ndparams["oob_program"] == "":
        self.new_ndparams["oob_program"] = \
          constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]

    if self.op.hv_state:
      new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
                                           self.cluster.hv_state_static)
      self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
                               for hv, values in new_hv_state.items())

    if self.op.disk_state:
      new_disk_state = MergeAndVerifyDiskState(self.op.disk_state,
                                               self.cluster.disk_state_static)
      self.new_disk_state = \
        dict((storage, dict((name, cluster.SimpleFillDiskState(values))
                            for name, values in svalues.items()))
             for storage, svalues in new_disk_state.items())

    self._CheckIpolicy(cluster, enabled_disk_templates)

    if self.op.nicparams:
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
      nic_errors = []

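      # Check that all instance NICs would still be valid with the new
      # cluster-level NIC parameters applied.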
      for instance in self.cfg.GetAllInstancesInfo().values():
        for nic_idx, nic in enumerate(instance.nics):
          params_copy = copy.deepcopy(nic.nicparams)
          params_filled = objects.FillDict(self.new_nicparams, params_copy)

          try:
            objects.NIC.CheckParameterSyntax(params_filled)
          except errors.ConfigurationError as err:
            nic_errors.append("Instance %s, nic/%d: %s" %
                              (instance.name, nic_idx, err))

          target_mode = params_filled[constants.NIC_MODE]
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
            nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
                              " address" % (instance.name, nic_idx))
      if nic_errors:
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
                                   "\n".join(nic_errors), errors.ECODE_INVAL)

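    # Merge the per-hypervisor, per-disk-template and per-OS parameter
    # updates into the current cluster-wide values.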
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    self.new_diskparams = objects.FillDict(cluster.diskparams, {})
    if self.op.diskparams:
      for dt_name, dt_params in self.op.diskparams.items():
        if dt_name not in self.new_diskparams:
          self.new_diskparams[dt_name] = dt_params
        else:
          self.new_diskparams[dt_name].update(dt_params)
      CheckDiskAccessModeConsistency(self.op.diskparams, self.cfg)

    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
    if self.op.os_hvp:
      for os_name, hvs in self.op.os_hvp.items():
        if os_name not in self.new_os_hvp:
          self.new_os_hvp[os_name] = hvs
        else:
          for hv_name, hv_dict in hvs.items():
            if hv_dict is None:
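              # A value of None means: delete the parameters for this
              # OS/hypervisor combination.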
              self.new_os_hvp[os_name].pop(hv_name, None)
            elif hv_name not in self.new_os_hvp[os_name]:
              self.new_os_hvp[os_name][hv_name] = hv_dict
            else:
              self.new_os_hvp[os_name][hv_name].update(hv_dict)

    self._BuildOSParams(cluster)

    if self.op.enabled_hypervisors is not None:
      for hv in self.op.enabled_hypervisors:
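        # If the hypervisor has no cluster-level parameters yet, start from
        # an empty dict; in either case fill in the defaults, since the
        # stored parameters might be incomplete.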
        if hv not in new_hvp:
          new_hvp[hv] = {}
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
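      # Either the parameters or the set of enabled hypervisors changed;
      # re-validate the parameters of all affected hypervisors.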
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          hv_class = hypervisor.GetHypervisorClass(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          CheckHVParams(self, node_uuids, hv_name, hv_params)

    if self.op.os_hvp:
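      # Validate the per-OS hypervisor parameters, overlaid on top of the
      # cluster-level defaults.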
      for os_name, os_hvp in self.new_os_hvp.items():
        for hv_name, hv_params in os_hvp.items():
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)

          cluster_defaults = self.new_hvparams.get(hv_name, {})
          new_osp = objects.FillDict(cluster_defaults, hv_params)
          hv_class = hypervisor.GetHypervisorClass(hv_name)
          hv_class.CheckParameterSyntax(new_osp)
          CheckHVParams(self, node_uuids, hv_name, new_osp)

    if self.op.default_iallocator:
      alloc_script = utils.FindFile(self.op.default_iallocator,
                                    constants.IALLOCATOR_SEARCH_PATH,
                                    os.path.isfile)
      if alloc_script is None:
        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                   " specified" % self.op.default_iallocator,
                                   errors.ECODE_INVAL)

    if self.op.instance_communication_network:
      network_name = self.op.instance_communication_network

      try:
        network_uuid = self.cfg.LookupNetwork(network_name)
      except errors.OpPrereqError:
        network_uuid = None

      if network_uuid is not None:
        network = self.cfg.GetNetwork(network_uuid)
        self._CheckInstanceCommunicationNetwork(network, self.LogWarning)

    if self.op.compression_tools:
      CheckCompressionTools(self.op.compression_tools)

1411 "Calculate the new OS parameters for this operation."
1412
1413 def _GetNewParams(source, new_params):
1414 "Wrapper around GetUpdatedParams."
1415 if new_params is None:
1416 return source
1417 result = objects.FillDict(source, {})
1418 for os_name in new_params:
1419 result[os_name] = GetUpdatedParams(result.get(os_name, {}),
1420 new_params[os_name],
1421 use_none=True)
1422 if not result[os_name]:
1423 del result[os_name]
1424 return result
1425
1426 self.new_osp = _GetNewParams(cluster.osparams,
1427 self.op.osparams)
1428 self.new_osp_private = _GetNewParams(cluster.osparams_private_cluster,
1429 self.op.osparams_private_cluster)
1430
1431
1432 changed_oses = (set(self.new_osp.keys()) | set(self.new_osp_private.keys()))
1433 for os_name in changed_oses:
1434 os_params = cluster.SimpleFillOS(
1435 os_name,
1436 self.new_osp.get(os_name, {}),
1437 os_params_private=self.new_osp_private.get(os_name, {})
1438 )
1439
1440 CheckOSParams(self, False, [self.cfg.GetMasterNode()],
1441 os_name, os_params, False)

  def _SetVgName(self, feedback_fn):
    """Determines and sets the new volume group name.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")

1458 """Set the file storage directory.
1459
1460 """
1461 if self.op.file_storage_dir is not None:
1462 if self.cluster.file_storage_dir == self.op.file_storage_dir:
1463 feedback_fn("Global file storage dir already set to value '%s'"
1464 % self.cluster.file_storage_dir)
1465 else:
1466 self.cluster.file_storage_dir = self.op.file_storage_dir
1467
1469 """Set the shared file storage directory.
1470
1471 """
1472 if self.op.shared_file_storage_dir is not None:
1473 if self.cluster.shared_file_storage_dir == \
1474 self.op.shared_file_storage_dir:
1475 feedback_fn("Global shared file storage dir already set to value '%s'"
1476 % self.cluster.shared_file_storage_dir)
1477 else:
1478 self.cluster.shared_file_storage_dir = self.op.shared_file_storage_dir
1479
1481 """Set the DRBD usermode helper.
1482
1483 """
1484 if self.op.drbd_helper is not None:
1485 if not constants.DT_DRBD8 in self.cluster.enabled_disk_templates:
1486 feedback_fn("Note that you specified a drbd user helper, but did not"
1487 " enable the drbd disk template.")
1488 new_helper = self.op.drbd_helper
1489 if not new_helper:
1490 new_helper = None
1491 if new_helper != self.cfg.GetDRBDHelper():
1492 self.cfg.SetDRBDHelper(new_helper)
1493 else:
1494 feedback_fn("Cluster DRBD helper already in desired state,"
1495 " not changing")
1496
  @staticmethod
  def _EnsureInstanceCommunicationNetwork(cfg, network_name):
    """Ensure that the instance communication network exists and is
    connected to all groups.

    The instance communication network given by L{network_name} is
    created, if necessary, via the opcode 'OpNetworkAdd'.  Also, the
    instance communication network is connected to all existing node
    groups, if necessary, via the opcode 'OpNetworkConnect'.

    @type cfg: L{config.ConfigWriter}
    @param cfg: cluster configuration

    @type network_name: string
    @param network_name: instance communication network name

    @rtype: L{ganeti.cmdlib.ResultWithJobs} or L{None}
    @return: L{ganeti.cmdlib.ResultWithJobs} if the instance
             communication network needs to be created or connected to a
             group, otherwise L{None}

    """
    jobs = []

    try:
      network_uuid = cfg.LookupNetwork(network_name)
      network_exists = True
    except errors.OpPrereqError:
      network_exists = False

    if not network_exists:
      jobs.append(AddInstanceCommunicationNetworkOp(network_name))

    for group_uuid in cfg.GetNodeGroupList():
      group = cfg.GetNodeGroup(group_uuid)

      if network_exists:
        network_connected = network_uuid in group.networks
      else:
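        # The network does not exist yet, so it cannot be connected to any
        # node group yet either.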
        network_connected = False

      if not network_connected:
        op = ConnectInstanceCommunicationNetworkOp(group_uuid, network_name)
        jobs.append(op)

    if jobs:
      return ResultWithJobs([jobs])
    else:
      return None

  @staticmethod
  def _ModifyInstanceCommunicationNetwork(cfg, network_name, feedback_fn):
    """Update the instance communication network stored in the cluster
    configuration.

    Compares the user-supplied instance communication network against
    the one stored in the Ganeti cluster configuration.  If there is a
    change, the instance communication network may be created and
    connected to all groups (see
    L{LUClusterSetParams._EnsureInstanceCommunicationNetwork}).

    @type cfg: L{config.ConfigWriter}
    @param cfg: cluster configuration

    @type network_name: string
    @param network_name: instance communication network name

    @type feedback_fn: function
    @param feedback_fn: see L{ganeti.cmdlib.base.LogicalUnit}

    @rtype: L{LUClusterSetParams._EnsureInstanceCommunicationNetwork} or L{None}
    @return: see L{LUClusterSetParams._EnsureInstanceCommunicationNetwork}

    """
    config_network_name = cfg.GetInstanceCommunicationNetwork()

    if network_name == config_network_name:
      feedback_fn("Instance communication network is already '%s', nothing to"
                  " do." % network_name)
    else:
      try:
        cfg.LookupNetwork(config_network_name)
        feedback_fn("Previous instance communication network '%s'"
                    " should be removed manually." % config_network_name)
      except errors.OpPrereqError:
        pass

      if network_name:
        feedback_fn("Changing instance communication network to '%s', only new"
                    " instances will be affected."
                    % network_name)
      else:
        feedback_fn("Disabling instance communication network, only new"
                    " instances will be affected.")

      cfg.SetInstanceCommunicationNetwork(network_name)

      if network_name:
        return LUClusterSetParams._EnsureInstanceCommunicationNetwork(
          cfg,
          network_name)
      else:
        return None

  def Exec(self, feedback_fn):
    """Change the parameters of the cluster.

    """
    self.cluster = self.cfg.GetClusterInfo()
    if self.op.enabled_disk_templates:
      self.cluster.enabled_disk_templates = \
        list(self.op.enabled_disk_templates)

    self.cfg.Update(self.cluster, feedback_fn)

    self._SetVgName(feedback_fn)

    self.cluster = self.cfg.GetClusterInfo()
    self._SetFileStorageDir(feedback_fn)
    self._SetSharedFileStorageDir(feedback_fn)
    self.cfg.Update(self.cluster, feedback_fn)
    self._SetDrbdHelper(feedback_fn)

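    # Re-read the cluster object after the intermediate updates above, so
    # that the remaining modifications operate on fresh data.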
    self.cluster = self.cfg.GetClusterInfo()

    ensure_kvmd = False

    active = constants.DATA_COLLECTOR_STATE_ACTIVE
    if self.op.enabled_data_collectors is not None:
      for name, val in self.op.enabled_data_collectors.items():
        self.cluster.data_collectors[name][active] = val

    if self.op.data_collector_interval:
      interval = constants.DATA_COLLECTOR_PARAMETER_INTERVAL
      for name, val in self.op.data_collector_interval.items():
        self.cluster.data_collectors[name][interval] = int(val)

    if self.op.hvparams:
      self.cluster.hvparams = self.new_hvparams
    if self.op.os_hvp:
      self.cluster.os_hvp = self.new_os_hvp
    if self.op.enabled_hypervisors is not None:
      self.cluster.hvparams = self.new_hvparams
      self.cluster.enabled_hypervisors = self.op.enabled_hypervisors
      ensure_kvmd = True
    if self.op.beparams:
      self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams
    if self.op.nicparams:
      self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams
    if self.op.ipolicy:
      self.cluster.ipolicy = self.new_ipolicy
    if self.op.osparams:
      self.cluster.osparams = self.new_osp
    if self.op.osparams_private_cluster:
      self.cluster.osparams_private_cluster = self.new_osp_private
    if self.op.ndparams:
      self.cluster.ndparams = self.new_ndparams
    if self.op.diskparams:
      self.cluster.diskparams = self.new_diskparams
    if self.op.hv_state:
      self.cluster.hv_state_static = self.new_hv_state
    if self.op.disk_state:
      self.cluster.disk_state_static = self.new_disk_state

    if self.op.candidate_pool_size is not None:
      self.cluster.candidate_pool_size = self.op.candidate_pool_size

    master_node = self.cfg.GetMasterNode()
    potential_master_candidates = self.cfg.GetPotentialMasterCandidates()
    modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup
    AdjustCandidatePool(
        self, [], master_node, potential_master_candidates, feedback_fn,
        modify_ssh_setup)

    if self.op.max_running_jobs is not None:
      self.cluster.max_running_jobs = self.op.max_running_jobs

    if self.op.max_tracked_jobs is not None:
      self.cluster.max_tracked_jobs = self.op.max_tracked_jobs

    if self.op.maintain_node_health is not None:
      self.cluster.maintain_node_health = self.op.maintain_node_health

    if self.op.modify_etc_hosts is not None:
      self.cluster.modify_etc_hosts = self.op.modify_etc_hosts

    if self.op.prealloc_wipe_disks is not None:
      self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks

    if self.op.add_uids is not None:
      uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids)

    if self.op.remove_uids is not None:
      uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids)

    if self.op.uid_pool is not None:
      self.cluster.uid_pool = self.op.uid_pool

    if self.op.default_iallocator is not None:
      self.cluster.default_iallocator = self.op.default_iallocator

    if self.op.default_iallocator_params is not None:
      self.cluster.default_iallocator_params = \
        self.op.default_iallocator_params

    if self.op.reserved_lvs is not None:
      self.cluster.reserved_lvs = self.op.reserved_lvs

    if self.op.use_external_mip_script is not None:
      self.cluster.use_external_mip_script = self.op.use_external_mip_script

    if self.op.enabled_user_shutdown is not None and \
        self.cluster.enabled_user_shutdown != self.op.enabled_user_shutdown:
      self.cluster.enabled_user_shutdown = self.op.enabled_user_shutdown
      ensure_kvmd = True

    def helper_os(aname, mods, desc):
      desc += " OS list"
      lst = getattr(self.cluster, aname)
      for key, val in mods:
        if key == constants.DDM_ADD:
          if val in lst:
            feedback_fn("OS %s already in %s, ignoring" % (val, desc))
          else:
            lst.append(val)
        elif key == constants.DDM_REMOVE:
          if val in lst:
            lst.remove(val)
          else:
            feedback_fn("OS %s not found in %s, ignoring" % (val, desc))
        else:
          raise errors.ProgrammerError("Invalid modification '%s'" % key)

    if self.op.hidden_os:
      helper_os("hidden_os", self.op.hidden_os, "hidden")

    if self.op.blacklisted_os:
      helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted")

    if self.op.mac_prefix:
      self.cluster.mac_prefix = self.op.mac_prefix

    if self.op.master_netdev:
      master_params = self.cfg.GetMasterNetworkParameters()
      ems = self.cfg.GetUseExternalMipScript()
      feedback_fn("Shutting down master ip on the current netdev (%s)" %
                  self.cluster.master_netdev)
      result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
                                                       master_params, ems)
      if not self.op.force:
        result.Raise("Could not disable the master ip")
      else:
        if result.fail_msg:
          msg = ("Could not disable the master ip (continuing anyway): %s" %
                 result.fail_msg)
          feedback_fn(msg)
      feedback_fn("Changing master_netdev from %s to %s" %
                  (master_params.netdev, self.op.master_netdev))
      self.cluster.master_netdev = self.op.master_netdev

    if self.op.master_netmask:
      master_params = self.cfg.GetMasterNetworkParameters()
      feedback_fn("Changing master IP netmask to %s" % self.op.master_netmask)
      result = self.rpc.call_node_change_master_netmask(
          master_params.uuid, master_params.netmask,
          self.op.master_netmask, master_params.ip,
          master_params.netdev)
      result.Warn("Could not change the master IP netmask", feedback_fn)
      self.cluster.master_netmask = self.op.master_netmask

    if self.op.install_image:
      self.cluster.install_image = self.op.install_image

    if self.op.zeroing_image is not None:
      CheckImageValidity(self.op.zeroing_image,
                         "Zeroing image must be an absolute path or a URL")
      self.cluster.zeroing_image = self.op.zeroing_image

    self.cfg.Update(self.cluster, feedback_fn)

    if self.op.master_netdev:
      master_params = self.cfg.GetMasterNetworkParameters()
      feedback_fn("Starting the master ip on the new master netdev (%s)" %
                  self.op.master_netdev)
      ems = self.cfg.GetUseExternalMipScript()
      result = self.rpc.call_node_activate_master_ip(master_params.uuid,
                                                     master_params, ems)
      result.Warn("Could not re-enable the master ip on the master,"
                  " please restart manually", self.LogWarning)

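    # Start or stop the KVM daemon on the nodes if one of the settings
    # handled above changed whether it should be running.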
    if ensure_kvmd:
      EnsureKvmdOnNodes(self, feedback_fn)

    if self.op.compression_tools is not None:
      self.cfg.SetCompressionTools(self.op.compression_tools)

    network_name = self.op.instance_communication_network
    if network_name is not None:
      return self._ModifyInstanceCommunicationNetwork(self.cfg,
                                                      network_name,
                                                      feedback_fn)
    else:
      return None