
Source Code for Package ganeti.cmdlib.cluster

#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


"""Logical units dealing with the cluster."""

import copy
import itertools
import logging
import operator
import os
import re
import time

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import query
import ganeti.rpc.node as rpc
from ganeti import runtime
from ganeti import ssh
from ganeti import uidpool
from ganeti import utils
from ganeti import vcluster

from ganeti.cmdlib.base import NoHooksLU, QueryBase, LogicalUnit, \
  ResultWithJobs
from ganeti.cmdlib.common import ShareAll, RunPostHook, \
  ComputeAncillaryFiles, RedistributeAncillaryFiles, UploadHelper, \
  GetWantedInstances, MergeAndVerifyHvState, MergeAndVerifyDiskState, \
  GetUpdatedIPolicy, ComputeNewInstanceViolations, GetUpdatedParams, \
  CheckOSParams, CheckHVParams, AdjustCandidatePool, CheckNodePVs, \
  ComputeIPolicyInstanceViolation, AnnotateDiskParams, SupportsOob, \
  CheckIpolicyVsDiskTemplates, CheckDiskAccessModeValidity, \
  CheckDiskAccessModeConsistency, GetClientCertDigest, \
  AddInstanceCommunicationNetworkOp, ConnectInstanceCommunicationNetworkOp, \
  CheckImageValidity, EnsureKvmdOnNodes

import ganeti.masterd.instance


class LUClusterRenewCrypto(NoHooksLU):
  """Renew the cluster's crypto tokens.

  """

  _MAX_NUM_RETRIES = 3
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks = ShareAll()
    self.share_locks[locking.LEVEL_NODE] = 0

  def CheckPrereq(self):
    """Check prerequisites.

    Notably the compatibility of specified key bits and key type.

    """
    cluster_info = self.cfg.GetClusterInfo()

    self.ssh_key_type = self.op.ssh_key_type
    if self.ssh_key_type is None:
      self.ssh_key_type = cluster_info.ssh_key_type

    self.ssh_key_bits = ssh.DetermineKeyBits(self.ssh_key_type,
                                             self.op.ssh_key_bits,
                                             cluster_info.ssh_key_type,
                                             cluster_info.ssh_key_bits)

  def _RenewNodeSslCertificates(self, feedback_fn):
    """Renews the nodes' SSL certificates.

    Note that most of this operation is done in gnt_cluster.py, this LU only
    takes care of the renewal of the client SSL certificates.

    """
    master_uuid = self.cfg.GetMasterNode()
    cluster = self.cfg.GetClusterInfo()

    logging.debug("Renewing the master's SSL node certificate."
                  " Master's UUID: %s.", master_uuid)

    # mapping node UUIDs to client certificate digests
    digest_map = {}
    master_digest = utils.GetCertificateDigest(
        cert_filename=pathutils.NODED_CLIENT_CERT_FILE)
    digest_map[master_uuid] = master_digest
    logging.debug("Adding the master's SSL node certificate digest to the"
                  " configuration. Master's UUID: %s, Digest: %s",
                  master_uuid, master_digest)

    node_errors = {}
    nodes = self.cfg.GetAllNodesInfo()
    logging.debug("Renewing non-master nodes' node certificates.")
    for (node_uuid, node_info) in nodes.items():
      if node_info.offline:
        logging.info("* Skipping offline node %s", node_info.name)
        continue
      if node_uuid != master_uuid:
        logging.debug("Adding certificate digest of node '%s'.", node_uuid)
        last_exception = None
        for i in range(self._MAX_NUM_RETRIES):
          try:
            if node_info.master_candidate:
              node_digest = GetClientCertDigest(self, node_uuid)
              digest_map[node_uuid] = node_digest
              logging.debug("Added the node's certificate to candidate"
                            " certificate list. Current list: %s.",
                            str(cluster.candidate_certs))
            break
          except errors.OpExecError as e:
            last_exception = e
            logging.error("Could not fetch a non-master node's SSL node"
                          " certificate at attempt no. %s. The node's UUID"
                          " is %s, and the error was: %s.",
                          str(i), node_uuid, e)
        else:
          if last_exception:
            node_errors[node_uuid] = last_exception

    if node_errors:
      msg = ("Some nodes' SSL client certificates could not be fetched."
             " Please make sure those nodes are reachable and rerun"
             " the operation. The affected nodes and their errors are:\n")
      for uuid, e in node_errors.items():
        msg += "Node %s: %s\n" % (uuid, e)
      feedback_fn(msg)

    self.cfg.SetCandidateCerts(digest_map)

  def _RenewSshKeys(self, feedback_fn):
    """Renew all nodes' SSH keys.

    @type feedback_fn: function
    @param feedback_fn: logging function, see L{ganeti.cmdlib.base.LogicalUnit}

    """
    master_uuid = self.cfg.GetMasterNode()

    nodes = self.cfg.GetAllNodesInfo()
    nodes_uuid_names = [(node_uuid, node_info.name) for (node_uuid, node_info)
                        in nodes.items() if not node_info.offline]
    node_names = [name for (_, name) in nodes_uuid_names]
    node_uuids = [uuid for (uuid, _) in nodes_uuid_names]
    potential_master_candidates = self.cfg.GetPotentialMasterCandidates()
    master_candidate_uuids = self.cfg.GetMasterCandidateUuids()

    cluster_info = self.cfg.GetClusterInfo()

    result = self.rpc.call_node_ssh_keys_renew(
      [master_uuid],
      node_uuids, node_names,
      master_candidate_uuids,
      potential_master_candidates,
      cluster_info.ssh_key_type, # Old key type
      self.ssh_key_type,         # New key type
      self.ssh_key_bits,         # New key bits
      self.op.debug,
      self.op.verbose)
    result[master_uuid].Raise("Could not renew the SSH keys of all nodes")

    # After the keys have been successfully swapped, time to commit the change
    # in key type
    cluster_info.ssh_key_type = self.ssh_key_type
    cluster_info.ssh_key_bits = self.ssh_key_bits
    self.cfg.Update(cluster_info, feedback_fn)

  def Exec(self, feedback_fn):
    if self.op.node_certificates:
      feedback_fn("Renewing Node SSL certificates")
      self._RenewNodeSslCertificates(feedback_fn)

    if self.op.renew_ssh_keys:
      if self.cfg.GetClusterInfo().modify_ssh_setup:
        feedback_fn("Renewing SSH keys")
        self._RenewSshKeys(feedback_fn)
      else:
        feedback_fn("Cannot renew SSH keys if the cluster is configured to not"
                    " modify the SSH setup.")
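
# A hedged sketch of how this LU is normally reached. The opcode field names
# below are inferred from the self.op attributes used above (node_certificates,
# renew_ssh_keys, ssh_key_type, ssh_key_bits, debug, verbose), not checked
# against the opcode definitions:
#
#   op = opcodes.OpClusterRenewCrypto(node_certificates=True,
#                                     renew_ssh_keys=True,
#                                     ssh_key_type=None,  # keep current type
#                                     ssh_key_bits=None,  # keep current size
#                                     debug=False, verbose=False)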


class LUClusterActivateMasterIp(NoHooksLU):
  """Activate the master IP on the master node.

  """
  def Exec(self, feedback_fn):
    """Activate the master IP.

    """
    master_params = self.cfg.GetMasterNetworkParameters()
    ems = self.cfg.GetUseExternalMipScript()
    result = self.rpc.call_node_activate_master_ip(master_params.uuid,
                                                   master_params, ems)
    result.Raise("Could not activate the master IP")


class LUClusterDeactivateMasterIp(NoHooksLU):
  """Deactivate the master IP on the master node.

  """
  def Exec(self, feedback_fn):
    """Deactivate the master IP.

    """
    master_params = self.cfg.GetMasterNetworkParameters()
    ems = self.cfg.GetUseExternalMipScript()
    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
                                                     master_params, ems)
    result.Raise("Could not deactivate the master IP")


class LUClusterConfigQuery(NoHooksLU):
  """Return configuration values.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.cq = ClusterQuery(None, self.op.output_fields, False)

  def ExpandNames(self):
    self.cq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.cq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    result = self.cq.OldStyleQuery(self)

    assert len(result) == 1

    return result[0]


class LUClusterDestroy(LogicalUnit):
  """Logical unit for destroying the cluster.

  """
  HPATH = "cluster-destroy"
  HTYPE = constants.HTYPE_CLUSTER

  # Read by the job queue to detect when the cluster is gone and job files will
  # never be available.
  # FIXME: This variable should be removed together with the Python job queue.
  clusterHasBeenDestroyed = False

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([], [])

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the cluster is empty.

    Any errors are signaled by raising errors.OpPrereqError.

    """
    master = self.cfg.GetMasterNode()

    nodelist = self.cfg.GetNodeList()
    if len(nodelist) != 1 or nodelist[0] != master:
      raise errors.OpPrereqError("There are still %d node(s) in"
                                 " this cluster." % (len(nodelist) - 1),
                                 errors.ECODE_INVAL)
    instancelist = self.cfg.GetInstanceList()
    if instancelist:
      raise errors.OpPrereqError("There are still %d instance(s) in"
                                 " this cluster." % len(instancelist),
                                 errors.ECODE_INVAL)

  def Exec(self, feedback_fn):
    """Destroys the cluster.

    """
    master_params = self.cfg.GetMasterNetworkParameters()

    # Run post hooks on master node before it's removed
    RunPostHook(self, master_params.uuid)

    ems = self.cfg.GetUseExternalMipScript()
    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
                                                     master_params, ems)
    result.Warn("Error disabling the master IP address", self.LogWarning)

    self.wconfd.Client().PrepareClusterDestruction(self.wconfdcontext)

    # signal to the job queue that the cluster is gone
    LUClusterDestroy.clusterHasBeenDestroyed = True

    return master_params.uuid


class LUClusterPostInit(LogicalUnit):
  """Logical unit for running hooks after cluster initialization.

  """
  HPATH = "cluster-init"
  HTYPE = constants.HTYPE_CLUSTER

  def CheckArguments(self):
    self.master_uuid = self.cfg.GetMasterNode()
    self.master_ndparams = self.cfg.GetNdParams(self.cfg.GetMasterNodeInfo())

    # TODO: When Issue 584 is solved, and None is properly parsed when used
    # as a default value, ndparams.get(.., None) can be changed to
    # ndparams[..] to access the values directly

    # OpenvSwitch: Warn user if link is missing
    if (self.master_ndparams[constants.ND_OVS] and not
        self.master_ndparams.get(constants.ND_OVS_LINK, None)):
      self.LogInfo("No physical interface for OpenvSwitch was given."
                   " OpenvSwitch will not have an outside connection. This"
                   " might not be what you want.")

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([], [self.cfg.GetMasterNode()])

  def Exec(self, feedback_fn):
    """Create and configure Open vSwitch

    """
    if self.master_ndparams[constants.ND_OVS]:
      result = self.rpc.call_node_configure_ovs(
                 self.master_uuid,
                 self.master_ndparams[constants.ND_OVS_NAME],
                 self.master_ndparams.get(constants.ND_OVS_LINK, None))
      result.Raise("Could not successfully configure Open vSwitch")

    return True


class ClusterQuery(QueryBase):
  FIELDS = query.CLUSTER_FIELDS

  #: Do not sort (there is only one item)
  SORT_FIELD = None

  def ExpandNames(self, lu):
    lu.needed_locks = {}

    # The following variables interact with _QueryBase._GetNames
    self.wanted = locking.ALL_SET
    self.do_locking = self.use_locking

    if self.do_locking:
      raise errors.OpPrereqError("Can not use locking for cluster queries",
                                 errors.ECODE_INVAL)

  def DeclareLocks(self, lu, level):
    pass

  def _GetQueryData(self, lu):
    """Computes the list of nodes and their attributes.

    """
    if query.CQ_CONFIG in self.requested_data:
      cluster = lu.cfg.GetClusterInfo()
      nodes = lu.cfg.GetAllNodesInfo()
    else:
      cluster = NotImplemented
      nodes = NotImplemented

    if query.CQ_QUEUE_DRAINED in self.requested_data:
      drain_flag = os.path.exists(pathutils.JOB_QUEUE_DRAIN_FILE)
    else:
      drain_flag = NotImplemented

    if query.CQ_WATCHER_PAUSE in self.requested_data:
      master_node_uuid = lu.cfg.GetMasterNode()

      result = lu.rpc.call_get_watcher_pause(master_node_uuid)
      result.Raise("Can't retrieve watcher pause from master node '%s'" %
                   lu.cfg.GetMasterNodeName())

      watcher_pause = result.payload
    else:
      watcher_pause = NotImplemented

    return query.ClusterQueryData(cluster, nodes, drain_flag, watcher_pause)


class LUClusterQuery(NoHooksLU):
  """Query cluster configuration.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

  def Exec(self, feedback_fn):
    """Return cluster config.

    """
    cluster = self.cfg.GetClusterInfo()
    os_hvp = {}

    # Filter just for enabled hypervisors
    for os_name, hv_dict in cluster.os_hvp.items():
      os_hvp[os_name] = {}
      for hv_name, hv_params in hv_dict.items():
        if hv_name in cluster.enabled_hypervisors:
          os_hvp[os_name][hv_name] = hv_params

    # Convert ip_family to ip_version
    primary_ip_version = constants.IP4_VERSION
    if cluster.primary_ip_family == netutils.IP6Address.family:
      primary_ip_version = constants.IP6_VERSION

    result = {
      "software_version": constants.RELEASE_VERSION,
      "protocol_version": constants.PROTOCOL_VERSION,
      "config_version": constants.CONFIG_VERSION,
      "os_api_version": max(constants.OS_API_VERSIONS),
      "export_version": constants.EXPORT_VERSION,
      "vcs_version": constants.VCS_VERSION,
      "architecture": runtime.GetArchInfo(),
      "name": cluster.cluster_name,
      "master": self.cfg.GetMasterNodeName(),
      "default_hypervisor": cluster.primary_hypervisor,
      "enabled_hypervisors": cluster.enabled_hypervisors,
      "hvparams": dict([(hypervisor_name, cluster.hvparams[hypervisor_name])
                        for hypervisor_name in cluster.enabled_hypervisors]),
      "os_hvp": os_hvp,
      "beparams": cluster.beparams,
      "osparams": cluster.osparams,
      "ipolicy": cluster.ipolicy,
      "nicparams": cluster.nicparams,
      "ndparams": cluster.ndparams,
      "diskparams": cluster.diskparams,
      "candidate_pool_size": cluster.candidate_pool_size,
      "max_running_jobs": cluster.max_running_jobs,
      "max_tracked_jobs": cluster.max_tracked_jobs,
      "mac_prefix": cluster.mac_prefix,
      "master_netdev": cluster.master_netdev,
      "master_netmask": cluster.master_netmask,
      "use_external_mip_script": cluster.use_external_mip_script,
      "volume_group_name": cluster.volume_group_name,
      "drbd_usermode_helper": cluster.drbd_usermode_helper,
      "file_storage_dir": cluster.file_storage_dir,
      "shared_file_storage_dir": cluster.shared_file_storage_dir,
      "maintain_node_health": cluster.maintain_node_health,
      "ctime": cluster.ctime,
      "mtime": cluster.mtime,
      "uuid": cluster.uuid,
      "tags": list(cluster.GetTags()),
      "uid_pool": cluster.uid_pool,
      "default_iallocator": cluster.default_iallocator,
      "default_iallocator_params": cluster.default_iallocator_params,
      "reserved_lvs": cluster.reserved_lvs,
      "primary_ip_version": primary_ip_version,
      "prealloc_wipe_disks": cluster.prealloc_wipe_disks,
      "hidden_os": cluster.hidden_os,
      "blacklisted_os": cluster.blacklisted_os,
      "enabled_disk_templates": cluster.enabled_disk_templates,
      "install_image": cluster.install_image,
      "instance_communication_network": cluster.instance_communication_network,
      "compression_tools": cluster.compression_tools,
      "enabled_user_shutdown": cluster.enabled_user_shutdown,
      }

    return result


class LUClusterRedistConf(NoHooksLU):
  """Force the redistribution of cluster configuration.

  This is a very simple LU.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
    }
    self.share_locks = ShareAll()

  def Exec(self, feedback_fn):
    """Redistribute the configuration.

    """
    self.cfg.Update(self.cfg.GetClusterInfo(), feedback_fn)
    RedistributeAncillaryFiles(self)


class LUClusterRename(LogicalUnit):
  """Rename the cluster.

  """
  HPATH = "cluster-rename"
  HTYPE = constants.HTYPE_CLUSTER

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_NAME": self.op.name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    return ([self.cfg.GetMasterNode()], self.cfg.GetNodeList())

  def CheckPrereq(self):
    """Verify that the passed name is a valid one.

    """
    hostname = netutils.GetHostname(name=self.op.name,
                                    family=self.cfg.GetPrimaryIPFamily())

    new_name = hostname.name
    self.ip = new_ip = hostname.ip
    old_name = self.cfg.GetClusterName()
    old_ip = self.cfg.GetMasterIP()
    if new_name == old_name and new_ip == old_ip:
      raise errors.OpPrereqError("Neither the name nor the IP address of the"
                                 " cluster has changed",
                                 errors.ECODE_INVAL)
    if new_ip != old_ip:
      if netutils.TcpPing(new_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("The given cluster IP address (%s) is"
                                   " reachable on the network" %
                                   new_ip, errors.ECODE_NOTUNIQUE)

    self.op.name = new_name

  def Exec(self, feedback_fn):
    """Rename the cluster.

    """
    clustername = self.op.name
    new_ip = self.ip

    # shutdown the master IP
    master_params = self.cfg.GetMasterNetworkParameters()
    ems = self.cfg.GetUseExternalMipScript()
    result = self.rpc.call_node_deactivate_master_ip(master_params.uuid,
                                                     master_params, ems)
    result.Raise("Could not disable the master role")

    try:
      cluster = self.cfg.GetClusterInfo()
      cluster.cluster_name = clustername
      cluster.master_ip = new_ip
      self.cfg.Update(cluster, feedback_fn)

      # update the known hosts file
      ssh.WriteKnownHostsFile(self.cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
      node_list = self.cfg.GetOnlineNodeList()
      try:
        node_list.remove(master_params.uuid)
      except ValueError:
        pass
      UploadHelper(self, node_list, pathutils.SSH_KNOWN_HOSTS_FILE)
    finally:
      master_params.ip = new_ip
      result = self.rpc.call_node_activate_master_ip(master_params.uuid,
                                                     master_params, ems)
      result.Warn("Could not re-enable the master role on the master,"
                  " please restart manually", self.LogWarning)

    return clustername


class LUClusterRepairDiskSizes(NoHooksLU):
  """Verifies the cluster disks sizes.

  """
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.instances:
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
      # Not getting the node allocation lock as only a specific set of
      # instances (and their nodes) is going to be acquired
      self.needed_locks = {
        locking.LEVEL_NODE_RES: [],
        locking.LEVEL_INSTANCE: self.wanted_names,
        }
      self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE
    else:
      self.wanted_names = None
      self.needed_locks = {
        locking.LEVEL_NODE_RES: locking.ALL_SET,
        locking.LEVEL_INSTANCE: locking.ALL_SET,
        }

    self.share_locks = {
      locking.LEVEL_NODE_RES: 1,
      locking.LEVEL_INSTANCE: 0,
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and self.wanted_names is not None:
      self._LockInstancesNodes(primary_only=True, level=level)

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    if self.wanted_names is None:
      self.wanted_names = self.owned_locks(locking.LEVEL_INSTANCE)

    self.wanted_instances = \
      map(compat.snd, self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

  def _EnsureChildSizes(self, disk):
    """Ensure children of the disk have the needed disk size.

    This is valid mainly for DRBD8 and fixes an issue where the
    children have smaller disk size.

    @param disk: an L{ganeti.objects.Disk} object

    """
    if disk.dev_type == constants.DT_DRBD8:
      assert disk.children, "Empty children for DRBD8?"
      fchild = disk.children[0]
      mismatch = fchild.size < disk.size
      if mismatch:
        self.LogInfo("Child disk has size %d, parent %d, fixing",
                     fchild.size, disk.size)
        fchild.size = disk.size

      # and we recurse on this child only, not on the metadev
      return self._EnsureChildSizes(fchild) or mismatch
    else:
      return False
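
  # Worked example (hypothetical numbers): for a DRBD8 disk recorded at
  # 10240 MiB whose data child (children[0]) is recorded at 10200 MiB, the
  # child's size field is bumped to 10240 and the call returns True so the
  # caller knows to write the configuration back; the metadata child is
  # deliberately left untouched.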

  def Exec(self, feedback_fn):
    """Verify the size of cluster disks.

    """
    # TODO: check child disks too
    # TODO: check differences in size between primary/secondary nodes
    per_node_disks = {}
    for instance in self.wanted_instances:
      pnode = instance.primary_node
      if pnode not in per_node_disks:
        per_node_disks[pnode] = []
      for idx, disk in enumerate(self.cfg.GetInstanceDisks(instance.uuid)):
        per_node_disks[pnode].append((instance, idx, disk))

    assert not (frozenset(per_node_disks.keys()) -
                frozenset(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning correct locks"
    assert not self.owned_locks(locking.LEVEL_NODE)

    es_flags = rpc.GetExclusiveStorageForNodes(self.cfg,
                                               per_node_disks.keys())

    changed = []
    for node_uuid, dskl in per_node_disks.items():
      if not dskl:
        # no disks on the node
        continue

      newl = [([v[2].Copy()], v[0]) for v in dskl]
      node_name = self.cfg.GetNodeName(node_uuid)
      result = self.rpc.call_blockdev_getdimensions(node_uuid, newl)
      if result.fail_msg:
        self.LogWarning("Failure in blockdev_getdimensions call to node"
                        " %s, ignoring", node_name)
        continue
      if len(result.payload) != len(dskl):
        logging.warning("Invalid result from node %s: len(dskl)=%d,"
                        " result.payload=%s", node_name, len(dskl),
                        result.payload)
        self.LogWarning("Invalid result from node %s, ignoring node results",
                        node_name)
        continue
      for ((instance, idx, disk), dimensions) in zip(dskl, result.payload):
        if dimensions is None:
          self.LogWarning("Disk %d of instance %s did not return size"
                          " information, ignoring", idx, instance.name)
          continue
        if not isinstance(dimensions, (tuple, list)):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " dimension information, ignoring", idx,
                          instance.name)
          continue
        (size, spindles) = dimensions
        if not isinstance(size, int):
          self.LogWarning("Disk %d of instance %s did not return valid"
                          " size information, ignoring", idx, instance.name)
          continue
        # convert the byte count reported by the node to MiB, the unit used
        # for sizes in the configuration
        size = size >> 20
        if size != disk.size:
          self.LogInfo("Disk %d of instance %s has mismatched size,"
                       " correcting: recorded %d, actual %d", idx,
                       instance.name, disk.size, size)
          disk.size = size
          self.cfg.Update(disk, feedback_fn)
          changed.append((instance.name, idx, "size", size))
        if es_flags[node_uuid]:
          if spindles is None:
            self.LogWarning("Disk %d of instance %s did not return valid"
                            " spindles information, ignoring", idx,
                            instance.name)
          elif disk.spindles is None or disk.spindles != spindles:
            self.LogInfo("Disk %d of instance %s has mismatched spindles,"
                         " correcting: recorded %s, actual %s",
                         idx, instance.name, disk.spindles, spindles)
            disk.spindles = spindles
            self.cfg.Update(disk, feedback_fn)
            changed.append((instance.name, idx, "spindles", disk.spindles))
        if self._EnsureChildSizes(disk):
          self.cfg.Update(disk, feedback_fn)
          changed.append((instance.name, idx, "size", disk.size))
    return changed


def _ValidateNetmask(cfg, netmask):
  """Checks if a netmask is valid.

  @type cfg: L{config.ConfigWriter}
  @param cfg: cluster configuration
  @type netmask: int
  @param netmask: netmask to be verified
  @raise errors.OpPrereqError: if the validation fails

  """
  ip_family = cfg.GetPrimaryIPFamily()
  try:
    ipcls = netutils.IPAddress.GetClassFromIpFamily(ip_family)
  except errors.ProgrammerError:
    raise errors.OpPrereqError("Invalid primary ip family: %s." %
                               ip_family, errors.ECODE_INVAL)
  if not ipcls.ValidateNetmask(netmask):
    raise errors.OpPrereqError("CIDR netmask (%s) not valid" %
                               (netmask), errors.ECODE_INVAL)
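
# Illustrative behaviour (values assumed, not taken from a test suite): on a
# cluster whose primary IP family is IPv4, _ValidateNetmask(cfg, 24) passes
# while _ValidateNetmask(cfg, 33) raises OpPrereqError, since the netmask is
# interpreted as a CIDR prefix length for that family.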


def CheckFileBasedStoragePathVsEnabledDiskTemplates(
    logging_warn_fn, file_storage_dir, enabled_disk_templates,
    file_disk_template):
  """Checks whether the given file-based storage directory is acceptable.

  Note: This function is public, because it is also used in bootstrap.py.

  @type logging_warn_fn: function
  @param logging_warn_fn: function which accepts a string and logs it
  @type file_storage_dir: string
  @param file_storage_dir: the directory to be used for file-based instances
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: the list of enabled disk templates
  @type file_disk_template: string
  @param file_disk_template: the file-based disk template for which the
      path should be checked

  """
  assert (file_disk_template in utils.storage.GetDiskTemplatesOfStorageTypes(
            constants.ST_FILE, constants.ST_SHARED_FILE, constants.ST_GLUSTER
          ))

  file_storage_enabled = file_disk_template in enabled_disk_templates
  if file_storage_dir is not None:
    if file_storage_dir == "":
      if file_storage_enabled:
        raise errors.OpPrereqError(
            "Unsetting the '%s' storage directory while having '%s' storage"
            " enabled is not permitted." %
            (file_disk_template, file_disk_template),
            errors.ECODE_INVAL)
    else:
      if not file_storage_enabled:
        logging_warn_fn(
            "Specified a %s storage directory, although %s storage is not"
            " enabled." % (file_disk_template, file_disk_template))
  else:
    raise errors.ProgrammerError("Received %s storage dir with value"
                                 " 'None'." % file_disk_template)
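
# Typical invocation (mirroring the wrappers below and the calls from
# LUClusterSetParams.CheckPrereq further down; the path is purely
# illustrative):
#   CheckFileBasedStoragePathVsEnabledDiskTemplates(
#       logging.warning, "/srv/ganeti/file-storage",
#       [constants.DT_FILE, constants.DT_DRBD8], constants.DT_FILE)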


def CheckFileStoragePathVsEnabledDiskTemplates(
    logging_warn_fn, file_storage_dir, enabled_disk_templates):
  """Checks whether the given file storage directory is acceptable.

  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}

  """
  CheckFileBasedStoragePathVsEnabledDiskTemplates(
      logging_warn_fn, file_storage_dir, enabled_disk_templates,
      constants.DT_FILE)


def CheckSharedFileStoragePathVsEnabledDiskTemplates(
    logging_warn_fn, file_storage_dir, enabled_disk_templates):
  """Checks whether the given shared file storage directory is acceptable.

  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}

  """
  CheckFileBasedStoragePathVsEnabledDiskTemplates(
      logging_warn_fn, file_storage_dir, enabled_disk_templates,
      constants.DT_SHARED_FILE)


def CheckGlusterStoragePathVsEnabledDiskTemplates(
    logging_warn_fn, file_storage_dir, enabled_disk_templates):
  """Checks whether the given gluster storage directory is acceptable.

  @see: C{CheckFileBasedStoragePathVsEnabledDiskTemplates}

  """
  CheckFileBasedStoragePathVsEnabledDiskTemplates(
      logging_warn_fn, file_storage_dir, enabled_disk_templates,
      constants.DT_GLUSTER)


def CheckCompressionTools(tools):
  """Check whether the provided compression tools look like executables.

  @type tools: list of string
  @param tools: The tools provided as opcode input

  """
  regex = re.compile('^[-_a-zA-Z0-9]+$')
  illegal_tools = [t for t in tools if not regex.match(t)]

  if illegal_tools:
    raise errors.OpPrereqError(
        "The tools '%s' contain illegal characters: only alphanumeric values,"
        " dashes, and underscores are allowed" % ", ".join(illegal_tools),
        errors.ECODE_INVAL
    )

  if constants.IEC_GZIP not in tools:
    raise errors.OpPrereqError("For compatibility reasons, the %s utility must"
                               " be present among the compression tools" %
                               constants.IEC_GZIP, errors.ECODE_INVAL)

  if constants.IEC_NONE in tools:
    raise errors.OpPrereqError("%s is a reserved value used for no compression,"
                               " and cannot be used as the name of a tool" %
                               constants.IEC_NONE, errors.ECODE_INVAL)
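
# A quick sketch of what the checks above accept and reject, assuming the
# usual values of constants.IEC_GZIP ("gzip") and constants.IEC_NONE ("none");
# "lzop" is only an illustrative tool name:
#   CheckCompressionTools(["gzip", "lzop"])      # passes
#   CheckCompressionTools(["lzop"])              # raises: gzip must be listed
#   CheckCompressionTools(["gzip", "my tool"])   # raises: space is illegal
#   CheckCompressionTools(["gzip", "none"])      # raises: "none" is reserved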


class LUClusterSetParams(LogicalUnit):
  """Change the parameters of the cluster.

  """
  HPATH = "cluster-modify"
  HTYPE = constants.HTYPE_CLUSTER
  REQ_BGL = False

  def CheckArguments(self):
    """Check parameters

    """
    if self.op.uid_pool:
      uidpool.CheckUidPool(self.op.uid_pool)

    if self.op.add_uids:
      uidpool.CheckUidPool(self.op.add_uids)

    if self.op.remove_uids:
      uidpool.CheckUidPool(self.op.remove_uids)

    if self.op.mac_prefix:
      self.op.mac_prefix = \
        utils.NormalizeAndValidateThreeOctetMacPrefix(self.op.mac_prefix)

    if self.op.master_netmask is not None:
      _ValidateNetmask(self.cfg, self.op.master_netmask)

    if self.op.diskparams:
      for dt_params in self.op.diskparams.values():
        utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
      try:
        utils.VerifyDictOptions(self.op.diskparams, constants.DISK_DT_DEFAULTS)
        CheckDiskAccessModeValidity(self.op.diskparams)
      except errors.OpPrereqError as err:
        raise errors.OpPrereqError("While verifying diskparams options: %s" %
                                   err, errors.ECODE_INVAL)

    if self.op.install_image is not None:
      CheckImageValidity(self.op.install_image,
                         "Install image must be an absolute path or a URL")

  def ExpandNames(self):
    # FIXME: in the future maybe other cluster params won't require checking on
    # all nodes to be modified.
    # FIXME: This opcode changes cluster-wide settings. Is acquiring all
    # resource locks the right thing, shouldn't it be the BGL instead?
    self.needed_locks = {
      locking.LEVEL_NODE: locking.ALL_SET,
      locking.LEVEL_INSTANCE: locking.ALL_SET,
      locking.LEVEL_NODEGROUP: locking.ALL_SET,
    }
    self.share_locks = ShareAll()

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OP_TARGET": self.cfg.GetClusterName(),
      "NEW_VG_NAME": self.op.vg_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def _CheckVgName(self, node_uuids, enabled_disk_templates,
                   new_enabled_disk_templates):
    """Check the consistency of the vg name on all nodes and in case it gets
       unset whether there are instances still using it.

    """
    lvm_is_enabled = utils.IsLvmEnabled(enabled_disk_templates)
    lvm_gets_enabled = utils.LvmGetsEnabled(enabled_disk_templates,
                                            new_enabled_disk_templates)
    current_vg_name = self.cfg.GetVGName()

    if self.op.vg_name == '':
      if lvm_is_enabled:
        raise errors.OpPrereqError("Cannot unset volume group if lvm-based"
                                   " disk templates are or get enabled.",
                                   errors.ECODE_INVAL)

    if self.op.vg_name is None:
      if current_vg_name is None and lvm_is_enabled:
        raise errors.OpPrereqError("Please specify a volume group when"
                                   " enabling lvm-based disk-templates.",
                                   errors.ECODE_INVAL)

    if self.op.vg_name is not None and not self.op.vg_name:
      if self.cfg.DisksOfType(constants.DT_PLAIN):
        raise errors.OpPrereqError("Cannot disable lvm storage while lvm-based"
                                   " instances exist", errors.ECODE_INVAL)

    if (self.op.vg_name is not None and lvm_is_enabled) or \
        (self.cfg.GetVGName() is not None and lvm_gets_enabled):
      self._CheckVgNameOnNodes(node_uuids)
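
  # In short, the three op.vg_name cases handled above: None keeps the
  # current volume group (which must already exist if LVM templates are
  # enabled), '' unsets it (only legal when no lvm-based templates or plain
  # disks remain), and a non-empty name triggers the per-node size/health
  # check via _CheckVgNameOnNodes below.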

  def _CheckVgNameOnNodes(self, node_uuids):
    """Check the status of the volume group on each node.

    """
    vglist = self.rpc.call_vg_list(node_uuids)
    for node_uuid in node_uuids:
      msg = vglist[node_uuid].fail_msg
      if msg:
        # ignoring down node
        self.LogWarning("Error while gathering data on node %s"
                        " (ignoring node): %s",
                        self.cfg.GetNodeName(node_uuid), msg)
        continue
      vgstatus = utils.CheckVolumeGroupSize(vglist[node_uuid].payload,
                                            self.op.vg_name,
                                            constants.MIN_VG_SIZE)
      if vgstatus:
        raise errors.OpPrereqError("Error on node '%s': %s" %
                                   (self.cfg.GetNodeName(node_uuid), vgstatus),
                                   errors.ECODE_ENVIRON)

  @staticmethod
  def _GetDiskTemplateSetsInner(op_enabled_disk_templates,
                                old_enabled_disk_templates):
    """Computes three sets of disk templates.

    @see: C{_GetDiskTemplateSets} for more details.

    """
    enabled_disk_templates = None
    new_enabled_disk_templates = []
    disabled_disk_templates = []
    if op_enabled_disk_templates:
      enabled_disk_templates = op_enabled_disk_templates
      new_enabled_disk_templates = \
        list(set(enabled_disk_templates)
             - set(old_enabled_disk_templates))
      disabled_disk_templates = \
        list(set(old_enabled_disk_templates)
             - set(enabled_disk_templates))
    else:
      enabled_disk_templates = old_enabled_disk_templates
    return (enabled_disk_templates, new_enabled_disk_templates,
            disabled_disk_templates)
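
  # Worked example (hypothetical template lists): with
  # old_enabled_disk_templates = ["plain", "drbd"] and
  # op_enabled_disk_templates = ["plain", "file"], this returns
  # (["plain", "file"], ["file"], ["drbd"]): "file" is newly enabled and
  # "drbd" gets disabled. If the opcode does not set the list, the old
  # templates are returned unchanged and the other two sets stay empty.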

  def _GetDiskTemplateSets(self, cluster):
    """Computes three sets of disk templates.

    The three sets are:
      - disk templates that will be enabled after this operation (no matter if
        they were enabled before or not)
      - disk templates that get enabled by this operation (thus haven't been
        enabled before.)
      - disk templates that get disabled by this operation

    """
    return self._GetDiskTemplateSetsInner(self.op.enabled_disk_templates,
                                          cluster.enabled_disk_templates)

  def _CheckIpolicy(self, cluster, enabled_disk_templates):
    """Checks the ipolicy.

    @type cluster: C{objects.Cluster}
    @param cluster: the cluster's configuration
    @type enabled_disk_templates: list of string
    @param enabled_disk_templates: list of (possibly newly) enabled disk
      templates

    """
    # FIXME: write unit tests for this
    if self.op.ipolicy:
      self.new_ipolicy = GetUpdatedIPolicy(cluster.ipolicy, self.op.ipolicy,
                                           group_policy=False)

      CheckIpolicyVsDiskTemplates(self.new_ipolicy,
                                  enabled_disk_templates)

      all_instances = self.cfg.GetAllInstancesInfo().values()
      violations = set()
      for group in self.cfg.GetAllNodeGroupsInfo().values():
        instances = frozenset(
          [inst for inst in all_instances
           if compat.any(nuuid in group.members
                         for nuuid in self.cfg.GetInstanceNodes(inst.uuid))])
        new_ipolicy = objects.FillIPolicy(self.new_ipolicy, group.ipolicy)
        ipol = masterd.instance.CalculateGroupIPolicy(cluster, group)
        new = ComputeNewInstanceViolations(ipol, new_ipolicy, instances,
                                           self.cfg)
        if new:
          violations.update(new)

      if violations:
        self.LogWarning("After the ipolicy change the following instances"
                        " violate them: %s",
                        utils.CommaJoin(utils.NiceSort(violations)))
    else:
      CheckIpolicyVsDiskTemplates(cluster.ipolicy,
                                  enabled_disk_templates)

  def _CheckDrbdHelperOnNodes(self, drbd_helper, node_uuids):
    """Checks whether the set DRBD helper actually exists on the nodes.

    @type drbd_helper: string
    @param drbd_helper: path of the drbd usermode helper binary
    @type node_uuids: list of strings
    @param node_uuids: list of node UUIDs to check for the helper

    """
    # checks given drbd helper on all nodes
    helpers = self.rpc.call_drbd_helper(node_uuids)
    for (_, ninfo) in self.cfg.GetMultiNodeInfo(node_uuids):
      if ninfo.offline:
        self.LogInfo("Not checking drbd helper on offline node %s",
                     ninfo.name)
        continue
      msg = helpers[ninfo.uuid].fail_msg
      if msg:
        raise errors.OpPrereqError("Error checking drbd helper on node"
                                   " '%s': %s" % (ninfo.name, msg),
                                   errors.ECODE_ENVIRON)
      node_helper = helpers[ninfo.uuid].payload
      if node_helper != drbd_helper:
        raise errors.OpPrereqError("Error on node '%s': drbd helper is %s" %
                                   (ninfo.name, node_helper),
                                   errors.ECODE_ENVIRON)

  def _CheckDrbdHelper(self, node_uuids, drbd_enabled, drbd_gets_enabled):
    """Check the DRBD usermode helper.

    @type node_uuids: list of strings
    @param node_uuids: a list of nodes' UUIDs
    @type drbd_enabled: boolean
    @param drbd_enabled: whether DRBD will be enabled after this operation
      (no matter if it was disabled before or not)
    @type drbd_gets_enabled: boolean
    @param drbd_gets_enabled: true if DRBD was disabled before this
      operation, but will be enabled afterwards

    """
    if self.op.drbd_helper == '':
      if drbd_enabled:
        raise errors.OpPrereqError("Cannot disable drbd helper while"
                                   " DRBD is enabled.", errors.ECODE_STATE)
      if self.cfg.DisksOfType(constants.DT_DRBD8):
        raise errors.OpPrereqError("Cannot disable drbd helper while"
                                   " drbd-based instances exist",
                                   errors.ECODE_INVAL)

    else:
      if self.op.drbd_helper is not None and drbd_enabled:
        self._CheckDrbdHelperOnNodes(self.op.drbd_helper, node_uuids)
      else:
        if drbd_gets_enabled:
          current_drbd_helper = self.cfg.GetClusterInfo().drbd_usermode_helper
          if current_drbd_helper is not None:
            self._CheckDrbdHelperOnNodes(current_drbd_helper, node_uuids)
          else:
            raise errors.OpPrereqError("Cannot enable DRBD without a"
                                       " DRBD usermode helper set.",
                                       errors.ECODE_STATE)

  def _CheckInstancesOfDisabledDiskTemplates(
      self, disabled_disk_templates):
    """Check whether we try to disable a disk template that is in use.

    @type disabled_disk_templates: list of string
    @param disabled_disk_templates: list of disk templates that are going to
      be disabled by this operation

    """
    for disk_template in disabled_disk_templates:
      disks_with_type = self.cfg.DisksOfType(disk_template)
      if disks_with_type:
        disk_desc = []
        for disk in disks_with_type:
          instance_uuid = self.cfg.GetInstanceForDisk(disk.uuid)
          instance = self.cfg.GetInstanceInfo(instance_uuid)
          if instance:
            instance_desc = "on " + instance.name
          else:
            instance_desc = "detached"
          disk_desc.append("%s (%s)" % (disk, instance_desc))
        raise errors.OpPrereqError(
            "Cannot disable disk template '%s', because there is at least one"
            " disk using it:\n * %s" % (disk_template,
                                        "\n * ".join(disk_desc)),
            errors.ECODE_STATE)
    if constants.DT_DISKLESS in disabled_disk_templates:
      instances = self.cfg.GetAllInstancesInfo()
      for inst in instances.values():
        if not inst.disks:
          raise errors.OpPrereqError(
              "Cannot disable disk template 'diskless', because there is at"
              " least one instance using it:\n * %s" % inst.name,
              errors.ECODE_STATE)

  @staticmethod
  def _CheckInstanceCommunicationNetwork(network, warning_fn):
    """Check whether an existing network is configured for instance
    communication.

    Checks whether an existing network is configured with the
    parameters that are advisable for instance communication, and
    otherwise issue security warnings.

    @type network: L{ganeti.objects.Network}
    @param network: L{ganeti.objects.Network} object whose
                    configuration is being checked
    @type warning_fn: function
    @param warning_fn: function used to print warnings
    @rtype: None
    @return: None

    """
    def _MaybeWarn(err, val, default):
      if val != default:
        warning_fn("Supplied instance communication network '%s' %s '%s',"
                   " this might pose a security risk (default is '%s').",
                   network.name, err, val, default)

    if network.network is None:
      raise errors.OpPrereqError("Supplied instance communication network '%s'"
                                 " must have an IPv4 network address." %
                                 network.name, errors.ECODE_INVAL)

    _MaybeWarn("has an IPv4 gateway", network.gateway, None)
    _MaybeWarn("has a non-standard IPv4 network address", network.network,
               constants.INSTANCE_COMMUNICATION_NETWORK4)
    _MaybeWarn("has an IPv6 gateway", network.gateway6, None)
    _MaybeWarn("has a non-standard IPv6 network address", network.network6,
               constants.INSTANCE_COMMUNICATION_NETWORK6)
    _MaybeWarn("has a non-standard MAC prefix", network.mac_prefix,
               constants.INSTANCE_COMMUNICATION_MAC_PREFIX)
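
  # Example of a warning produced above (names and values are purely
  # illustrative): a network "inst-net" whose mac_prefix deviates from
  # constants.INSTANCE_COMMUNICATION_MAC_PREFIX yields
  #   "Supplied instance communication network 'inst-net' has a non-standard
  #   MAC prefix 'aa:bb:cc', this might pose a security risk ...".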

  def CheckPrereq(self):
    """Check prerequisites.

    This checks whether the given params don't conflict and
    if the given volume group is valid.

    """
    node_uuids = self.owned_locks(locking.LEVEL_NODE)
    self.cluster = cluster = self.cfg.GetClusterInfo()

    vm_capable_node_uuids = [node.uuid
                             for node in self.cfg.GetAllNodesInfo().values()
                             if node.uuid in node_uuids and node.vm_capable]

    (enabled_disk_templates, new_enabled_disk_templates,
     disabled_disk_templates) = self._GetDiskTemplateSets(cluster)
    self._CheckInstancesOfDisabledDiskTemplates(disabled_disk_templates)

    self._CheckVgName(vm_capable_node_uuids, enabled_disk_templates,
                      new_enabled_disk_templates)

    if self.op.file_storage_dir is not None:
      CheckFileStoragePathVsEnabledDiskTemplates(
          self.LogWarning, self.op.file_storage_dir, enabled_disk_templates)

    if self.op.shared_file_storage_dir is not None:
      CheckSharedFileStoragePathVsEnabledDiskTemplates(
          self.LogWarning, self.op.shared_file_storage_dir,
          enabled_disk_templates)

    drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
    drbd_gets_enabled = constants.DT_DRBD8 in new_enabled_disk_templates
    self._CheckDrbdHelper(vm_capable_node_uuids,
                          drbd_enabled, drbd_gets_enabled)

    # validate params changes
    if self.op.beparams:
      objects.UpgradeBeParams(self.op.beparams)
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)
      self.new_beparams = cluster.SimpleFillBE(self.op.beparams)

    if self.op.ndparams:
      utils.ForceDictType(self.op.ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = cluster.SimpleFillND(self.op.ndparams)

      # TODO: we need a more general way to handle resetting
      # cluster-level parameters to default values
      if self.new_ndparams["oob_program"] == "":
        self.new_ndparams["oob_program"] = \
          constants.NDC_DEFAULTS[constants.ND_OOB_PROGRAM]

    if self.op.hv_state:
      new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
                                           self.cluster.hv_state_static)
      self.new_hv_state = dict((hv, cluster.SimpleFillHvState(values))
                               for hv, values in new_hv_state.items())

    if self.op.disk_state:
      new_disk_state = MergeAndVerifyDiskState(self.op.disk_state,
                                               self.cluster.disk_state_static)
      self.new_disk_state = \
        dict((storage, dict((name, cluster.SimpleFillDiskState(values))
                            for name, values in svalues.items()))
             for storage, svalues in new_disk_state.items())

    self._CheckIpolicy(cluster, enabled_disk_templates)

    if self.op.nicparams:
      utils.ForceDictType(self.op.nicparams, constants.NICS_PARAMETER_TYPES)
      self.new_nicparams = cluster.SimpleFillNIC(self.op.nicparams)
      objects.NIC.CheckParameterSyntax(self.new_nicparams)
      nic_errors = []

      # check all instances for consistency
      for instance in self.cfg.GetAllInstancesInfo().values():
        for nic_idx, nic in enumerate(instance.nics):
          params_copy = copy.deepcopy(nic.nicparams)
          params_filled = objects.FillDict(self.new_nicparams, params_copy)

          # check parameter syntax
          try:
            objects.NIC.CheckParameterSyntax(params_filled)
          except errors.ConfigurationError as err:
            nic_errors.append("Instance %s, nic/%d: %s" %
                              (instance.name, nic_idx, err))

          # if we're moving instances to routed, check that they have an ip
          target_mode = params_filled[constants.NIC_MODE]
          if target_mode == constants.NIC_MODE_ROUTED and not nic.ip:
            nic_errors.append("Instance %s, nic/%d: routed NIC with no ip"
                              " address" % (instance.name, nic_idx))
      if nic_errors:
        raise errors.OpPrereqError("Cannot apply the change, errors:\n%s" %
                                   "\n".join(nic_errors), errors.ECODE_INVAL)

    # hypervisor list/parameters
    self.new_hvparams = new_hvp = objects.FillDict(cluster.hvparams, {})
    if self.op.hvparams:
      for hv_name, hv_dict in self.op.hvparams.items():
        if hv_name not in self.new_hvparams:
          self.new_hvparams[hv_name] = hv_dict
        else:
          self.new_hvparams[hv_name].update(hv_dict)

    # disk template parameters
    self.new_diskparams = objects.FillDict(cluster.diskparams, {})
    if self.op.diskparams:
      for dt_name, dt_params in self.op.diskparams.items():
        if dt_name not in self.new_diskparams:
          self.new_diskparams[dt_name] = dt_params
        else:
          self.new_diskparams[dt_name].update(dt_params)
      CheckDiskAccessModeConsistency(self.op.diskparams, self.cfg)

    # os hypervisor parameters
    self.new_os_hvp = objects.FillDict(cluster.os_hvp, {})
    if self.op.os_hvp:
      for os_name, hvs in self.op.os_hvp.items():
        if os_name not in self.new_os_hvp:
          self.new_os_hvp[os_name] = hvs
        else:
          for hv_name, hv_dict in hvs.items():
            if hv_dict is None:
              # Delete if it exists
              self.new_os_hvp[os_name].pop(hv_name, None)
            elif hv_name not in self.new_os_hvp[os_name]:
              self.new_os_hvp[os_name][hv_name] = hv_dict
            else:
              self.new_os_hvp[os_name][hv_name].update(hv_dict)

    # os parameters
    self._BuildOSParams(cluster)

    # changes to the hypervisor list
    if self.op.enabled_hypervisors is not None:
      for hv in self.op.enabled_hypervisors:
        # if the hypervisor doesn't already exist in the cluster
        # hvparams, we initialize it to empty, and then (in both
        # cases) we make sure to fill the defaults, as we might not
        # have a complete defaults list if the hypervisor wasn't
        # enabled before
        if hv not in new_hvp:
          new_hvp[hv] = {}
        new_hvp[hv] = objects.FillDict(constants.HVC_DEFAULTS[hv], new_hvp[hv])
        utils.ForceDictType(new_hvp[hv], constants.HVS_PARAMETER_TYPES)

    if self.op.hvparams or self.op.enabled_hypervisors is not None:
      # either the enabled list has changed, or the parameters have, validate
      for hv_name, hv_params in self.new_hvparams.items():
        if ((self.op.hvparams and hv_name in self.op.hvparams) or
            (self.op.enabled_hypervisors and
             hv_name in self.op.enabled_hypervisors)):
          # either this is a new hypervisor, or its parameters have changed
          hv_class = hypervisor.GetHypervisorClass(hv_name)
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          hv_class.CheckParameterSyntax(hv_params)
          CheckHVParams(self, node_uuids, hv_name, hv_params)

    if self.op.os_hvp:
      # no need to check any newly-enabled hypervisors, since the
      # defaults have already been checked in the above code-block
      for os_name, os_hvp in self.new_os_hvp.items():
        for hv_name, hv_params in os_hvp.items():
          utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
          # we need to fill in the new os_hvp on top of the actual hv_p
          cluster_defaults = self.new_hvparams.get(hv_name, {})
          new_osp = objects.FillDict(cluster_defaults, hv_params)
          hv_class = hypervisor.GetHypervisorClass(hv_name)
          hv_class.CheckParameterSyntax(new_osp)
          CheckHVParams(self, node_uuids, hv_name, new_osp)

    if self.op.default_iallocator:
      alloc_script = utils.FindFile(self.op.default_iallocator,
                                    constants.IALLOCATOR_SEARCH_PATH,
                                    os.path.isfile)
      if alloc_script is None:
        raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                   " specified" % self.op.default_iallocator,
                                   errors.ECODE_INVAL)

    if self.op.instance_communication_network:
      network_name = self.op.instance_communication_network

      try:
        network_uuid = self.cfg.LookupNetwork(network_name)
      except errors.OpPrereqError:
        network_uuid = None

      if network_uuid is not None:
        network = self.cfg.GetNetwork(network_uuid)
        self._CheckInstanceCommunicationNetwork(network, self.LogWarning)

    if self.op.compression_tools:
      CheckCompressionTools(self.op.compression_tools)

  def _BuildOSParams(self, cluster):
    "Calculate the new OS parameters for this operation."

    def _GetNewParams(source, new_params):
      "Wrapper around GetUpdatedParams."
      if new_params is None:
        return source
      result = objects.FillDict(source, {}) # deep copy of source
      for os_name in new_params:
        result[os_name] = GetUpdatedParams(result.get(os_name, {}),
                                           new_params[os_name],
                                           use_none=True)
        if not result[os_name]:
          del result[os_name] # we removed all parameters
      return result

    self.new_osp = _GetNewParams(cluster.osparams,
                                 self.op.osparams)
    self.new_osp_private = _GetNewParams(cluster.osparams_private_cluster,
                                         self.op.osparams_private_cluster)

    # Remove os validity check
    changed_oses = (set(self.new_osp.keys()) |
                    set(self.new_osp_private.keys()))
    for os_name in changed_oses:
      os_params = cluster.SimpleFillOS(
        os_name,
        self.new_osp.get(os_name, {}),
        os_params_private=self.new_osp_private.get(os_name, {})
      )
      # check the parameter validity (remote check)
      CheckOSParams(self, False, [self.cfg.GetMasterNode()],
                    os_name, os_params, False)
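
  # Example of the use_none semantics above (hypothetical values): with
  # source = {"debian": {"dhcp": "yes"}} and new_params =
  # {"debian": {"dhcp": None}}, GetUpdatedParams drops the "dhcp" key,
  # leaving an empty dict, so the whole "debian" entry is removed from the
  # result.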

  def _SetVgName(self, feedback_fn):
    """Determines and sets the new volume group name.

    """
    if self.op.vg_name is not None:
      new_volume = self.op.vg_name
      if not new_volume:
        new_volume = None
      if new_volume != self.cfg.GetVGName():
        self.cfg.SetVGName(new_volume)
      else:
        feedback_fn("Cluster LVM configuration already in desired"
                    " state, not changing")

  def _SetDiagnoseDataCollectorFilename(self, feedback_fn):
    """Determines and sets the filename of the script
    diagnose data collector should run.

    """
    if self.op.diagnose_data_collector_filename is not None:
      fn = self.op.diagnose_data_collector_filename
      if fn != self.cfg.GetDiagnoseDataCollectorFilename():
        self.cfg.SetDiagnoseDataCollectorFilename(fn)
      else:
        feedback_fn("Diagnose data collector filename"
                    " configuration already in desired"
                    " state, not changing")

  def _SetFileStorageDir(self, feedback_fn):
    """Set the file storage directory.

    """
    if self.op.file_storage_dir is not None:
      if self.cluster.file_storage_dir == self.op.file_storage_dir:
        feedback_fn("Global file storage dir already set to value '%s'"
                    % self.cluster.file_storage_dir)
      else:
        self.cluster.file_storage_dir = self.op.file_storage_dir

  def _SetSharedFileStorageDir(self, feedback_fn):
    """Set the shared file storage directory.

    """
    if self.op.shared_file_storage_dir is not None:
      if self.cluster.shared_file_storage_dir == \
          self.op.shared_file_storage_dir:
        feedback_fn("Global shared file storage dir already set to value '%s'"
                    % self.cluster.shared_file_storage_dir)
      else:
        self.cluster.shared_file_storage_dir = self.op.shared_file_storage_dir

  def _SetDrbdHelper(self, feedback_fn):
    """Set the DRBD usermode helper.

    """
    if self.op.drbd_helper is not None:
      if not constants.DT_DRBD8 in self.cluster.enabled_disk_templates:
        feedback_fn("Note that you specified a drbd user helper, but did not"
                    " enable the drbd disk template.")
      new_helper = self.op.drbd_helper
      if not new_helper:
        new_helper = None
      if new_helper != self.cfg.GetDRBDHelper():
        self.cfg.SetDRBDHelper(new_helper)
      else:
        feedback_fn("Cluster DRBD helper already in desired state,"
                    " not changing")

  @staticmethod
1537 - def _EnsureInstanceCommunicationNetwork(cfg, network_name):
1538 """Ensure that the instance communication network exists and is 1539 connected to all groups. 1540 1541 The instance communication network given by L{network_name} it is 1542 created, if necessary, via the opcode 'OpNetworkAdd'. Also, the 1543 instance communication network is connected to all existing node 1544 groups, if necessary, via the opcode 'OpNetworkConnect'. 1545 1546 @type cfg: L{config.ConfigWriter} 1547 @param cfg: cluster configuration 1548 1549 @type network_name: string 1550 @param network_name: instance communication network name 1551 1552 @rtype: L{ganeti.cmdlib.ResultWithJobs} or L{None} 1553 @return: L{ganeti.cmdlib.ResultWithJobs} if the instance 1554 communication needs to be created or it needs to be 1555 connected to a group, otherwise L{None} 1556 1557 """ 1558 jobs = [] 1559 1560 try: 1561 network_uuid = cfg.LookupNetwork(network_name) 1562 network_exists = True 1563 except errors.OpPrereqError: 1564 network_exists = False 1565 1566 if not network_exists: 1567 jobs.append(AddInstanceCommunicationNetworkOp(network_name)) 1568 1569 for group_uuid in cfg.GetNodeGroupList(): 1570 group = cfg.GetNodeGroup(group_uuid) 1571 1572 if network_exists: 1573 network_connected = network_uuid in group.networks 1574 else: 1575 # The network was created asynchronously by the previous 1576 # opcode and, therefore, we don't have access to its 1577 # network_uuid. As a result, we assume that the network is 1578 # not connected to any group yet. 1579 network_connected = False 1580 1581 if not network_connected: 1582 op = ConnectInstanceCommunicationNetworkOp(group_uuid, network_name) 1583 jobs.append(op) 1584 1585 if jobs: 1586 return ResultWithJobs([jobs]) 1587 else: 1588 return None
1589 1590 @staticmethod
1591 - def _ModifyInstanceCommunicationNetwork(cfg, network_name, feedback_fn):
1592 """Update the instance communication network stored in the cluster 1593 configuration. 1594 1595 Compares the user-supplied instance communication network against 1596 the one stored in the Ganeti cluster configuration. If there is a 1597 change, the instance communication network may be possibly created 1598 and connected to all groups (see 1599 L{LUClusterSetParams._EnsureInstanceCommunicationNetwork}). 1600 1601 @type cfg: L{config.ConfigWriter} 1602 @param cfg: cluster configuration 1603 1604 @type network_name: string 1605 @param network_name: instance communication network name 1606 1607 @type feedback_fn: function 1608 @param feedback_fn: see L{ganeti.cmdlist.base.LogicalUnit} 1609 1610 @rtype: L{LUClusterSetParams._EnsureInstanceCommunicationNetwork} or L{None} 1611 @return: see L{LUClusterSetParams._EnsureInstanceCommunicationNetwork} 1612 1613 """ 1614 config_network_name = cfg.GetInstanceCommunicationNetwork() 1615 1616 if network_name == config_network_name: 1617 feedback_fn("Instance communication network already is '%s', nothing to" 1618 " do." % network_name) 1619 else: 1620 try: 1621 cfg.LookupNetwork(config_network_name) 1622 feedback_fn("Previous instance communication network '%s'" 1623 " should be removed manually." % config_network_name) 1624 except errors.OpPrereqError: 1625 pass 1626 1627 if network_name: 1628 feedback_fn("Changing instance communication network to '%s', only new" 1629 " instances will be affected." 1630 % network_name) 1631 else: 1632 feedback_fn("Disabling instance communication network, only new" 1633 " instances will be affected.") 1634 1635 cfg.SetInstanceCommunicationNetwork(network_name) 1636 1637 if network_name: 1638 return LUClusterSetParams._EnsureInstanceCommunicationNetwork( 1639 cfg, 1640 network_name) 1641 else: 1642 return None
1643
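# A summary sketch of the dispatch in _ModifyInstanceCommunicationNetwork
# above (derived from the code; "requested"/"configured" are shorthand,
# not identifiers from this module):
#
#   requested == configured -> feedback message only, returns None
#   requested is non-empty  -> warns about the old network, stores the
#                              new name, then returns
#                              _EnsureInstanceCommunicationNetwork(cfg, name)
#   requested is falsy      -> disables the network, returns None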
1644 - def Exec(self, feedback_fn):
1645 """Change the parameters of the cluster. 1646 1647 """ 1648 # re-read the fresh configuration 1649 self.cluster = self.cfg.GetClusterInfo() 1650 if self.op.enabled_disk_templates: 1651 self.cluster.enabled_disk_templates = \ 1652 list(self.op.enabled_disk_templates) 1653 # save the changes 1654 self.cfg.Update(self.cluster, feedback_fn) 1655 1656 self._SetVgName(feedback_fn) 1657 1658 self.cluster = self.cfg.GetClusterInfo() 1659 self._SetFileStorageDir(feedback_fn) 1660 self._SetSharedFileStorageDir(feedback_fn) 1661 self.cfg.Update(self.cluster, feedback_fn) 1662 self._SetDrbdHelper(feedback_fn) 1663 self._SetDiagnoseDataCollectorFilename(feedback_fn) 1664 1665 # re-read the fresh configuration again 1666 self.cluster = self.cfg.GetClusterInfo() 1667 1668 ensure_kvmd = False 1669 stop_kvmd_silently = not ( 1670 constants.HT_KVM in self.cluster.enabled_hypervisors or 1671 (self.op.enabled_hypervisors is not None and 1672 constants.HT_KVM in self.op.enabled_hypervisors)) 1673 1674 active = constants.DATA_COLLECTOR_STATE_ACTIVE 1675 if self.op.enabled_data_collectors is not None: 1676 for name, val in self.op.enabled_data_collectors.items(): 1677 self.cluster.data_collectors[name][active] = val 1678 1679 if self.op.data_collector_interval: 1680 internal = constants.DATA_COLLECTOR_PARAMETER_INTERVAL 1681 for name, val in self.op.data_collector_interval.items(): 1682 self.cluster.data_collectors[name][internal] = int(val) 1683 1684 if self.op.hvparams: 1685 self.cluster.hvparams = self.new_hvparams 1686 if self.op.os_hvp: 1687 self.cluster.os_hvp = self.new_os_hvp 1688 if self.op.enabled_hypervisors is not None: 1689 self.cluster.hvparams = self.new_hvparams 1690 self.cluster.enabled_hypervisors = self.op.enabled_hypervisors 1691 ensure_kvmd = True 1692 if self.op.beparams: 1693 self.cluster.beparams[constants.PP_DEFAULT] = self.new_beparams 1694 if self.op.nicparams: 1695 self.cluster.nicparams[constants.PP_DEFAULT] = self.new_nicparams 1696 if self.op.ipolicy: 1697 self.cluster.ipolicy = self.new_ipolicy 1698 if self.op.osparams: 1699 self.cluster.osparams = self.new_osp 1700 if self.op.osparams_private_cluster: 1701 self.cluster.osparams_private_cluster = self.new_osp_private 1702 if self.op.ndparams: 1703 self.cluster.ndparams = self.new_ndparams 1704 if self.op.diskparams: 1705 self.cluster.diskparams = self.new_diskparams 1706 if self.op.hv_state: 1707 self.cluster.hv_state_static = self.new_hv_state 1708 if self.op.disk_state: 1709 self.cluster.disk_state_static = self.new_disk_state 1710 1711 if self.op.candidate_pool_size is not None: 1712 self.cluster.candidate_pool_size = self.op.candidate_pool_size 1713 # we need to update the pool size here, otherwise the save will fail 1714 master_node = self.cfg.GetMasterNode() 1715 potential_master_candidates = self.cfg.GetPotentialMasterCandidates() 1716 modify_ssh_setup = self.cfg.GetClusterInfo().modify_ssh_setup 1717 AdjustCandidatePool( 1718 self, [], master_node, potential_master_candidates, feedback_fn, 1719 modify_ssh_setup) 1720 1721 if self.op.max_running_jobs is not None: 1722 self.cluster.max_running_jobs = self.op.max_running_jobs 1723 1724 if self.op.max_tracked_jobs is not None: 1725 self.cluster.max_tracked_jobs = self.op.max_tracked_jobs 1726 1727 if self.op.maintain_node_health is not None: 1728 self.cluster.maintain_node_health = self.op.maintain_node_health 1729 1730 if self.op.modify_etc_hosts is not None: 1731 self.cluster.modify_etc_hosts = self.op.modify_etc_hosts 1732 1733 if self.op.prealloc_wipe_disks is not 
None: 1734 self.cluster.prealloc_wipe_disks = self.op.prealloc_wipe_disks 1735 1736 if self.op.add_uids is not None: 1737 uidpool.AddToUidPool(self.cluster.uid_pool, self.op.add_uids) 1738 1739 if self.op.remove_uids is not None: 1740 uidpool.RemoveFromUidPool(self.cluster.uid_pool, self.op.remove_uids) 1741 1742 if self.op.uid_pool is not None: 1743 self.cluster.uid_pool = self.op.uid_pool 1744 1745 if self.op.default_iallocator is not None: 1746 self.cluster.default_iallocator = self.op.default_iallocator 1747 1748 if self.op.default_iallocator_params is not None: 1749 self.cluster.default_iallocator_params = self.op.default_iallocator_params 1750 1751 if self.op.reserved_lvs is not None: 1752 self.cluster.reserved_lvs = self.op.reserved_lvs 1753 1754 if self.op.use_external_mip_script is not None: 1755 self.cluster.use_external_mip_script = self.op.use_external_mip_script 1756 1757 if self.op.enabled_user_shutdown is not None and \ 1758 self.cluster.enabled_user_shutdown != self.op.enabled_user_shutdown: 1759 self.cluster.enabled_user_shutdown = self.op.enabled_user_shutdown 1760 ensure_kvmd = True 1761 1762 def helper_os(aname, mods, desc): 1763 desc += " OS list" 1764 lst = getattr(self.cluster, aname) 1765 for key, val in mods: 1766 if key == constants.DDM_ADD: 1767 if val in lst: 1768 feedback_fn("OS %s already in %s, ignoring" % (val, desc)) 1769 else: 1770 lst.append(val) 1771 elif key == constants.DDM_REMOVE: 1772 if val in lst: 1773 lst.remove(val) 1774 else: 1775 feedback_fn("OS %s not found in %s, ignoring" % (val, desc)) 1776 else: 1777 raise errors.ProgrammerError("Invalid modification '%s'" % key)
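# Illustrative only: the "mods" consumed by helper_os above are DDM-style
# (key, value) pairs; the OS names below are invented for the example:
#
#   [(constants.DDM_ADD, "alpine-image"),
#    (constants.DDM_REMOVE, "lenny-image")]
#
# would append "alpine-image" to the target OS list and drop
# "lenny-image" from it, emitting feedback (rather than failing) on
# duplicate additions or missing removals.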
1778 1779 if self.op.hidden_os: 1780 helper_os("hidden_os", self.op.hidden_os, "hidden") 1781 1782 if self.op.blacklisted_os: 1783 helper_os("blacklisted_os", self.op.blacklisted_os, "blacklisted") 1784 1785 if self.op.mac_prefix: 1786 self.cluster.mac_prefix = self.op.mac_prefix 1787 1788 if self.op.master_netdev: 1789 master_params = self.cfg.GetMasterNetworkParameters() 1790 ems = self.cfg.GetUseExternalMipScript() 1791 feedback_fn("Shutting down master ip on the current netdev (%s)" % 1792 self.cluster.master_netdev) 1793 result = self.rpc.call_node_deactivate_master_ip(master_params.uuid, 1794 master_params, ems) 1795 if not self.op.force: 1796 result.Raise("Could not disable the master ip") 1797 else: 1798 if result.fail_msg: 1799 msg = ("Could not disable the master ip (continuing anyway): %s" % 1800 result.fail_msg) 1801 feedback_fn(msg) 1802 feedback_fn("Changing master_netdev from %s to %s" % 1803 (master_params.netdev, self.op.master_netdev)) 1804 self.cluster.master_netdev = self.op.master_netdev 1805 1806 if self.op.master_netmask: 1807 master_params = self.cfg.GetMasterNetworkParameters() 1808 feedback_fn("Changing master IP netmask to %s" % self.op.master_netmask) 1809 result = self.rpc.call_node_change_master_netmask( 1810 master_params.uuid, master_params.netmask, 1811 self.op.master_netmask, master_params.ip, 1812 master_params.netdev) 1813 result.Warn("Could not change the master IP netmask", feedback_fn) 1814 self.cluster.master_netmask = self.op.master_netmask 1815 1816 if self.op.install_image: 1817 self.cluster.install_image = self.op.install_image 1818 1819 if self.op.zeroing_image is not None: 1820 CheckImageValidity(self.op.zeroing_image, 1821 "Zeroing image must be an absolute path or a URL") 1822 self.cluster.zeroing_image = self.op.zeroing_image 1823 1824 self.cfg.Update(self.cluster, feedback_fn) 1825 1826 if self.op.master_netdev: 1827 master_params = self.cfg.GetMasterNetworkParameters() 1828 feedback_fn("Starting the master ip on the new master netdev (%s)" % 1829 self.op.master_netdev) 1830 ems = self.cfg.GetUseExternalMipScript() 1831 result = self.rpc.call_node_activate_master_ip(master_params.uuid, 1832 master_params, ems) 1833 result.Warn("Could not re-enable the master ip on the master," 1834 " please restart manually", self.LogWarning) 1835 1836 # Even though 'self.op.enabled_user_shutdown' is being tested 1837 # above, the RPCs can only be done after 'self.cfg.Update' because 1838 # this will update the cluster object and sync 'Ssconf', and kvmd 1839 # uses 'Ssconf'. 1840 if ensure_kvmd: 1841 EnsureKvmdOnNodes(self, feedback_fn, silent_stop=stop_kvmd_silently) 1842 1843 if self.op.compression_tools is not None: 1844 self.cfg.SetCompressionTools(self.op.compression_tools) 1845 1846 if self.op.maint_round_delay is not None: 1847 self.cfg.SetMaintdRoundDelay(self.op.maint_round_delay) 1848 1849 if self.op.maint_balance is not None: 1850 self.cfg.SetMaintdBalance(self.op.maint_balance) 1851 1852 if self.op.maint_balance_threshold is not None: 1853 self.cfg.SetMaintdBalanceThreshold(self.op.maint_balance_threshold) 1854 1855 network_name = self.op.instance_communication_network 1856 if network_name is not None: 1857 return self._ModifyInstanceCommunicationNetwork(self.cfg, 1858 network_name, feedback_fn) 1859 else: 1860 return None 1861
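# A hedged usage sketch (hypothetical caller, not part of this module):
# when Exec returns a ResultWithJobs, the master processor submits the
# contained jobs after this LU finishes, so the network add/connect
# opcodes run as follow-up jobs rather than inside this operation:
#
#   result = lu.Exec(feedback_fn)
#   if result is not None:
#       # result.jobs is a list of jobs, each a list of opcodes
#       for job in result.jobs:
#           submit_job(job)   # hypothetical submission hook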