Package ganeti :: Package tools :: Module cfgupgrade
[hide private]
[frames] | no frames]

Source Code for Module ganeti.tools.cfgupgrade

  1  # 
  2  # 
  3   
  4  # Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Google Inc. 
  5  # All rights reserved. 
  6  # 
  7  # Redistribution and use in source and binary forms, with or without 
  8  # modification, are permitted provided that the following conditions are 
  9  # met: 
 10  # 
 11  # 1. Redistributions of source code must retain the above copyright notice, 
 12  # this list of conditions and the following disclaimer. 
 13  # 
 14  # 2. Redistributions in binary form must reproduce the above copyright 
 15  # notice, this list of conditions and the following disclaimer in the 
 16  # documentation and/or other materials provided with the distribution. 
 17  # 
 18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
 19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
 20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
 21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
 22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
 23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
 24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
 25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
 26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
 27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
 28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 29   
 30  """Library of the tools/cfgupgrade utility. 
 31   
 32  This code handles only the types supported by simplejson. As an 
 33  example, 'set' is a 'list'. 
 34   
 35  """ 
 36   
 37  import copy 
 38  import os 
 39  import os.path 
 40  import sys 
 41  import logging 
 42  import optparse 
 43  import time 
 44  import functools 
 45  from cStringIO import StringIO 
 46   
 47  from ganeti import cli 
 48  from ganeti import constants 
 49  from ganeti import serializer 
 50  from ganeti import utils 
 51  from ganeti import bootstrap 
 52  from ganeti import config 
 53  from ganeti import pathutils 
 54  from ganeti import netutils 
 55   
 56  from ganeti.utils import version 
 57   
 58   
#: Target major version we will upgrade to
TARGET_MAJOR = 2
#: Target minor version we will upgrade to
TARGET_MINOR = 16
#: Target major version for downgrade
DOWNGRADE_MAJOR = 2
#: Target minor version for downgrade
DOWNGRADE_MINOR = 15

# map of legacy device types
# (mapping differing old LD_* constants to new DT_* constants)
DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
# (mapping differing new DT_* constants to old LD_* constants)
# Derived automatically so the two maps can never drift apart.
DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())
class Error(Exception):
  """Generic exception raised by the cfgupgrade tool."""
78
def ParseOptions(args=None):
  """Parses the command-line options of cfgupgrade.

  @param args: argument list to parse instead of C{sys.argv}
  @return: (options, args) pair as produced by optparse

  """
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  add_opt = parser.add_option
  add_opt("--dry-run", dest="dry_run", action="store_true",
          help="Try to do the conversion, but don't write output file")
  add_opt(cli.FORCE_OPT)
  add_opt(cli.DEBUG_OPT)
  add_opt(cli.VERBOSE_OPT)
  add_opt("--ignore-hostname", dest="ignore_hostname",
          action="store_true", default=False,
          help="Don't abort if hostname doesn't match")
  add_opt("--path", dest="data_dir", default=pathutils.DATA_DIR,
          help=("Convert configuration in this directory instead of '%s'" %
                pathutils.DATA_DIR))
  add_opt("--confdir", dest="conf_dir", default=pathutils.CONF_DIR,
          help=("Use this directory instead of '%s'" % pathutils.CONF_DIR))
  add_opt("--no-verify", dest="no_verify", action="store_true", default=False,
          help="Do not verify configuration after upgrade")
  add_opt("--downgrade", dest="downgrade", action="store_true", default=False,
          help="Downgrade to the previous stable version")
  return parser.parse_args(args=args)
106
def OrFail(description=None):
  """Make failure non-fatal and improve reporting.

  Decorator for CfgUpgrade steps: a snapshot of C{self.config_data} is
  taken before the step runs; if the step raises, the configuration is
  rolled back to the snapshot, the failure is logged and recorded in
  C{self.errors}, and execution continues with the next step.

  @param description: human-readable step name used in the error message
      (defaults to the wrapped function's name)

  """
  def wrapper(f):
    @functools.wraps(f)
    def wrapped(self):
      # Deep copy so a partially-applied step can be fully undone
      safety = copy.deepcopy(self.config_data)
      try:
        f(self)
      except BaseException as e:
        # NOTE(review): BaseException also swallows KeyboardInterrupt and
        # SystemExit here; kept to preserve the existing behavior.
        # Fixed: use cross-version "as e" syntax and "__name__" instead of
        # the Python-2-only "except X, e" / "func_name" spellings.
        msg = "%s failed:\n%s" % (description or f.__name__, e)
        logging.exception(msg)
        self.config_data = safety
        self.errors.append(msg)
    return wrapped
  return wrapper
124 125 -class CfgUpgrade(object):
126 - def __init__(self, opts, args):
127 self.opts = opts 128 self.args = args 129 self.errors = []
130
  def Run(self):
    """Main program.

    Orchestrates the whole up/downgrade: composes file paths, sets up
    logging, sanity-checks options and the data directory, loads the
    configuration, dispatches on the configuration version, writes the
    result back (regenerating missing crypto material) and re-verifies
    the written file.

    @raise Error: on unexpected arguments, unrecognized configuration
        layout/version, or a failed upgrade/downgrade step

    """
    self._ComposePaths()

    self.SetupLogging()

    # Option checking
    if self.args:
      raise Error("No arguments expected")
    # Downgraded configurations cannot be verified by the current tools,
    # so verification is implicitly disabled for --downgrade
    if self.opts.downgrade and not self.opts.no_verify:
      self.opts.no_verify = True

    # Check master name
    if not (self.CheckHostname(self.opts.SSCONF_MASTER_NODE) or
            self.opts.ignore_hostname):
      logging.error("Aborting due to hostname mismatch")
      sys.exit(constants.EXIT_FAILURE)

    self._AskUser()

    # Check whether it's a Ganeti configuration directory
    if not (os.path.isfile(self.opts.CONFIG_DATA_PATH) and
            os.path.isfile(self.opts.SERVER_PEM_PATH) and
            os.path.isfile(self.opts.KNOWN_HOSTS_PATH)):
      raise Error(("%s does not seem to be a Ganeti configuration"
                   " directory") % self.opts.data_dir)

    if not os.path.isdir(self.opts.conf_dir):
      raise Error("Not a directory: %s" % self.opts.conf_dir)

    self.config_data = serializer.LoadJson(utils.ReadFile(
        self.opts.CONFIG_DATA_PATH))

    try:
      config_version = self.config_data["version"]
    except KeyError:
      raise Error("Unable to determine configuration version")

    (config_major, config_minor, config_revision) = \
      version.SplitVersion(config_version)

    logging.info("Found configuration version %s (%d.%d.%d)",
                 config_version, config_major, config_minor, config_revision)

    # "config_version" inside the cluster dict is a pre-2.0 leftover;
    # its presence means the file is in an unexpected mixed state
    if "config_version" in self.config_data["cluster"]:
      raise Error("Inconsistent configuration: found config_version in"
                  " configuration file")

    # Downgrade to the previous stable version
    if self.opts.downgrade:
      self._Downgrade(config_major, config_minor, config_version,
                      config_revision)

    # Upgrade from 2.{0..15} to 2.16
    elif config_major == 2 and config_minor in range(0, 16):
      if config_revision != 0:
        logging.warning("Config revision is %s, not 0", config_revision)
      if not self.UpgradeAll():
        raise Error("Upgrade failed:\n%s" % '\n'.join(self.errors))

    elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
      logging.info("No changes necessary")

    else:
      raise Error("Configuration version %d.%d.%d not supported by this tool" %
                  (config_major, config_minor, config_revision))

    try:
      logging.info("Writing configuration file to %s",
                   self.opts.CONFIG_DATA_PATH)
      utils.WriteFile(file_name=self.opts.CONFIG_DATA_PATH,
                      data=serializer.DumpJson(self.config_data),
                      mode=0600,
                      dry_run=self.opts.dry_run,
                      backup=True)

      if not self.opts.dry_run:
        # This creates the cluster certificate if it does not exist yet.
        # In this case, we do not automatically create a client certificate
        # as well, because if the cluster certificate did not exist before,
        # no client certificate will exist on any node yet. In this case
        # all client certificate should be renewed by 'gnt-cluster
        # renew-crypto --new-node-certificates'. This will be enforced
        # by a nagging warning in 'gnt-cluster verify'.
        bootstrap.GenerateClusterCrypto(
          False, False, False, False, False, False, None,
          nodecert_file=self.opts.SERVER_PEM_PATH,
          rapicert_file=self.opts.RAPI_CERT_FILE,
          spicecert_file=self.opts.SPICE_CERT_FILE,
          spicecacert_file=self.opts.SPICE_CACERT_FILE,
          hmackey_file=self.opts.CONFD_HMAC_KEY,
          cds_file=self.opts.CDS_FILE)

    except Exception:
      logging.critical("Writing configuration failed. It is probably in an"
                       " inconsistent state and needs manual intervention.")
      raise

    self._TestLoadingConfigFile()
232
233 - def SetupLogging(self):
234 """Configures the logging module. 235 236 """ 237 formatter = logging.Formatter("%(asctime)s: %(message)s") 238 239 stderr_handler = logging.StreamHandler() 240 stderr_handler.setFormatter(formatter) 241 if self.opts.debug: 242 stderr_handler.setLevel(logging.NOTSET) 243 elif self.opts.verbose: 244 stderr_handler.setLevel(logging.INFO) 245 else: 246 stderr_handler.setLevel(logging.WARNING) 247 248 root_logger = logging.getLogger("") 249 root_logger.setLevel(logging.NOTSET) 250 root_logger.addHandler(stderr_handler)
251 252 @staticmethod
253 - def CheckHostname(path):
254 """Ensures hostname matches ssconf value. 255 256 @param path: Path to ssconf file 257 258 """ 259 ssconf_master_node = utils.ReadOneLineFile(path) 260 hostname = netutils.GetHostname().name 261 262 if ssconf_master_node == hostname: 263 return True 264 265 logging.warning("Warning: ssconf says master node is '%s', but this" 266 " machine's name is '%s'; this tool must be run on" 267 " the master node", ssconf_master_node, hostname) 268 return False
269 270 @staticmethod
271 - def _FillIPolicySpecs(default_ipolicy, ipolicy):
272 if "minmax" in ipolicy: 273 for (key, spec) in ipolicy["minmax"][0].items(): 274 for (par, val) in default_ipolicy["minmax"][0][key].items(): 275 if par not in spec: 276 spec[par] = val
277
  def UpgradeIPolicy(self, ipolicy, default_ipolicy, isgroup):
    """Converts an instance policy to the "minmax" layout.

    Moves legacy top-level "min"/"max" entries into a single-element
    "minmax" list, drops "std" from group-level policies (it is only
    valid at cluster level), and fills missing spec values from
    C{default_ipolicy}.

    @param ipolicy: instance policy dict, modified in place
    @param default_ipolicy: policy used to fill in missing spec values
    @param isgroup: whether this is a nodegroup policy (vs the cluster's)

    """
    minmax_keys = ["min", "max"]
    if any((k in ipolicy) for k in minmax_keys):
      minmax = {}
      for key in minmax_keys:
        if key in ipolicy:
          if ipolicy[key]:
            minmax[key] = ipolicy[key]
          # the legacy key is removed even when its value was empty
          del ipolicy[key]
      if minmax:
        ipolicy["minmax"] = [minmax]
    if isgroup and "std" in ipolicy:
      del ipolicy["std"]
    self._FillIPolicySpecs(default_ipolicy, ipolicy)
292 293 @OrFail("Setting networks")
294 - def UpgradeNetworks(self):
295 assert isinstance(self.config_data, dict) 296 # pylint can't infer config_data type 297 # pylint: disable=E1103 298 networks = self.config_data.get("networks", None) 299 if not networks: 300 self.config_data["networks"] = {}
301 302 @OrFail("Upgrading cluster")
303 - def UpgradeCluster(self):
304 assert isinstance(self.config_data, dict) 305 # pylint can't infer config_data type 306 # pylint: disable=E1103 307 cluster = self.config_data.get("cluster", None) 308 if cluster is None: 309 raise Error("Cannot find cluster") 310 311 ipolicy = cluster.setdefault("ipolicy", None) 312 if ipolicy: 313 self.UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False) 314 ial_params = cluster.get("default_iallocator_params", None) 315 316 if not ial_params: 317 cluster["default_iallocator_params"] = {} 318 319 if not "candidate_certs" in cluster: 320 cluster["candidate_certs"] = {} 321 322 cluster["instance_communication_network"] = \ 323 cluster.get("instance_communication_network", "") 324 325 cluster["install_image"] = \ 326 cluster.get("install_image", "") 327 328 cluster["zeroing_image"] = \ 329 cluster.get("zeroing_image", "") 330 331 cluster["compression_tools"] = \ 332 cluster.get("compression_tools", constants.IEC_DEFAULT_TOOLS) 333 334 if "enabled_user_shutdown" not in cluster: 335 cluster["enabled_user_shutdown"] = False 336 337 cluster["data_collectors"] = cluster.get("data_collectors", {}) 338 for name in constants.DATA_COLLECTOR_NAMES: 339 cluster["data_collectors"][name] = \ 340 cluster["data_collectors"].get( 341 name, dict(active=True, 342 interval=constants.MOND_TIME_INTERVAL * 1e6)) 343 344 # These parameters are set to pre-2.16 default values, which 345 # differ from post-2.16 default values 346 if "ssh_key_type" not in cluster: 347 cluster["ssh_key_type"] = constants.SSHK_DSA 348 349 if "ssh_key_bits" not in cluster: 350 cluster["ssh_key_bits"] = 1024
351 352 @OrFail("Upgrading groups")
353 - def UpgradeGroups(self):
354 cl_ipolicy = self.config_data["cluster"].get("ipolicy") 355 for group in self.config_data["nodegroups"].values(): 356 networks = group.get("networks", None) 357 if not networks: 358 group["networks"] = {} 359 ipolicy = group.get("ipolicy", None) 360 if ipolicy: 361 if cl_ipolicy is None: 362 raise Error("A group defines an instance policy but there is no" 363 " instance policy at cluster level") 364 self.UpgradeIPolicy(ipolicy, cl_ipolicy, True)
365
366 - def GetExclusiveStorageValue(self):
367 """Return a conservative value of the exclusive_storage flag. 368 369 Return C{True} if the cluster or at least a nodegroup have the flag set. 370 371 """ 372 ret = False 373 cluster = self.config_data["cluster"] 374 ndparams = cluster.get("ndparams") 375 if ndparams is not None and ndparams.get("exclusive_storage"): 376 ret = True 377 for group in self.config_data["nodegroups"].values(): 378 ndparams = group.get("ndparams") 379 if ndparams is not None and ndparams.get("exclusive_storage"): 380 ret = True 381 return ret
382
383 - def RemovePhysicalId(self, disk):
384 if "children" in disk: 385 for d in disk["children"]: 386 self.RemovePhysicalId(d) 387 if "physical_id" in disk: 388 del disk["physical_id"]
389
390 - def ChangeDiskDevType(self, disk, dev_type_map):
391 """Replaces disk's dev_type attributes according to the given map. 392 393 This can be used for both, up or downgrading the disks. 394 """ 395 if disk["dev_type"] in dev_type_map: 396 disk["dev_type"] = dev_type_map[disk["dev_type"]] 397 if "children" in disk: 398 for child in disk["children"]: 399 self.ChangeDiskDevType(child, dev_type_map)
400
  def UpgradeDiskDevType(self, disk):
    """Upgrades the disks' device type.

    Maps legacy LD_* names (e.g. "lvm", "drbd8") to the corresponding
    DT_* constants, recursing into child disks.

    @param disk: disk dict, modified in place

    """
    self.ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)
404 405 @staticmethod
406 - def _ConvertNicNameToUuid(iobj, network2uuid):
407 for nic in iobj["nics"]: 408 name = nic.get("network", None) 409 if name: 410 uuid = network2uuid.get(name, None) 411 if uuid: 412 print("NIC with network name %s found." 413 " Substituting with uuid %s." % (name, uuid)) 414 nic["network"] = uuid
415 416 @classmethod
417 - def AssignUuid(cls, disk):
418 if not "uuid" in disk: 419 disk["uuid"] = utils.io.NewUUID() 420 if "children" in disk: 421 for d in disk["children"]: 422 cls.AssignUuid(d)
423
424 - def _ConvertDiskAndCheckMissingSpindles(self, iobj, instance):
425 missing_spindles = False 426 if "disks" not in iobj: 427 raise Error("Instance '%s' doesn't have a disks entry?!" % instance) 428 disks = iobj["disks"] 429 if not all(isinstance(d, str) for d in disks): 430 # Disks are not top level citizens 431 for idx, dobj in enumerate(disks): 432 self.RemovePhysicalId(dobj) 433 434 expected = "disk/%s" % idx 435 current = dobj.get("iv_name", "") 436 if current != expected: 437 logging.warning("Updating iv_name for instance %s/disk %s" 438 " from '%s' to '%s'", 439 instance, idx, current, expected) 440 dobj["iv_name"] = expected 441 442 if "dev_type" in dobj: 443 self.UpgradeDiskDevType(dobj) 444 445 if not "spindles" in dobj: 446 missing_spindles = True 447 448 self.AssignUuid(dobj) 449 return missing_spindles
450 451 @OrFail("Upgrading instance with spindles")
452 - def UpgradeInstances(self):
453 """Upgrades the instances' configuration.""" 454 455 network2uuid = dict((n["name"], n["uuid"]) 456 for n in self.config_data["networks"].values()) 457 if "instances" not in self.config_data: 458 raise Error("Can't find the 'instances' key in the configuration!") 459 460 missing_spindles = False 461 for instance, iobj in self.config_data["instances"].items(): 462 self._ConvertNicNameToUuid(iobj, network2uuid) 463 if self._ConvertDiskAndCheckMissingSpindles(iobj, instance): 464 missing_spindles = True 465 if "admin_state_source" not in iobj: 466 iobj["admin_state_source"] = constants.ADMIN_SOURCE 467 468 if self.GetExclusiveStorageValue() and missing_spindles: 469 # We cannot be sure that the instances that are missing spindles have 470 # exclusive storage enabled (the check would be more complicated), so we 471 # give a noncommittal message 472 logging.warning("Some instance disks could be needing to update the" 473 " spindles parameter; you can check by running" 474 " 'gnt-cluster verify', and fix any problem with" 475 " 'gnt-cluster repair-disk-sizes'")
476
  def UpgradeRapiUsers(self):
    """Moves the RAPI users file to its post-2.4 location.

    Renames a genuine (non-symlink) pre-2.4 users file to the current
    path, refusing to clobber an existing one, then leaves a symlink at
    the old location so older tools keep working.  All filesystem
    operations honor --dry-run.

    @raise Error: if files exist at both the old and the new location

    """
    if (os.path.isfile(self.opts.RAPI_USERS_FILE_PRE24) and
        not os.path.islink(self.opts.RAPI_USERS_FILE_PRE24)):
      if os.path.exists(self.opts.RAPI_USERS_FILE):
        raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                    " already exists at %s" %
                    (self.opts.RAPI_USERS_FILE_PRE24,
                     self.opts.RAPI_USERS_FILE))
      logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                   self.opts.RAPI_USERS_FILE_PRE24, self.opts.RAPI_USERS_FILE)
      if not self.opts.dry_run:
        utils.RenameFile(self.opts.RAPI_USERS_FILE_PRE24,
                         self.opts.RAPI_USERS_FILE, mkdir=True,
                         mkdir_mode=0750)

    # Create a symlink for RAPI users file
    if (not (os.path.islink(self.opts.RAPI_USERS_FILE_PRE24) or
             os.path.isfile(self.opts.RAPI_USERS_FILE_PRE24)) and
        os.path.isfile(self.opts.RAPI_USERS_FILE)):
      logging.info("Creating symlink from %s to %s",
                   self.opts.RAPI_USERS_FILE_PRE24, self.opts.RAPI_USERS_FILE)
      if not self.opts.dry_run:
        os.symlink(self.opts.RAPI_USERS_FILE, self.opts.RAPI_USERS_FILE_PRE24)
499
500 - def UpgradeWatcher(self):
501 # Remove old watcher state file if it exists 502 if os.path.exists(self.opts.WATCHER_STATEFILE): 503 logging.info("Removing watcher state file %s", 504 self.opts.WATCHER_STATEFILE) 505 if not self.opts.dry_run: 506 utils.RemoveFile(self.opts.WATCHER_STATEFILE)
507 508 @OrFail("Upgrading file storage paths")
509 - def UpgradeFileStoragePaths(self):
510 # Write file storage paths 511 if not os.path.exists(self.opts.FILE_STORAGE_PATHS_FILE): 512 cluster = self.config_data["cluster"] 513 file_storage_dir = cluster.get("file_storage_dir") 514 shared_file_storage_dir = cluster.get("shared_file_storage_dir") 515 del cluster 516 517 logging.info("Ganeti 2.7 and later only allow whitelisted directories" 518 " for file storage; writing existing configuration values" 519 " into '%s'", 520 self.opts.FILE_STORAGE_PATHS_FILE) 521 522 if file_storage_dir: 523 logging.info("File storage directory: %s", file_storage_dir) 524 if shared_file_storage_dir: 525 logging.info("Shared file storage directory: %s", 526 shared_file_storage_dir) 527 528 buf = StringIO() 529 buf.write("# List automatically generated from configuration by\n") 530 buf.write("# cfgupgrade at %s\n" % time.asctime()) 531 if file_storage_dir: 532 buf.write("%s\n" % file_storage_dir) 533 if shared_file_storage_dir: 534 buf.write("%s\n" % shared_file_storage_dir) 535 utils.WriteFile(file_name=self.opts.FILE_STORAGE_PATHS_FILE, 536 data=buf.getvalue(), 537 mode=0600, 538 dry_run=self.opts.dry_run, 539 backup=True)
540 541 @staticmethod
542 - def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
543 if old_key not in nodes_by_old_key: 544 logging.warning("Can't find node '%s' in configuration, " 545 "assuming that it's already up-to-date", old_key) 546 return old_key 547 return nodes_by_old_key[old_key][new_key_field]
548
549 - def ChangeNodeIndices(self, config_data, old_key_field, new_key_field):
550 def ChangeDiskNodeIndices(disk): 551 # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be 552 # considered when up/downgrading from/to any versions touching 2.9 on the 553 # way. 554 drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD 555 if disk["dev_type"] in drbd_disk_types: 556 for i in range(0, 2): 557 disk["logical_id"][i] = self.GetNewNodeIndex(nodes_by_old_key, 558 disk["logical_id"][i], 559 new_key_field) 560 if "children" in disk: 561 for child in disk["children"]: 562 ChangeDiskNodeIndices(child)
563 564 nodes_by_old_key = {} 565 nodes_by_new_key = {} 566 for (_, node) in config_data["nodes"].items(): 567 nodes_by_old_key[node[old_key_field]] = node 568 nodes_by_new_key[node[new_key_field]] = node 569 570 config_data["nodes"] = nodes_by_new_key 571 572 cluster = config_data["cluster"] 573 cluster["master_node"] = self.GetNewNodeIndex(nodes_by_old_key, 574 cluster["master_node"], 575 new_key_field) 576 577 for inst in config_data["instances"].values(): 578 inst["primary_node"] = self.GetNewNodeIndex(nodes_by_old_key, 579 inst["primary_node"], 580 new_key_field) 581 582 for disk in config_data["disks"].values(): 583 ChangeDiskNodeIndices(disk)
584 585 @staticmethod
586 - def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
587 insts_by_old_key = {} 588 insts_by_new_key = {} 589 for (_, inst) in config_data["instances"].items(): 590 insts_by_old_key[inst[old_key_field]] = inst 591 insts_by_new_key[inst[new_key_field]] = inst 592 593 config_data["instances"] = insts_by_new_key
  @OrFail("Changing node indices")
  def UpgradeNodeIndices(self):
    """Re-keys nodes (and all node references) from name to UUID."""
    self.ChangeNodeIndices(self.config_data, "name", "uuid")
  @OrFail("Changing instance indices")
  def UpgradeInstanceIndices(self):
    """Re-keys the instances dict from name to UUID."""
    self.ChangeInstanceIndices(self.config_data, "name", "uuid")
602 603 @OrFail("Adding filters")
604 - def UpgradeFilters(self):
605 # pylint can't infer config_data type 606 # pylint: disable=E1103 607 filters = self.config_data.get("filters", None) 608 if not filters: 609 self.config_data["filters"] = {}
610 611 @OrFail("Set top level disks")
612 - def UpgradeTopLevelDisks(self):
613 """Upgrades the disks as config top level citizens.""" 614 if "instances" not in self.config_data: 615 raise Error("Can't find the 'instances' key in the configuration!") 616 617 if "disks" in self.config_data: 618 # Disks are already top level citizens 619 return 620 621 self.config_data["disks"] = dict() 622 for iobj in self.config_data["instances"].values(): 623 disk_uuids = [] 624 for disk in iobj["disks"]: 625 duuid = disk["uuid"] 626 disk["serial_no"] = 1 627 # Instances may not have the ctime value, and the Haskell serialization 628 # will have set it to zero. 629 disk["ctime"] = disk["mtime"] = iobj.get("ctime", 0) 630 self.config_data["disks"][duuid] = disk 631 disk_uuids.append(duuid) 632 iobj["disks"] = disk_uuids
633 634 @OrFail("Removing disk template")
635 - def UpgradeDiskTemplate(self):
636 if "instances" not in self.config_data: 637 raise Error("Can't find the 'instances' dictionary in the configuration.") 638 instances = self.config_data["instances"] 639 for inst in instances.values(): 640 if "disk_template" in inst: 641 del inst["disk_template"]
642 643 # The following function is based on a method of class Disk with the same 644 # name, but adjusted to work with dicts and sets.
645 - def _ComputeAllNodes(self, disk):
646 """Recursively compute nodes given a top device.""" 647 nodes = set() 648 if disk["dev_type"] in constants.DTS_DRBD: 649 nodes = set(disk["logical_id"][:2]) 650 for child in disk.get("children", []): 651 nodes |= self._ComputeAllNodes(child) 652 return nodes
653
654 - def _RecursiveUpdateNodes(self, disk, nodes):
655 disk["nodes"] = nodes 656 for child in disk.get("children", []): 657 self._RecursiveUpdateNodes(child, nodes)
658 659 @OrFail("Upgrading disk nodes")
660 - def UpgradeDiskNodes(self):
661 """Specify the nodes from which a disk is accessible in its definition. 662 663 For every disk that is attached to an instance, get the UUIDs of the nodes 664 that it's accessible from. There are three main cases: 665 1) Internally mirrored disks (DRBD): 666 These disks are accessible from two nodes, so the nodes list will include 667 these. Their children (data, meta) are also accessible from two nodes, 668 therefore they will inherit the nodes of the parent. 669 2) Externally mirrored disks (Blockdev, Ext, Gluster, RBD, Shared File): 670 These disks should be accessible from any node in the cluster, therefore the 671 nodes list will be empty. 672 3) Single-node disks (Plain, File): 673 These disks are accessible from one node only, therefore the nodes list will 674 consist only of the primary instance node. 675 """ 676 disks = self.config_data["disks"] 677 for instance in self.config_data["instances"].itervalues(): 678 # Get all disk nodes for an instance 679 instance_node = set([instance["primary_node"]]) 680 disk_nodes = set() 681 for disk_uuid in instance["disks"]: 682 disk_nodes |= self._ComputeAllNodes(disks[disk_uuid]) 683 all_nodes = list(instance_node | disk_nodes) 684 685 # Populate the `nodes` list field of each disk. 686 for disk_uuid in instance["disks"]: 687 disk = disks[disk_uuid] 688 if "nodes" in disk: 689 # The "nodes" field has already been added for this disk. 690 continue 691 692 if disk["dev_type"] in constants.DTS_INT_MIRROR: 693 self._RecursiveUpdateNodes(disk, all_nodes) 694 elif disk["dev_type"] in (constants.DT_PLAIN, constants.DT_FILE): 695 disk["nodes"] = all_nodes 696 else: 697 disk["nodes"] = []
698
  def UpgradeAll(self):
    """Runs all upgrade steps in order, stamping the target version.

    Steps decorated with L{OrFail} record failures in C{self.errors}
    instead of aborting.

    @return: True if no step recorded an error

    """
    self.config_data["version"] = version.BuildVersion(TARGET_MAJOR,
                                                       TARGET_MINOR, 0)
    self.UpgradeRapiUsers()
    self.UpgradeWatcher()
    steps = [self.UpgradeFileStoragePaths,
             self.UpgradeNetworks,
             self.UpgradeCluster,
             self.UpgradeGroups,
             self.UpgradeInstances,
             self.UpgradeTopLevelDisks,
             self.UpgradeNodeIndices,
             self.UpgradeInstanceIndices,
             self.UpgradeFilters,
             self.UpgradeDiskNodes,
             self.UpgradeDiskTemplate]
    for s in steps:
      s()
    return not self.errors
718 719 # DOWNGRADE ------------------------------------------------------------ 720 721 @OrFail("Removing SSH parameters")
722 - def DowngradeSshKeyParams(self):
723 """Removes the SSH key type and bits parameters from the config. 724 725 Also fails if these have been changed from values appropriate in lower 726 Ganeti versions. 727 728 """ 729 # pylint: disable=E1103 730 # Because config_data is a dictionary which has the get method. 731 cluster = self.config_data.get("cluster", None) 732 if cluster is None: 733 raise Error("Can't find the cluster entry in the configuration") 734 735 def _FetchAndDelete(key): 736 val = cluster.get(key, None) 737 if key in cluster: 738 del cluster[key] 739 return val
740 741 ssh_key_type = _FetchAndDelete("ssh_key_type") 742 _FetchAndDelete("ssh_key_bits") 743 744 if ssh_key_type is not None and ssh_key_type != "dsa": 745 raise Error("The current Ganeti setup is using non-DSA SSH keys, and" 746 " versions below 2.16 do not support these. To downgrade," 747 " please perform a gnt-cluster renew-crypto using the " 748 " --new-ssh-keys and --ssh-key-type=dsa options, generating" 749 " DSA keys that older versions can also use.") 750
  def DowngradeAll(self):
    """Runs all downgrade steps, stamping the downgraded version.

    @return: True if no step recorded an error

    """
    self.config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
                                                       DOWNGRADE_MINOR, 0)

    self.DowngradeSshKeyParams()
    return not self.errors
757
758 - def _ComposePaths(self):
759 # We need to keep filenames locally because they might be renamed between 760 # versions. 761 self.opts.data_dir = os.path.abspath(self.opts.data_dir) 762 self.opts.CONFIG_DATA_PATH = self.opts.data_dir + "/config.data" 763 self.opts.SERVER_PEM_PATH = self.opts.data_dir + "/server.pem" 764 self.opts.CLIENT_PEM_PATH = self.opts.data_dir + "/client.pem" 765 self.opts.KNOWN_HOSTS_PATH = self.opts.data_dir + "/known_hosts" 766 self.opts.RAPI_CERT_FILE = self.opts.data_dir + "/rapi.pem" 767 self.opts.SPICE_CERT_FILE = self.opts.data_dir + "/spice.pem" 768 self.opts.SPICE_CACERT_FILE = self.opts.data_dir + "/spice-ca.pem" 769 self.opts.RAPI_USERS_FILE = self.opts.data_dir + "/rapi/users" 770 self.opts.RAPI_USERS_FILE_PRE24 = self.opts.data_dir + "/rapi_users" 771 self.opts.CONFD_HMAC_KEY = self.opts.data_dir + "/hmac.key" 772 self.opts.CDS_FILE = self.opts.data_dir + "/cluster-domain-secret" 773 self.opts.SSCONF_MASTER_NODE = self.opts.data_dir + "/ssconf_master_node" 774 self.opts.WATCHER_STATEFILE = self.opts.data_dir + "/watcher.data" 775 self.opts.FILE_STORAGE_PATHS_FILE = (self.opts.conf_dir + 776 "/file-storage-paths")
777
778 - def _AskUser(self):
779 if not self.opts.force: 780 if self.opts.downgrade: 781 usertext = ("The configuration is going to be DOWNGRADED " 782 "to version %s.%s. Some configuration data might be " 783 " removed if they don't fit" 784 " in the old format. Please make sure you have read the" 785 " upgrade notes (available in the UPGRADE file and included" 786 " in other documentation formats) to understand what they" 787 " are. Continue with *DOWNGRADING* the configuration?" % 788 (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)) 789 else: 790 usertext = ("Please make sure you have read the upgrade notes for" 791 " Ganeti %s (available in the UPGRADE file and included" 792 " in other documentation formats). Continue with upgrading" 793 " configuration?" % constants.RELEASE_VERSION) 794 if not cli.AskUser(usertext): 795 sys.exit(constants.EXIT_FAILURE)
796
797 - def _Downgrade(self, config_major, config_minor, config_version, 798 config_revision):
799 if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or 800 (config_major == DOWNGRADE_MAJOR and 801 config_minor == DOWNGRADE_MINOR)): 802 raise Error("Downgrade supported only from the latest version (%s.%s)," 803 " found %s (%s.%s.%s) instead" % 804 (TARGET_MAJOR, TARGET_MINOR, config_version, config_major, 805 config_minor, config_revision)) 806 if not self.DowngradeAll(): 807 raise Error("Downgrade failed:\n%s" % "\n".join(self.errors))
808
  def _TestLoadingConfigFile(self):
    """Reloads and verifies the just-written configuration file.

    Verification is skipped for --dry-run and --no-verify (and therefore
    for all downgrades); a final success/failure summary is printed to
    stderr in every case.

    """
    # test loading the config file
    all_ok = True
    if not (self.opts.dry_run or self.opts.no_verify):
      logging.info("Testing the new config file...")
      cfg = config.ConfigWriter(cfg_file=self.opts.CONFIG_DATA_PATH,
                                accept_foreign=self.opts.ignore_hostname,
                                offline=True)
      # if we reached this, it's all fine
      vrfy = cfg.VerifyConfig()
      if vrfy:
        logging.error("Errors after conversion:")
        for item in vrfy:
          logging.error(" - %s", item)
        all_ok = False
      else:
        logging.info("File loaded successfully after upgrading")
      del cfg

    if self.opts.downgrade:
      action = "downgraded"
      out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
    else:
      action = "upgraded"
      out_ver = constants.RELEASE_VERSION
    if all_ok:
      cli.ToStderr("Configuration successfully %s to version %s.",
                   action, out_ver)
    else:
      cli.ToStderr("Configuration %s to version %s, but there are errors."
                   "\nPlease review the file.", action, out_ver)
840