Package ganeti :: Package tools :: Module cfgupgrade
[hide private]
[frames] | [no frames]

Source Code for Module ganeti.tools.cfgupgrade

  1  # 
  2  # 
  3   
  4  # Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 Google Inc. 
  5  # All rights reserved. 
  6  # 
  7  # Redistribution and use in source and binary forms, with or without 
  8  # modification, are permitted provided that the following conditions are 
  9  # met: 
 10  # 
 11  # 1. Redistributions of source code must retain the above copyright notice, 
 12  # this list of conditions and the following disclaimer. 
 13  # 
 14  # 2. Redistributions in binary form must reproduce the above copyright 
 15  # notice, this list of conditions and the following disclaimer in the 
 16  # documentation and/or other materials provided with the distribution. 
 17  # 
 18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
 19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
 20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
 21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
 22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
 23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
 24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
 25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
 26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
 27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
 28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 29   
 30  """Library of the tools/cfgupgrade utility. 
 31   
 32  This code handles only the types supported by simplejson. As an 
 33  example, 'set' is a 'list'. 
 34   
 35  """ 
 36   
 37  import copy 
 38  import os 
 39  import os.path 
 40  import sys 
 41  import logging 
 42  import optparse 
 43  import time 
 44  import functools 
 45  from cStringIO import StringIO 
 46   
 47  from ganeti import cli 
 48  from ganeti import constants 
 49  from ganeti import serializer 
 50  from ganeti import utils 
 51  from ganeti import bootstrap 
 52  from ganeti import config 
 53  from ganeti import pathutils 
 54  from ganeti import netutils 
 55   
 56  from ganeti.utils import version 
 57   
 58   
#: Target major version we will upgrade to
TARGET_MAJOR = 2
#: Target minor version we will upgrade to
TARGET_MINOR = 18
#: Target major version for downgrade
DOWNGRADE_MAJOR = 2
#: Target minor version for downgrade (the previous stable release)
DOWNGRADE_MINOR = 17

# map of legacy device types
# (mapping differing old LD_* constants to new DT_* constants)
DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
# (mapping differing new DT_* constants to old LD_* constants)
DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())
class Error(Exception):
  """Generic exception raised by the cfgupgrade tool."""
def ParseOptions(args=None):
  """Parses the command line options of the cfgupgrade tool.

  @param args: argument list to parse; if C{None}, C{sys.argv[1:]} is used
  @return: the (options, arguments) tuple returned by
      L{optparse.OptionParser.parse_args}

  """
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  return parser.parse_args(args=args)
def OrFail(description=None):
  """Make failure non-fatal and improve reporting.

  Decorator factory for the upgrade steps of L{CfgUpgrade}: the wrapped
  method's exceptions are caught, C{self.config_data} is rolled back to a
  deep copy taken before the call, and the error is recorded in
  C{self.errors} instead of propagating.

  Note: uses C{except ... as} and C{f.__name__} (instead of the
  Python-2-only C{except E, e} / C{f.func_name}), which behave identically
  on Python 2.6+ and keep the module forward-compatible.

  @param description: human-readable step name used in the error message;
      defaults to the wrapped function's name

  """
  def wrapper(f):
    @functools.wraps(f)
    def wrapped(self):
      # Snapshot the config so a failing step cannot leave it half-upgraded
      safety = copy.deepcopy(self.config_data)
      try:
        f(self)
      except BaseException as e:
        msg = "%s failed:\n%s" % (description or f.__name__, e)
        logging.exception(msg)
        self.config_data = safety
        self.errors.append(msg)
    return wrapped
  return wrapper
class CfgUpgrade(object):
  """Encapsulates the configuration upgrade/downgrade logic.

  The entry point is L{Run}; individual conversion steps are implemented
  as methods, most of them decorated with L{OrFail} so a failing step is
  rolled back and recorded instead of aborting the whole run.

  """
  def __init__(self, opts, args):
    # Parsed command-line options (see ParseOptions); _ComposePaths later
    # adds the derived file-path attributes to this object.
    self.opts = opts
    # Positional arguments; Run() rejects any (none are expected).
    self.args = args
    # Non-fatal error messages collected by OrFail-wrapped steps.
    self.errors = []
  def Run(self):
    """Main program.

    Composes the file paths, validates the options and environment, loads
    the configuration, dispatches to upgrade or downgrade depending on the
    found version, writes the result back (with a backup) and finally
    re-loads the file for verification.

    @raise Error: on invalid usage, an unusable configuration directory,
        an unsupported configuration version, or a failed upgrade/downgrade

    """
    self._ComposePaths()

    self.SetupLogging()

    # Option checking
    if self.args:
      raise Error("No arguments expected")
    if self.opts.downgrade and not self.opts.no_verify:
      # Verification would be done with the *new* code against the *old*
      # format, so it is skipped on downgrades.
      self.opts.no_verify = True

    # Check master name
    if not (self.CheckHostname(self.opts.SSCONF_MASTER_NODE) or
            self.opts.ignore_hostname):
      logging.error("Aborting due to hostname mismatch")
      sys.exit(constants.EXIT_FAILURE)

    self._AskUser()

    # Check whether it's a Ganeti configuration directory
    if not (os.path.isfile(self.opts.CONFIG_DATA_PATH) and
            os.path.isfile(self.opts.SERVER_PEM_PATH) and
            os.path.isfile(self.opts.KNOWN_HOSTS_PATH)):
      raise Error(("%s does not seem to be a Ganeti configuration"
                   " directory") % self.opts.data_dir)

    if not os.path.isdir(self.opts.conf_dir):
      raise Error("Not a directory: %s" % self.opts.conf_dir)

    self.config_data = serializer.LoadJson(utils.ReadFile(
        self.opts.CONFIG_DATA_PATH))

    try:
      config_version = self.config_data["version"]
    except KeyError:
      raise Error("Unable to determine configuration version")

    (config_major, config_minor, config_revision) = \
      version.SplitVersion(config_version)

    logging.info("Found configuration version %s (%d.%d.%d)",
                 config_version, config_major, config_minor, config_revision)

    if "config_version" in self.config_data["cluster"]:
      raise Error("Inconsistent configuration: found config_version in"
                  " configuration file")

    # Downgrade to the previous stable version
    if self.opts.downgrade:
      self._Downgrade(config_major, config_minor, config_version,
                      config_revision)

    # Upgrade from 2.{0..n-1} to 2.n
    elif config_major == 2 and config_minor in range(0, TARGET_MINOR):
      if config_revision != 0:
        logging.warning("Config revision is %s, not 0", config_revision)
      if not self.UpgradeAll():
        raise Error("Upgrade failed:\n%s" % '\n'.join(self.errors))

    elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
      logging.info("No changes necessary")

    else:
      raise Error("Configuration version %d.%d.%d not supported by this tool" %
                  (config_major, config_minor, config_revision))

    try:
      logging.info("Writing configuration file to %s",
                   self.opts.CONFIG_DATA_PATH)
      utils.WriteFile(file_name=self.opts.CONFIG_DATA_PATH,
                      data=serializer.DumpJson(self.config_data),
                      mode=0600,
                      dry_run=self.opts.dry_run,
                      backup=True)

      if not self.opts.dry_run:
        # This creates the cluster certificate if it does not exist yet.
        # In this case, we do not automatically create a client certificate
        # as well, because if the cluster certificate did not exist before,
        # no client certificate will exist on any node yet. In this case
        # all client certificate should be renewed by 'gnt-cluster
        # renew-crypto --new-node-certificates'. This will be enforced
        # by a nagging warning in 'gnt-cluster verify'.
        bootstrap.GenerateClusterCrypto(
          False, False, False, False, False, False, None,
          nodecert_file=self.opts.SERVER_PEM_PATH,
          rapicert_file=self.opts.RAPI_CERT_FILE,
          spicecert_file=self.opts.SPICE_CERT_FILE,
          spicecacert_file=self.opts.SPICE_CACERT_FILE,
          hmackey_file=self.opts.CONFD_HMAC_KEY,
          cds_file=self.opts.CDS_FILE)

    except Exception:
      logging.critical("Writing configuration failed. It is probably in an"
                       " inconsistent state and needs manual intervention.")
      raise

    self._TestLoadingConfigFile()
233 - def SetupLogging(self):
234 """Configures the logging module. 235 236 """ 237 formatter = logging.Formatter("%(asctime)s: %(message)s") 238 239 stderr_handler = logging.StreamHandler() 240 stderr_handler.setFormatter(formatter) 241 if self.opts.debug: 242 stderr_handler.setLevel(logging.NOTSET) 243 elif self.opts.verbose: 244 stderr_handler.setLevel(logging.INFO) 245 else: 246 stderr_handler.setLevel(logging.WARNING) 247 248 root_logger = logging.getLogger("") 249 root_logger.setLevel(logging.NOTSET) 250 root_logger.addHandler(stderr_handler)
251 252 @staticmethod
253 - def CheckHostname(path):
254 """Ensures hostname matches ssconf value. 255 256 @param path: Path to ssconf file 257 258 """ 259 ssconf_master_node = utils.ReadOneLineFile(path) 260 hostname = netutils.GetHostname().name 261 262 if ssconf_master_node == hostname: 263 return True 264 265 logging.warning("Warning: ssconf says master node is '%s', but this" 266 " machine's name is '%s'; this tool must be run on" 267 " the master node", ssconf_master_node, hostname) 268 return False
269 270 @staticmethod
271 - def _FillIPolicySpecs(default_ipolicy, ipolicy):
272 if "minmax" in ipolicy: 273 for (key, spec) in ipolicy["minmax"][0].items(): 274 for (par, val) in default_ipolicy["minmax"][0][key].items(): 275 if par not in spec: 276 spec[par] = val
277
278 - def UpgradeIPolicy(self, ipolicy, default_ipolicy, isgroup):
279 minmax_keys = ["min", "max"] 280 if any((k in ipolicy) for k in minmax_keys): 281 minmax = {} 282 for key in minmax_keys: 283 if key in ipolicy: 284 if ipolicy[key]: 285 minmax[key] = ipolicy[key] 286 del ipolicy[key] 287 if minmax: 288 ipolicy["minmax"] = [minmax] 289 if isgroup and "std" in ipolicy: 290 del ipolicy["std"] 291 self._FillIPolicySpecs(default_ipolicy, ipolicy)
292 293 @OrFail("Setting networks")
294 - def UpgradeNetworks(self):
295 assert isinstance(self.config_data, dict) 296 # pylint can't infer config_data type 297 # pylint: disable=E1103 298 networks = self.config_data.get("networks", None) 299 if not networks: 300 self.config_data["networks"] = {}
  @OrFail("Upgrading cluster")
  def UpgradeCluster(self):
    """Fills in cluster-level keys that newer versions expect.

    Each key is only added when absent, so re-running is harmless;
    existing values are never overwritten.

    """
    assert isinstance(self.config_data, dict)
    # pylint can't infer config_data type
    # pylint: disable=E1103
    cluster = self.config_data.get("cluster", None)
    if cluster is None:
      raise Error("Cannot find cluster")

    ipolicy = cluster.setdefault("ipolicy", None)
    if ipolicy:
      self.UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
    ial_params = cluster.get("default_iallocator_params", None)

    if not ial_params:
      cluster["default_iallocator_params"] = {}

    if not "candidate_certs" in cluster:
      cluster["candidate_certs"] = {}

    cluster["instance_communication_network"] = \
      cluster.get("instance_communication_network", "")

    cluster["install_image"] = \
      cluster.get("install_image", "")

    cluster["zeroing_image"] = \
      cluster.get("zeroing_image", "")

    cluster["compression_tools"] = \
      cluster.get("compression_tools", constants.IEC_DEFAULT_TOOLS)

    if "enabled_user_shutdown" not in cluster:
      cluster["enabled_user_shutdown"] = False

    # Ensure every known data collector has an entry with defaults
    cluster["data_collectors"] = cluster.get("data_collectors", {})
    for name in constants.DATA_COLLECTOR_NAMES:
      cluster["data_collectors"][name] = \
        cluster["data_collectors"].get(
            name, dict(active=True,
                       interval=constants.MOND_TIME_INTERVAL * 1e6))
    if "diagnose_data_collector_filename" not in cluster:
      cluster["diagnose_data_collector_filename"] = ""

    # These parameters are set to pre-2.16 default values, which
    # differ from post-2.16 default values
    if "ssh_key_type" not in cluster:
      cluster["ssh_key_type"] = constants.SSHK_DSA

    if "ssh_key_bits" not in cluster:
      cluster["ssh_key_bits"] = 1024
353 354 @OrFail("Upgrading groups")
355 - def UpgradeGroups(self):
356 cl_ipolicy = self.config_data["cluster"].get("ipolicy") 357 for group in self.config_data["nodegroups"].values(): 358 networks = group.get("networks", None) 359 if not networks: 360 group["networks"] = {} 361 ipolicy = group.get("ipolicy", None) 362 if ipolicy: 363 if cl_ipolicy is None: 364 raise Error("A group defines an instance policy but there is no" 365 " instance policy at cluster level") 366 self.UpgradeIPolicy(ipolicy, cl_ipolicy, True)
367
368 - def GetExclusiveStorageValue(self):
369 """Return a conservative value of the exclusive_storage flag. 370 371 Return C{True} if the cluster or at least a nodegroup have the flag set. 372 373 """ 374 ret = False 375 cluster = self.config_data["cluster"] 376 ndparams = cluster.get("ndparams") 377 if ndparams is not None and ndparams.get("exclusive_storage"): 378 ret = True 379 for group in self.config_data["nodegroups"].values(): 380 ndparams = group.get("ndparams") 381 if ndparams is not None and ndparams.get("exclusive_storage"): 382 ret = True 383 return ret
384
385 - def RemovePhysicalId(self, disk):
386 if "children" in disk: 387 for d in disk["children"]: 388 self.RemovePhysicalId(d) 389 if "physical_id" in disk: 390 del disk["physical_id"]
391
392 - def ChangeDiskDevType(self, disk, dev_type_map):
393 """Replaces disk's dev_type attributes according to the given map. 394 395 This can be used for both, up or downgrading the disks. 396 """ 397 if disk["dev_type"] in dev_type_map: 398 disk["dev_type"] = dev_type_map[disk["dev_type"]] 399 if "children" in disk: 400 for child in disk["children"]: 401 self.ChangeDiskDevType(child, dev_type_map)
402
  def UpgradeDiskDevType(self, disk):
    """Upgrades the disks' device type.

    Maps legacy LD_* style names ("lvm", "drbd8") to the corresponding
    DT_* constants, recursing into child disks (see DEV_TYPE_OLD_NEW).
    """
    self.ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)
406 407 @staticmethod
408 - def _ConvertNicNameToUuid(iobj, network2uuid):
409 for nic in iobj["nics"]: 410 name = nic.get("network", None) 411 if name: 412 uuid = network2uuid.get(name, None) 413 if uuid: 414 print("NIC with network name %s found." 415 " Substituting with uuid %s." % (name, uuid)) 416 nic["network"] = uuid
417 418 @classmethod
419 - def AssignUuid(cls, disk):
420 if not "uuid" in disk: 421 disk["uuid"] = utils.io.NewUUID() 422 if "children" in disk: 423 for d in disk["children"]: 424 cls.AssignUuid(d)
425
  def _ConvertDiskAndCheckMissingSpindles(self, iobj, instance):
    """Applies the per-disk conversions to one instance's disks.

    Strips physical_id, fixes iv_name, upgrades dev_type, assigns UUIDs.
    Disks that are already top-level citizens (plain UUID strings) are
    left untouched.

    @param iobj: the instance's configuration dictionary
    @param instance: the instance's name (for messages)
    @return: C{True} if at least one disk lacks the 'spindles' parameter

    """
    missing_spindles = False
    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    if not all(isinstance(d, str) for d in disks):
      # Disks are not top level citizens
      for idx, dobj in enumerate(disks):
        self.RemovePhysicalId(dobj)

        expected = "disk/%s" % idx
        current = dobj.get("iv_name", "")
        if current != expected:
          logging.warning("Updating iv_name for instance %s/disk %s"
                          " from '%s' to '%s'",
                          instance, idx, current, expected)
          dobj["iv_name"] = expected

        if "dev_type" in dobj:
          self.UpgradeDiskDevType(dobj)

        if not "spindles" in dobj:
          missing_spindles = True

        self.AssignUuid(dobj)
    return missing_spindles
  @OrFail("Upgrading instance with spindles")
  def UpgradeInstances(self):
    """Upgrades the instances' configuration.

    Converts NIC network names to UUIDs, runs the per-disk conversions and
    defaults 'admin_state_source'; warns if spindles may need updating.

    """
    network2uuid = dict((n["name"], n["uuid"])
                        for n in self.config_data["networks"].values())
    if "instances" not in self.config_data:
      raise Error("Can't find the 'instances' key in the configuration!")

    missing_spindles = False
    for instance, iobj in self.config_data["instances"].items():
      self._ConvertNicNameToUuid(iobj, network2uuid)
      if self._ConvertDiskAndCheckMissingSpindles(iobj, instance):
        missing_spindles = True
      if "admin_state_source" not in iobj:
        iobj["admin_state_source"] = constants.ADMIN_SOURCE

    if self.GetExclusiveStorageValue() and missing_spindles:
      # We cannot be sure that the instances that are missing spindles have
      # exclusive storage enabled (the check would be more complicated), so we
      # give a noncommittal message
      logging.warning("Some instance disks could be needing to update the"
                      " spindles parameter; you can check by running"
                      " 'gnt-cluster verify', and fix any problem with"
                      " 'gnt-cluster repair-disk-sizes'")
  def UpgradeRapiUsers(self):
    """Moves the pre-2.4 RAPI users file to its new location.

    Renames the old file (if it is a real file, not a symlink) to the new
    path, then leaves a symlink at the old location pointing to the new
    file. Both actions honour --dry-run.

    """
    if (os.path.isfile(self.opts.RAPI_USERS_FILE_PRE24) and
        not os.path.islink(self.opts.RAPI_USERS_FILE_PRE24)):
      if os.path.exists(self.opts.RAPI_USERS_FILE):
        raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                    " already exists at %s" %
                    (self.opts.RAPI_USERS_FILE_PRE24,
                     self.opts.RAPI_USERS_FILE))
      logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                   self.opts.RAPI_USERS_FILE_PRE24, self.opts.RAPI_USERS_FILE)
      if not self.opts.dry_run:
        utils.RenameFile(self.opts.RAPI_USERS_FILE_PRE24,
                         self.opts.RAPI_USERS_FILE, mkdir=True,
                         mkdir_mode=0750)

    # Create a symlink for RAPI users file
    if (not (os.path.islink(self.opts.RAPI_USERS_FILE_PRE24) or
             os.path.isfile(self.opts.RAPI_USERS_FILE_PRE24)) and
        os.path.isfile(self.opts.RAPI_USERS_FILE)):
      logging.info("Creating symlink from %s to %s",
                   self.opts.RAPI_USERS_FILE_PRE24, self.opts.RAPI_USERS_FILE)
      if not self.opts.dry_run:
        os.symlink(self.opts.RAPI_USERS_FILE, self.opts.RAPI_USERS_FILE_PRE24)
502 - def UpgradeWatcher(self):
503 # Remove old watcher state file if it exists 504 if os.path.exists(self.opts.WATCHER_STATEFILE): 505 logging.info("Removing watcher state file %s", 506 self.opts.WATCHER_STATEFILE) 507 if not self.opts.dry_run: 508 utils.RemoveFile(self.opts.WATCHER_STATEFILE)
  @OrFail("Upgrading file storage paths")
  def UpgradeFileStoragePaths(self):
    """Writes the file-storage-paths whitelist file if it does not exist.

    The whitelist is seeded from the cluster's configured (shared) file
    storage directories; honours --dry-run via utils.WriteFile.

    """
    # Write file storage paths
    if not os.path.exists(self.opts.FILE_STORAGE_PATHS_FILE):
      cluster = self.config_data["cluster"]
      file_storage_dir = cluster.get("file_storage_dir")
      shared_file_storage_dir = cluster.get("shared_file_storage_dir")
      del cluster

      logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                   " for file storage; writing existing configuration values"
                   " into '%s'",
                   self.opts.FILE_STORAGE_PATHS_FILE)

      if file_storage_dir:
        logging.info("File storage directory: %s", file_storage_dir)
      if shared_file_storage_dir:
        logging.info("Shared file storage directory: %s",
                     shared_file_storage_dir)

      buf = StringIO()
      buf.write("# List automatically generated from configuration by\n")
      buf.write("# cfgupgrade at %s\n" % time.asctime())
      if file_storage_dir:
        buf.write("%s\n" % file_storage_dir)
      if shared_file_storage_dir:
        buf.write("%s\n" % shared_file_storage_dir)
      utils.WriteFile(file_name=self.opts.FILE_STORAGE_PATHS_FILE,
                      data=buf.getvalue(),
                      mode=0600,
                      dry_run=self.opts.dry_run,
                      backup=True)
542 543 @staticmethod
544 - def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
545 if old_key not in nodes_by_old_key: 546 logging.warning("Can't find node '%s' in configuration, " 547 "assuming that it's already up-to-date", old_key) 548 return old_key 549 return nodes_by_old_key[old_key][new_key_field]
550
  def ChangeNodeIndices(self, config_data, old_key_field, new_key_field):
    """Re-keys the nodes dict and rewrites all node references.

    Updates the master node, instance primary nodes and the node ids stored
    in DRBD disk logical_ids.

    @param old_key_field: node field the references currently use
    @param new_key_field: node field the references should use afterwards

    """
    def ChangeDiskNodeIndices(disk):
      # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be
      # considered when up/downgrading from/to any versions touching 2.9 on the
      # way.
      drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
      if disk["dev_type"] in drbd_disk_types:
        # The first two logical_id entries of a DRBD disk are its two nodes
        for i in range(0, 2):
          disk["logical_id"][i] = self.GetNewNodeIndex(nodes_by_old_key,
                                                       disk["logical_id"][i],
                                                       new_key_field)
      if "children" in disk:
        for child in disk["children"]:
          ChangeDiskNodeIndices(child)

    nodes_by_old_key = {}
    nodes_by_new_key = {}
    for (_, node) in config_data["nodes"].items():
      nodes_by_old_key[node[old_key_field]] = node
      nodes_by_new_key[node[new_key_field]] = node

    config_data["nodes"] = nodes_by_new_key

    cluster = config_data["cluster"]
    cluster["master_node"] = self.GetNewNodeIndex(nodes_by_old_key,
                                                  cluster["master_node"],
                                                  new_key_field)

    for inst in config_data["instances"].values():
      inst["primary_node"] = self.GetNewNodeIndex(nodes_by_old_key,
                                                  inst["primary_node"],
                                                  new_key_field)

    for disk in config_data["disks"].values():
      ChangeDiskNodeIndices(disk)
586 587 @staticmethod
588 - def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
589 insts_by_old_key = {} 590 insts_by_new_key = {} 591 for (_, inst) in config_data["instances"].items(): 592 insts_by_old_key[inst[old_key_field]] = inst 593 insts_by_new_key[inst[new_key_field]] = inst 594 595 config_data["instances"] = insts_by_new_key
  @OrFail("Changing node indices")
  def UpgradeNodeIndices(self):
    """Switches all node references in the configuration from names to UUIDs."""
    self.ChangeNodeIndices(self.config_data, "name", "uuid")
  @OrFail("Changing instance indices")
  def UpgradeInstanceIndices(self):
    """Re-keys the instances dictionary from instance names to UUIDs."""
    self.ChangeInstanceIndices(self.config_data, "name", "uuid")
604 605 @OrFail("Adding filters")
606 - def UpgradeFilters(self):
607 # pylint can't infer config_data type 608 # pylint: disable=E1103 609 filters = self.config_data.get("filters", None) 610 if not filters: 611 self.config_data["filters"] = {}
  @OrFail("Set top level disks")
  def UpgradeTopLevelDisks(self):
    """Upgrades the disks as config top level citizens.

    Moves every instance's disk objects into the top-level 'disks'
    dictionary (keyed by UUID) and replaces the per-instance disk lists
    with lists of UUIDs. A no-op if 'disks' already exists.

    """
    if "instances" not in self.config_data:
      raise Error("Can't find the 'instances' key in the configuration!")

    if "disks" in self.config_data:
      # Disks are already top level citizens
      return

    self.config_data["disks"] = dict()
    for iobj in self.config_data["instances"].values():
      disk_uuids = []
      for disk in iobj["disks"]:
        duuid = disk["uuid"]
        disk["serial_no"] = 1
        # Instances may not have the ctime value, and the Haskell serialization
        # will have set it to zero.
        disk["ctime"] = disk["mtime"] = iobj.get("ctime", 0)
        self.config_data["disks"][duuid] = disk
        disk_uuids.append(duuid)
      iobj["disks"] = disk_uuids
635 636 @OrFail("Removing disk template")
637 - def UpgradeDiskTemplate(self):
638 if "instances" not in self.config_data: 639 raise Error("Can't find the 'instances' dictionary in the configuration.") 640 instances = self.config_data["instances"] 641 for inst in instances.values(): 642 if "disk_template" in inst: 643 del inst["disk_template"]
644 645 # The following function is based on a method of class Disk with the same 646 # name, but adjusted to work with dicts and sets.
647 - def _ComputeAllNodes(self, disk):
648 """Recursively compute nodes given a top device.""" 649 nodes = set() 650 if disk["dev_type"] in constants.DTS_DRBD: 651 nodes = set(disk["logical_id"][:2]) 652 for child in disk.get("children", []): 653 nodes |= self._ComputeAllNodes(child) 654 return nodes
655
  def _RecursiveUpdateNodes(self, disk, nodes):
    """Sets the 'nodes' field on a disk and propagates it to all children."""
    disk["nodes"] = nodes
    for child in disk.get("children", []):
      self._RecursiveUpdateNodes(child, nodes)
660 661 @OrFail("Upgrading disk nodes")
662 - def UpgradeDiskNodes(self):
663 """Specify the nodes from which a disk is accessible in its definition. 664 665 For every disk that is attached to an instance, get the UUIDs of the nodes 666 that it's accessible from. There are three main cases: 667 1) Internally mirrored disks (DRBD): 668 These disks are accessible from two nodes, so the nodes list will include 669 these. Their children (data, meta) are also accessible from two nodes, 670 therefore they will inherit the nodes of the parent. 671 2) Externally mirrored disks (Blockdev, Ext, Gluster, RBD, Shared File): 672 These disks should be accessible from any node in the cluster, therefore the 673 nodes list will be empty. 674 3) Single-node disks (Plain, File): 675 These disks are accessible from one node only, therefore the nodes list will 676 consist only of the primary instance node. 677 """ 678 disks = self.config_data["disks"] 679 for instance in self.config_data["instances"].itervalues(): 680 # Get all disk nodes for an instance 681 instance_node = set([instance["primary_node"]]) 682 disk_nodes = set() 683 for disk_uuid in instance["disks"]: 684 disk_nodes |= self._ComputeAllNodes(disks[disk_uuid]) 685 all_nodes = list(instance_node | disk_nodes) 686 687 # Populate the `nodes` list field of each disk. 688 for disk_uuid in instance["disks"]: 689 disk = disks[disk_uuid] 690 if "nodes" in disk: 691 # The "nodes" field has already been added for this disk. 692 continue 693 694 if disk["dev_type"] in constants.DTS_INT_MIRROR: 695 self._RecursiveUpdateNodes(disk, all_nodes) 696 elif disk["dev_type"] in (constants.DT_PLAIN, constants.DT_FILE): 697 disk["nodes"] = all_nodes 698 else: 699 disk["nodes"] = []
700 701 @OrFail("Upgrading maintenance data")
702 - def UpgradeMaintenance(self):
703 # pylint can't infer config_data type 704 # pylint: disable=E1103 705 maintenance = self.config_data.get("maintenance", None) 706 if maintenance is None: 707 self.config_data["maintenance"] = {}
708
709 - def UpgradeAll(self):
710 self.config_data["version"] = version.BuildVersion(TARGET_MAJOR, 711 TARGET_MINOR, 0) 712 self.UpgradeRapiUsers() 713 self.UpgradeWatcher() 714 steps = [self.UpgradeFileStoragePaths, 715 self.UpgradeNetworks, 716 self.UpgradeCluster, 717 self.UpgradeGroups, 718 self.UpgradeInstances, 719 self.UpgradeTopLevelDisks, 720 self.UpgradeNodeIndices, 721 self.UpgradeInstanceIndices, 722 self.UpgradeFilters, 723 self.UpgradeDiskNodes, 724 self.UpgradeDiskTemplate, 725 self.UpgradeMaintenance] 726 for s in steps: 727 s() 728 return not self.errors
  # DOWNGRADE ------------------------------------------------------------

  def DowngradeAll(self):
    """Performs the downgrade to the previous stable version.

    Only the version number is rewritten; no other transformations are
    applied here.

    @return: C{True} if no errors were recorded

    """
    self.config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
                                                       DOWNGRADE_MINOR, 0)

    return not self.errors
738 - def _ComposePaths(self):
739 # We need to keep filenames locally because they might be renamed between 740 # versions. 741 self.opts.data_dir = os.path.abspath(self.opts.data_dir) 742 self.opts.CONFIG_DATA_PATH = self.opts.data_dir + "/config.data" 743 self.opts.SERVER_PEM_PATH = self.opts.data_dir + "/server.pem" 744 self.opts.CLIENT_PEM_PATH = self.opts.data_dir + "/client.pem" 745 self.opts.KNOWN_HOSTS_PATH = self.opts.data_dir + "/known_hosts" 746 self.opts.RAPI_CERT_FILE = self.opts.data_dir + "/rapi.pem" 747 self.opts.SPICE_CERT_FILE = self.opts.data_dir + "/spice.pem" 748 self.opts.SPICE_CACERT_FILE = self.opts.data_dir + "/spice-ca.pem" 749 self.opts.RAPI_USERS_FILE = self.opts.data_dir + "/rapi/users" 750 self.opts.RAPI_USERS_FILE_PRE24 = self.opts.data_dir + "/rapi_users" 751 self.opts.CONFD_HMAC_KEY = self.opts.data_dir + "/hmac.key" 752 self.opts.CDS_FILE = self.opts.data_dir + "/cluster-domain-secret" 753 self.opts.SSCONF_MASTER_NODE = self.opts.data_dir + "/ssconf_master_node" 754 self.opts.WATCHER_STATEFILE = self.opts.data_dir + "/watcher.data" 755 self.opts.FILE_STORAGE_PATHS_FILE = (self.opts.conf_dir + 756 "/file-storage-paths")
757
  def _AskUser(self):
    """Asks for confirmation before up/downgrading, unless --force is given.

    Exits with EXIT_FAILURE if the user declines.

    """
    if not self.opts.force:
      if self.opts.downgrade:
        usertext = ("The configuration is going to be DOWNGRADED "
                    "to version %s.%s. Some configuration data might be "
                    " removed if they don't fit"
                    " in the old format. Please make sure you have read the"
                    " upgrade notes (available in the UPGRADE file and included"
                    " in other documentation formats) to understand what they"
                    " are. Continue with *DOWNGRADING* the configuration?" %
                    (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
      else:
        usertext = ("Please make sure you have read the upgrade notes for"
                    " Ganeti %s (available in the UPGRADE file and included"
                    " in other documentation formats). Continue with upgrading"
                    " configuration?" % constants.RELEASE_VERSION)
      if not cli.AskUser(usertext):
        sys.exit(constants.EXIT_FAILURE)
777 - def _Downgrade(self, config_major, config_minor, config_version, 778 config_revision):
779 if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or 780 (config_major == DOWNGRADE_MAJOR and 781 config_minor == DOWNGRADE_MINOR)): 782 raise Error("Downgrade supported only from the latest version (%s.%s)," 783 " found %s (%s.%s.%s) instead" % 784 (TARGET_MAJOR, TARGET_MINOR, config_version, config_major, 785 config_minor, config_revision)) 786 if not self.DowngradeAll(): 787 raise Error("Downgrade failed:\n%s" % "\n".join(self.errors))
788
  def _TestLoadingConfigFile(self):
    """Re-loads and verifies the written configuration, reports the outcome.

    Skipped under --dry-run or --no-verify (the latter is implied by
    --downgrade, see Run).

    """
    # test loading the config file
    all_ok = True
    if not (self.opts.dry_run or self.opts.no_verify):
      logging.info("Testing the new config file...")
      cfg = config.ConfigWriter(cfg_file=self.opts.CONFIG_DATA_PATH,
                                accept_foreign=self.opts.ignore_hostname,
                                offline=True)
      # if we reached this, it's all fine
      vrfy = cfg.VerifyConfig()
      if vrfy:
        logging.error("Errors after conversion:")
        for item in vrfy:
          logging.error(" - %s", item)
        all_ok = False
      else:
        logging.info("File loaded successfully after upgrading")
      del cfg

    if self.opts.downgrade:
      action = "downgraded"
      out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
    else:
      action = "upgraded"
      out_ver = constants.RELEASE_VERSION
    if all_ok:
      cli.ToStderr("Configuration successfully %s to version %s.",
                   action, out_ver)
    else:
      cli.ToStderr("Configuration %s to version %s, but there are errors."
                   "\nPlease review the file.", action, out_ver)