
Source Code for Module ganeti.tools.cfgupgrade

#
#

# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Library of the tools/cfgupgrade utility.

This code handles only the types supported by simplejson. For example,
a 'set' is represented as a 'list'.

"""

import copy
import os
import os.path
import sys
import logging
import optparse
import time
import functools
from cStringIO import StringIO

from ganeti import cli
from ganeti import constants
from ganeti import serializer
from ganeti import utils
from ganeti import bootstrap
from ganeti import config
from ganeti import pathutils
from ganeti import netutils

from ganeti.utils import version


#: Target major version we will upgrade to
TARGET_MAJOR = 2
#: Target minor version we will upgrade to
TARGET_MINOR = 14
#: Target major version for downgrade
DOWNGRADE_MAJOR = 2
#: Target minor version for downgrade
DOWNGRADE_MINOR = 13
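
# Illustrative note (an assumption about ganeti.utils.version, not part of
# the original module): configuration versions are packed integers of the
# form major * 1000000 + minor * 10000 + revision, so e.g.
#   version.BuildVersion(2, 14, 0)  ==> 2140000
#   version.SplitVersion(2130000)   ==> (2, 13, 0)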

# map of legacy device types
# (mapping differing old LD_* constants to new DT_* constants)
DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
# (mapping differing new DT_* constants to old LD_* constants)
DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())
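
# Sketch of the resulting maps (assuming the usual constant values
# constants.DT_PLAIN == "plain" and constants.DT_DRBD8 == "drbd"):
#   DEV_TYPE_OLD_NEW == {"lvm": "plain", "drbd8": "drbd"}
#   DEV_TYPE_NEW_OLD == {"plain": "lvm", "drbd": "drbd8"}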


class Error(Exception):
  """Generic exception"""
  pass


def ParseOptions(args=None):
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  return parser.parse_args(args=args)


def OrFail(description=None):
  """Make failure non-fatal and improve reporting."""
  def wrapper(f):
    @functools.wraps(f)
    def wrapped(self):
      safety = copy.deepcopy(self.config_data)
      try:
        f(self)
      except BaseException, e:
        msg = "%s failed:\n%s" % (description or f.func_name, e)
        logging.exception(msg)
        self.config_data = safety
        self.errors.append(msg)
    return wrapped
  return wrapper

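# Illustrative use of OrFail (a sketch; "UpgradeWidgets" is hypothetical):
#
#   @OrFail("Upgrading widgets")
#   def UpgradeWidgets(self):
#     self.config_data["widgets"] = {}
#
# If the step raises, self.config_data is restored from the deep copy, the
# error is recorded in self.errors, and the remaining steps still run;
# UpgradeAll()/DowngradeAll() report success only if self.errors stays empty.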

class CfgUpgrade(object):
  def __init__(self, opts, args):
    self.opts = opts
    self.args = args
    self.errors = []

  def Run(self):
    """Main program.

    """
    self._ComposePaths()

    self.SetupLogging()

    # Option checking
    if self.args:
      raise Error("No arguments expected")
    if self.opts.downgrade and not self.opts.no_verify:
      self.opts.no_verify = True

    # Check master name
    if not (self.CheckHostname(self.opts.SSCONF_MASTER_NODE) or
            self.opts.ignore_hostname):
      logging.error("Aborting due to hostname mismatch")
      sys.exit(constants.EXIT_FAILURE)

    self._AskUser()

    # Check whether it's a Ganeti configuration directory
    if not (os.path.isfile(self.opts.CONFIG_DATA_PATH) and
            os.path.isfile(self.opts.SERVER_PEM_PATH) and
            os.path.isfile(self.opts.KNOWN_HOSTS_PATH)):
      raise Error(("%s does not seem to be a Ganeti configuration"
                   " directory") % self.opts.data_dir)

    if not os.path.isdir(self.opts.conf_dir):
      raise Error("Not a directory: %s" % self.opts.conf_dir)

    self.config_data = serializer.LoadJson(utils.ReadFile(
        self.opts.CONFIG_DATA_PATH))

    try:
      config_version = self.config_data["version"]
    except KeyError:
      raise Error("Unable to determine configuration version")

    (config_major, config_minor, config_revision) = \
      version.SplitVersion(config_version)

    logging.info("Found configuration version %s (%d.%d.%d)",
                 config_version, config_major, config_minor, config_revision)

    if "config_version" in self.config_data["cluster"]:
      raise Error("Inconsistent configuration: found config_version in"
                  " configuration file")

    # Downgrade to the previous stable version
    if self.opts.downgrade:
      self._Downgrade(config_major, config_minor, config_version,
                      config_revision)

    # Upgrade from 2.{0..13} to 2.14
    elif config_major == 2 and config_minor in range(0, 14):
      if config_revision != 0:
        logging.warning("Config revision is %s, not 0", config_revision)
      if not self.UpgradeAll():
        raise Error("Upgrade failed:\n%s" % '\n'.join(self.errors))

    elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
      logging.info("No changes necessary")

    else:
      raise Error("Configuration version %d.%d.%d not supported by this tool" %
                  (config_major, config_minor, config_revision))

    try:
      logging.info("Writing configuration file to %s",
                   self.opts.CONFIG_DATA_PATH)
      utils.WriteFile(file_name=self.opts.CONFIG_DATA_PATH,
                      data=serializer.DumpJson(self.config_data),
                      mode=0600,
                      dry_run=self.opts.dry_run,
                      backup=True)

      if not self.opts.dry_run:
        # This creates the cluster certificate if it does not exist yet.
        # In this case, we do not automatically create a client certificate
        # as well, because if the cluster certificate did not exist before,
        # no client certificate will exist on any node yet. In this case
        # all client certificates should be renewed by 'gnt-cluster
        # renew-crypto --new-node-certificates'. This will be enforced
        # by a nagging warning in 'gnt-cluster verify'.
        bootstrap.GenerateClusterCrypto(
          False, False, False, False, False, False, None,
          nodecert_file=self.opts.SERVER_PEM_PATH,
          rapicert_file=self.opts.RAPI_CERT_FILE,
          spicecert_file=self.opts.SPICE_CERT_FILE,
          spicecacert_file=self.opts.SPICE_CACERT_FILE,
          hmackey_file=self.opts.CONFD_HMAC_KEY,
          cds_file=self.opts.CDS_FILE)

    except Exception:
      logging.critical("Writing configuration failed. It is probably in an"
                       " inconsistent state and needs manual intervention.")
      raise

    self._TestLoadingConfigFile()

  def SetupLogging(self):
    """Configures the logging module.

    """
    formatter = logging.Formatter("%(asctime)s: %(message)s")

    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if self.opts.debug:
      stderr_handler.setLevel(logging.NOTSET)
    elif self.opts.verbose:
      stderr_handler.setLevel(logging.INFO)
    else:
      stderr_handler.setLevel(logging.WARNING)

    root_logger = logging.getLogger("")
    root_logger.setLevel(logging.NOTSET)
    root_logger.addHandler(stderr_handler)

  @staticmethod
  def CheckHostname(path):
    """Ensures hostname matches ssconf value.

    @param path: Path to ssconf file

    """
    ssconf_master_node = utils.ReadOneLineFile(path)
    hostname = netutils.GetHostname().name

    if ssconf_master_node == hostname:
      return True

    logging.warning("Warning: ssconf says master node is '%s', but this"
                    " machine's name is '%s'; this tool must be run on"
                    " the master node", ssconf_master_node, hostname)
    return False

  @staticmethod
  def _FillIPolicySpecs(default_ipolicy, ipolicy):
    if "minmax" in ipolicy:
      for (key, spec) in ipolicy["minmax"][0].items():
        for (par, val) in default_ipolicy["minmax"][0][key].items():
          if par not in spec:
            spec[par] = val

  def UpgradeIPolicy(self, ipolicy, default_ipolicy, isgroup):
    minmax_keys = ["min", "max"]
    if any((k in ipolicy) for k in minmax_keys):
      minmax = {}
      for key in minmax_keys:
        if key in ipolicy:
          if ipolicy[key]:
            minmax[key] = ipolicy[key]
          del ipolicy[key]
      if minmax:
        ipolicy["minmax"] = [minmax]
    if isgroup and "std" in ipolicy:
      del ipolicy["std"]
    self._FillIPolicySpecs(default_ipolicy, ipolicy)
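
  # Sketch of the reshaping above (spec key chosen for illustration): an old
  # flat policy such as
  #   {"min": {"disk-size": 128}, "max": {"disk-size": 1048576}}
  # becomes
  #   {"minmax": [{"min": {"disk-size": 128}, "max": {"disk-size": 1048576}}]}
  # and any spec parameters still missing are copied from default_ipolicy.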

  @OrFail("Setting networks")
  def UpgradeNetworks(self):
    assert isinstance(self.config_data, dict)
    # pylint can't infer config_data type
    # pylint: disable=E1103
    networks = self.config_data.get("networks", None)
    if not networks:
      self.config_data["networks"] = {}

  @OrFail("Upgrading cluster")
  def UpgradeCluster(self):
    assert isinstance(self.config_data, dict)
    # pylint can't infer config_data type
    # pylint: disable=E1103
    cluster = self.config_data.get("cluster", None)
    if cluster is None:
      raise Error("Cannot find cluster")
    ipolicy = cluster.setdefault("ipolicy", None)
    if ipolicy:
      self.UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
    ial_params = cluster.get("default_iallocator_params", None)
    if not ial_params:
      cluster["default_iallocator_params"] = {}
    if not "candidate_certs" in cluster:
      cluster["candidate_certs"] = {}
    cluster["instance_communication_network"] = \
      cluster.get("instance_communication_network", "")
    cluster["install_image"] = \
      cluster.get("install_image", "")
    cluster["zeroing_image"] = \
      cluster.get("zeroing_image", "")
    cluster["compression_tools"] = \
      cluster.get("compression_tools", constants.IEC_DEFAULT_TOOLS)
    if "enabled_user_shutdown" not in cluster:
      cluster["enabled_user_shutdown"] = False
    cluster["data_collectors"] = cluster.get("data_collectors", {})
    for name in constants.DATA_COLLECTOR_NAMES:
      cluster["data_collectors"][name] = \
        cluster["data_collectors"].get(
            name, dict(active=True,
                       interval=constants.MOND_TIME_INTERVAL * 1e6))

  @OrFail("Upgrading groups")
  def UpgradeGroups(self):
    cl_ipolicy = self.config_data["cluster"].get("ipolicy")
    for group in self.config_data["nodegroups"].values():
      networks = group.get("networks", None)
      if not networks:
        group["networks"] = {}
      ipolicy = group.get("ipolicy", None)
      if ipolicy:
        if cl_ipolicy is None:
          raise Error("A group defines an instance policy but there is no"
                      " instance policy at cluster level")
        self.UpgradeIPolicy(ipolicy, cl_ipolicy, True)

  def GetExclusiveStorageValue(self):
    """Return a conservative value of the exclusive_storage flag.

    Return C{True} if the cluster or at least a nodegroup have the flag set.

    """
    ret = False
    cluster = self.config_data["cluster"]
    ndparams = cluster.get("ndparams")
    if ndparams is not None and ndparams.get("exclusive_storage"):
      ret = True
    for group in self.config_data["nodegroups"].values():
      ndparams = group.get("ndparams")
      if ndparams is not None and ndparams.get("exclusive_storage"):
        ret = True
    return ret

  def RemovePhysicalId(self, disk):
    if "children" in disk:
      for d in disk["children"]:
        self.RemovePhysicalId(d)
    if "physical_id" in disk:
      del disk["physical_id"]

  def ChangeDiskDevType(self, disk, dev_type_map):
    """Replaces disk's dev_type attributes according to the given map.

    This can be used both for upgrading and for downgrading the disks.
    """
    if disk["dev_type"] in dev_type_map:
      disk["dev_type"] = dev_type_map[disk["dev_type"]]
    if "children" in disk:
      for child in disk["children"]:
        self.ChangeDiskDevType(child, dev_type_map)

  def UpgradeDiskDevType(self, disk):
    """Upgrades the disks' device type."""
    self.ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)

  @staticmethod
  def _ConvertNicNameToUuid(iobj, network2uuid):
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

  @classmethod
  def AssignUuid(cls, disk):
    if not "uuid" in disk:
      disk["uuid"] = utils.io.NewUUID()
    if "children" in disk:
      for d in disk["children"]:
        cls.AssignUuid(d)

  def _ConvertDiskAndCheckMissingSpindles(self, iobj, instance):
    missing_spindles = False
    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    if not all(isinstance(d, str) for d in disks):
      # Disks are not top level citizens
      for idx, dobj in enumerate(disks):
        self.RemovePhysicalId(dobj)

        expected = "disk/%s" % idx
        current = dobj.get("iv_name", "")
        if current != expected:
          logging.warning("Updating iv_name for instance %s/disk %s"
                          " from '%s' to '%s'",
                          instance, idx, current, expected)
          dobj["iv_name"] = expected

        if "dev_type" in dobj:
          self.UpgradeDiskDevType(dobj)

        if not "spindles" in dobj:
          missing_spindles = True

        self.AssignUuid(dobj)
    return missing_spindles

  @OrFail("Upgrading instance with spindles")
  def UpgradeInstances(self):
    """Upgrades the instances' configuration."""

    network2uuid = dict((n["name"], n["uuid"])
                        for n in self.config_data["networks"].values())
    if "instances" not in self.config_data:
      raise Error("Can't find the 'instances' key in the configuration!")

    missing_spindles = False
    for instance, iobj in self.config_data["instances"].items():
      self._ConvertNicNameToUuid(iobj, network2uuid)
      if self._ConvertDiskAndCheckMissingSpindles(iobj, instance):
        missing_spindles = True
      if "admin_state_source" not in iobj:
        iobj["admin_state_source"] = constants.ADMIN_SOURCE

    if self.GetExclusiveStorageValue() and missing_spindles:
      # We cannot be sure that the instances that are missing spindles have
      # exclusive storage enabled (the check would be more complicated), so we
      # give a noncommittal message
      logging.warning("Some instance disks may need their spindles parameter"
                      " updated; you can check by running"
                      " 'gnt-cluster verify', and fix any problem with"
                      " 'gnt-cluster repair-disk-sizes'")

  def UpgradeRapiUsers(self):
    if (os.path.isfile(self.opts.RAPI_USERS_FILE_PRE24) and
        not os.path.islink(self.opts.RAPI_USERS_FILE_PRE24)):
      if os.path.exists(self.opts.RAPI_USERS_FILE):
        raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                    " already exists at %s" %
                    (self.opts.RAPI_USERS_FILE_PRE24,
                     self.opts.RAPI_USERS_FILE))
      logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                   self.opts.RAPI_USERS_FILE_PRE24, self.opts.RAPI_USERS_FILE)
      if not self.opts.dry_run:
        utils.RenameFile(self.opts.RAPI_USERS_FILE_PRE24,
                         self.opts.RAPI_USERS_FILE, mkdir=True,
                         mkdir_mode=0750)

    # Create a symlink for RAPI users file
    if (not (os.path.islink(self.opts.RAPI_USERS_FILE_PRE24) or
             os.path.isfile(self.opts.RAPI_USERS_FILE_PRE24)) and
        os.path.isfile(self.opts.RAPI_USERS_FILE)):
      logging.info("Creating symlink from %s to %s",
                   self.opts.RAPI_USERS_FILE_PRE24, self.opts.RAPI_USERS_FILE)
      if not self.opts.dry_run:
        os.symlink(self.opts.RAPI_USERS_FILE, self.opts.RAPI_USERS_FILE_PRE24)

  def UpgradeWatcher(self):
    # Remove old watcher state file if it exists
    if os.path.exists(self.opts.WATCHER_STATEFILE):
      logging.info("Removing watcher state file %s",
                   self.opts.WATCHER_STATEFILE)
      if not self.opts.dry_run:
        utils.RemoveFile(self.opts.WATCHER_STATEFILE)

  @OrFail("Upgrading file storage paths")
  def UpgradeFileStoragePaths(self):
    # Write file storage paths
    if not os.path.exists(self.opts.FILE_STORAGE_PATHS_FILE):
      cluster = self.config_data["cluster"]
      file_storage_dir = cluster.get("file_storage_dir")
      shared_file_storage_dir = cluster.get("shared_file_storage_dir")
      del cluster

      logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                   " for file storage; writing existing configuration values"
                   " into '%s'",
                   self.opts.FILE_STORAGE_PATHS_FILE)

      if file_storage_dir:
        logging.info("File storage directory: %s", file_storage_dir)
      if shared_file_storage_dir:
        logging.info("Shared file storage directory: %s",
                     shared_file_storage_dir)

      buf = StringIO()
      buf.write("# List automatically generated from configuration by\n")
      buf.write("# cfgupgrade at %s\n" % time.asctime())
      if file_storage_dir:
        buf.write("%s\n" % file_storage_dir)
      if shared_file_storage_dir:
        buf.write("%s\n" % shared_file_storage_dir)
      utils.WriteFile(file_name=self.opts.FILE_STORAGE_PATHS_FILE,
                      data=buf.getvalue(),
                      mode=0600,
                      dry_run=self.opts.dry_run,
                      backup=True)

  @staticmethod
  def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
    if old_key not in nodes_by_old_key:
      logging.warning("Can't find node '%s' in configuration, "
                      "assuming that it's already up-to-date", old_key)
      return old_key
    return nodes_by_old_key[old_key][new_key_field]

  def ChangeNodeIndices(self, config_data, old_key_field, new_key_field):
    def ChangeDiskNodeIndices(disk):
      # Note: 'drbd8' is a legacy device type from pre 2.9 and needs to be
      # considered when up/downgrading from/to any versions touching 2.9 on
      # the way.
      drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
      if disk["dev_type"] in drbd_disk_types:
        for i in range(0, 2):
          disk["logical_id"][i] = self.GetNewNodeIndex(nodes_by_old_key,
                                                       disk["logical_id"][i],
                                                       new_key_field)
      if "children" in disk:
        for child in disk["children"]:
          ChangeDiskNodeIndices(child)

    nodes_by_old_key = {}
    nodes_by_new_key = {}
    for (_, node) in config_data["nodes"].items():
      nodes_by_old_key[node[old_key_field]] = node
      nodes_by_new_key[node[new_key_field]] = node

    config_data["nodes"] = nodes_by_new_key

    cluster = config_data["cluster"]
    cluster["master_node"] = self.GetNewNodeIndex(nodes_by_old_key,
                                                  cluster["master_node"],
                                                  new_key_field)

    for inst in config_data["instances"].values():
      inst["primary_node"] = self.GetNewNodeIndex(nodes_by_old_key,
                                                  inst["primary_node"],
                                                  new_key_field)

    for disk in config_data["disks"].values():
      ChangeDiskNodeIndices(disk)
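
  # Sketch of the re-indexing above (values invented for illustration): with
  # old_key_field="name" and new_key_field="uuid", a nodes dict such as
  #   {"node1.example.com": {"name": "node1.example.com", "uuid": "1a2b..."}}
  # is re-keyed to
  #   {"1a2b...": {"name": "node1.example.com", "uuid": "1a2b..."}}
  # and every reference to the node (cluster "master_node", each instance's
  # "primary_node", and the node slots of DRBD "logical_id" tuples) is
  # rewritten accordingly.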

  @staticmethod
  def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
    insts_by_old_key = {}
    insts_by_new_key = {}
    for (_, inst) in config_data["instances"].items():
      insts_by_old_key[inst[old_key_field]] = inst
      insts_by_new_key[inst[new_key_field]] = inst

    config_data["instances"] = insts_by_new_key

  @OrFail("Changing node indices")
  def UpgradeNodeIndices(self):
    self.ChangeNodeIndices(self.config_data, "name", "uuid")

  @OrFail("Changing instance indices")
  def UpgradeInstanceIndices(self):
    self.ChangeInstanceIndices(self.config_data, "name", "uuid")

  @OrFail("Adding filters")
  def UpgradeFilters(self):
    # pylint can't infer config_data type
    # pylint: disable=E1103
    filters = self.config_data.get("filters", None)
    if not filters:
      self.config_data["filters"] = {}

  @OrFail("Set top level disks")
  def UpgradeTopLevelDisks(self):
    """Upgrades the disks as config top level citizens."""
    if "instances" not in self.config_data:
      raise Error("Can't find the 'instances' key in the configuration!")

    if "disks" in self.config_data:
      # Disks are already top level citizens
      return

    self.config_data["disks"] = dict()
    for iobj in self.config_data["instances"].values():
      disk_uuids = []
      for disk in iobj["disks"]:
        duuid = disk["uuid"]
        disk["serial_no"] = 1
        # Instances may not have the ctime value, and the Haskell
        # serialization will have set it to zero.
        disk["ctime"] = disk["mtime"] = iobj.get("ctime", 0)
        self.config_data["disks"][duuid] = disk
        disk_uuids.append(duuid)
      iobj["disks"] = disk_uuids
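
  # Sketch of the result (uuid invented for illustration): each instance's
  # "disks" list now holds uuids only, e.g.
  #   iobj["disks"] == ["b2a5c1..."]
  # while the full disk object lives at config_data["disks"]["b2a5c1..."].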

  @OrFail("Removing disk template")
  def UpgradeDiskTemplate(self):
    if "instances" not in self.config_data:
      raise Error("Can't find the 'instances' dictionary in the configuration.")
    instances = self.config_data["instances"]
    for inst in instances.values():
      if "disk_template" in inst:
        del inst["disk_template"]

  # The following function is based on a method of class Disk with the same
  # name, but adjusted to work with dicts and sets.
  def _ComputeAllNodes(self, disk):
    """Recursively compute nodes given a top device."""
    nodes = set()
    if disk["dev_type"] in constants.DTS_DRBD:
      nodes = set(disk["logical_id"][:2])
    for child in disk.get("children", []):
      nodes |= self._ComputeAllNodes(child)
    return nodes

  def _RecursiveUpdateNodes(self, disk, nodes):
    disk["nodes"] = nodes
    for child in disk.get("children", []):
      self._RecursiveUpdateNodes(child, nodes)

  @OrFail("Upgrading disk nodes")
  def UpgradeDiskNodes(self):
    """Specify the nodes from which a disk is accessible in its definition.

    For every disk that is attached to an instance, get the UUIDs of the
    nodes that it's accessible from. There are three main cases:
    1) Internally mirrored disks (DRBD):
    These disks are accessible from two nodes, so the nodes list will
    include these. Their children (data, meta) are also accessible from
    two nodes, therefore they will inherit the nodes of the parent.
    2) Externally mirrored disks (Blockdev, Ext, Gluster, RBD, Shared File):
    These disks should be accessible from any node in the cluster,
    therefore the nodes list will be empty.
    3) Single-node disks (Plain, File):
    These disks are accessible from one node only, therefore the nodes
    list will consist only of the primary instance node.
    """
    disks = self.config_data["disks"]
    for instance in self.config_data["instances"].itervalues():
      # Get all disk nodes for an instance
      instance_node = set([instance["primary_node"]])
      disk_nodes = set()
      for disk_uuid in instance["disks"]:
        disk_nodes |= self._ComputeAllNodes(disks[disk_uuid])
      all_nodes = list(instance_node | disk_nodes)

      # Populate the `nodes` list field of each disk.
      for disk_uuid in instance["disks"]:
        disk = disks[disk_uuid]
        if "nodes" in disk:
          # The "nodes" field has already been added for this disk.
          continue

        if disk["dev_type"] in constants.DTS_INT_MIRROR:
          self._RecursiveUpdateNodes(disk, all_nodes)
        elif disk["dev_type"] in (constants.DT_PLAIN, constants.DT_FILE):
          disk["nodes"] = all_nodes
        else:
          disk["nodes"] = []
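
  # Sketch of the three cases above (node uuids invented for illustration):
  #   DRBD disk:           disk["nodes"] == ["node-uuid-A", "node-uuid-B"]
  #                        (children inherit the same list)
  #   RBD/Ext/etc. disk:   disk["nodes"] == []   (accessible cluster-wide)
  #   Plain/File disk:     disk["nodes"] == ["primary-node-uuid"]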

  def UpgradeAll(self):
    self.config_data["version"] = version.BuildVersion(TARGET_MAJOR,
                                                       TARGET_MINOR, 0)
    self.UpgradeRapiUsers()
    self.UpgradeWatcher()
    steps = [self.UpgradeFileStoragePaths,
             self.UpgradeNetworks,
             self.UpgradeCluster,
             self.UpgradeGroups,
             self.UpgradeInstances,
             self.UpgradeTopLevelDisks,
             self.UpgradeNodeIndices,
             self.UpgradeInstanceIndices,
             self.UpgradeFilters,
             self.UpgradeDiskNodes,
             self.UpgradeDiskTemplate]
    for s in steps:
      s()
    return not self.errors

  # DOWNGRADE ------------------------------------------------------------

  def _RecursiveRemoveNodes(self, disk):
    if "nodes" in disk:
      del disk["nodes"]
    for child in disk.get("children", []):
      self._RecursiveRemoveNodes(child)

  @OrFail("Downgrading disk nodes")
  def DowngradeDiskNodes(self):
    if "disks" not in self.config_data:
      raise Error("Can't find the 'disks' dictionary in the configuration.")
    for disk in self.config_data["disks"].itervalues():
      self._RecursiveRemoveNodes(disk)

  @OrFail("Removing forthcoming instances")
  def DowngradeForthcomingInstances(self):
    if "instances" not in self.config_data:
      raise Error("Can't find the 'instances' dictionary in the configuration.")
    instances = self.config_data["instances"]
    uuids = instances.keys()
    for uuid in uuids:
      if instances[uuid].get("forthcoming"):
        del instances[uuid]

  @OrFail("Removing forthcoming disks")
  def DowngradeForthcomingDisks(self):
    if "instances" not in self.config_data:
      raise Error("Can't find the 'instances' dictionary in the configuration.")
    instances = self.config_data["instances"]
    if "disks" not in self.config_data:
      raise Error("Can't find the 'disks' dictionary in the configuration.")
    disks = self.config_data["disks"]
    uuids = disks.keys()
    for uuid in uuids:
      if disks[uuid].get("forthcoming"):
        del disks[uuid]
        # Iterate over the instance objects (not the dict keys) and drop
        # the removed disk from their "disks" lists.
        for inst in instances.values():
          if "disks" in inst and uuid in inst["disks"]:
            inst["disks"].remove(uuid)

  @OrFail("Re-adding disk template")
  def DowngradeDiskTemplate(self):
    if "instances" not in self.config_data:
      raise Error("Can't find the 'instances' dictionary in the configuration.")
    instances = self.config_data["instances"]
    if "disks" not in self.config_data:
      raise Error("Can't find the 'disks' dictionary in the configuration.")
    disks = self.config_data["disks"]
    for inst in instances.values():
      instance_disks = [disks.get(uuid) for uuid in inst["disks"]]
      if any(d is None for d in instance_disks):
        raise Error("Can't find all disks of instance %s in the"
                    " configuration." % inst["name"])
      dev_types = set(d["dev_type"] for d in instance_disks)
      if len(dev_types) > 1:
        raise Error("Instance %s has mixed disk types: %s" %
                    (inst["name"], ', '.join(dev_types)))
      elif len(dev_types) < 1:
        inst["disk_template"] = constants.DT_DISKLESS
      else:
        inst["disk_template"] = dev_types.pop()
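
  # Sketch of the template reconstruction above: an instance whose disks all
  # have dev_type "drbd" gets disk_template "drbd" back; an instance with no
  # disks becomes diskless; mixed dev_types are rejected as an error.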

  def DowngradeAll(self):
    self.config_data["version"] = version.BuildVersion(DOWNGRADE_MAJOR,
                                                       DOWNGRADE_MINOR, 0)
    steps = [self.DowngradeForthcomingInstances,
             self.DowngradeForthcomingDisks,
             self.DowngradeDiskNodes,
             self.DowngradeDiskTemplate]
    for s in steps:
      s()
    return not self.errors

  def _ComposePaths(self):
    # We need to keep filenames locally because they might be renamed between
    # versions.
    self.opts.data_dir = os.path.abspath(self.opts.data_dir)
    self.opts.CONFIG_DATA_PATH = self.opts.data_dir + "/config.data"
    self.opts.SERVER_PEM_PATH = self.opts.data_dir + "/server.pem"
    self.opts.CLIENT_PEM_PATH = self.opts.data_dir + "/client.pem"
    self.opts.KNOWN_HOSTS_PATH = self.opts.data_dir + "/known_hosts"
    self.opts.RAPI_CERT_FILE = self.opts.data_dir + "/rapi.pem"
    self.opts.SPICE_CERT_FILE = self.opts.data_dir + "/spice.pem"
    self.opts.SPICE_CACERT_FILE = self.opts.data_dir + "/spice-ca.pem"
    self.opts.RAPI_USERS_FILE = self.opts.data_dir + "/rapi/users"
    self.opts.RAPI_USERS_FILE_PRE24 = self.opts.data_dir + "/rapi_users"
    self.opts.CONFD_HMAC_KEY = self.opts.data_dir + "/hmac.key"
    self.opts.CDS_FILE = self.opts.data_dir + "/cluster-domain-secret"
    self.opts.SSCONF_MASTER_NODE = self.opts.data_dir + "/ssconf_master_node"
    self.opts.WATCHER_STATEFILE = self.opts.data_dir + "/watcher.data"
    self.opts.FILE_STORAGE_PATHS_FILE = (self.opts.conf_dir +
                                         "/file-storage-paths")
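
  # Sketch of the composed paths (assuming the stock pathutils defaults,
  # DATA_DIR == "/var/lib/ganeti" and CONF_DIR == "/etc/ganeti"):
  #   CONFIG_DATA_PATH        == "/var/lib/ganeti/config.data"
  #   SSCONF_MASTER_NODE      == "/var/lib/ganeti/ssconf_master_node"
  #   FILE_STORAGE_PATHS_FILE == "/etc/ganeti/file-storage-paths"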

  def _AskUser(self):
    if not self.opts.force:
      if self.opts.downgrade:
        usertext = ("The configuration is going to be DOWNGRADED"
                    " to version %s.%s. Some configuration data might be"
                    " removed if it doesn't fit in the old format. Please"
                    " make sure you have read the upgrade notes (available"
                    " in the UPGRADE file and included in other"
                    " documentation formats) to understand what they are."
                    " Continue with *DOWNGRADING* the configuration?" %
                    (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
      else:
        usertext = ("Please make sure you have read the upgrade notes for"
                    " Ganeti %s (available in the UPGRADE file and included"
                    " in other documentation formats). Continue with"
                    " upgrading configuration?" % constants.RELEASE_VERSION)
      if not cli.AskUser(usertext):
        sys.exit(constants.EXIT_FAILURE)

  def _Downgrade(self, config_major, config_minor, config_version,
                 config_revision):
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    if not self.DowngradeAll():
      raise Error("Downgrade failed:\n%s" % "\n".join(self.errors))

  def _TestLoadingConfigFile(self):
    # test loading the config file
    all_ok = True
    if not (self.opts.dry_run or self.opts.no_verify):
      logging.info("Testing the new config file...")
      cfg = config.ConfigWriter(cfg_file=self.opts.CONFIG_DATA_PATH,
                                accept_foreign=self.opts.ignore_hostname,
                                offline=True)
      # if we reached this, it's all fine
      vrfy = cfg.VerifyConfig()
      if vrfy:
        logging.error("Errors after conversion:")
        for item in vrfy:
          logging.error(" - %s", item)
        all_ok = False
      else:
        logging.info("File loaded successfully after upgrading")
      del cfg

    if self.opts.downgrade:
      action = "downgraded"
      out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
    else:
      action = "upgraded"
      out_ver = constants.RELEASE_VERSION
    if all_ok:
      cli.ToStderr("Configuration successfully %s to version %s.",
                   action, out_ver)
    else:
      cli.ToStderr("Configuration %s to version %s, but there are errors."
                   "\nPlease review the file.", action, out_ver)