30 """Library of the tools/cfgupgrade utility.
31
32 This code handles only the types supported by simplejson. As an
33 example, 'set' is a 'list'.
34
35 """
36
37 import copy
38 import os
39 import os.path
40 import sys
41 import logging
42 import optparse
43 import time
44 import functools
45 from cStringIO import StringIO
46
47 from ganeti import cli
48 from ganeti import constants
49 from ganeti import serializer
50 from ganeti import utils
51 from ganeti import bootstrap
52 from ganeti import config
53 from ganeti import pathutils
54 from ganeti import netutils
55
56 from ganeti.utils import version
57


#: Target major version we will upgrade to
TARGET_MAJOR = 2
#: Target minor version we will upgrade to
TARGET_MINOR = 15

#: Major version to downgrade to
DOWNGRADE_MAJOR = 2
#: Minor version to downgrade to
DOWNGRADE_MINOR = 14

# Map of legacy device type names to the current constants, and back
DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}
DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())


class Error(Exception):
  """Generic exception"""
  pass


def ParseOptions(args=None):
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  return parser.parse_args(args=args)


def OrFail(description=None):
  """Make failure non-fatal and improve reporting."""
  def wrapper(f):
    @functools.wraps(f)
    def wrapped(self):
      safety = copy.deepcopy(self.config_data)
      try:
        f(self)
      except BaseException, e:
        msg = "%s failed:\n%s" % (description or f.func_name, e)
        logging.exception(msg)
        self.config_data = safety
        self.errors.append(msg)
    return wrapped
  return wrapper
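
# OrFail is used below to decorate the individual upgrade steps, e.g. (step
# name hypothetical):
#
#   @OrFail("Upgrading widgets")
#   def UpgradeWidgets(self):
#     ...
#
# If the wrapped step raises, the decorator restores self.config_data from
# the snapshot taken beforehand and records the failure in self.errors
# instead of aborting the whole conversion.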


class CfgUpgrade(object):
  def __init__(self, opts, args):
    self.opts = opts
    self.args = args
    self.config_data = None
    self.errors = []

  def Run(self):
    """Main program.

    """
    self._ComposePaths()

    self.SetupLogging()

    # Option checking
    if self.args:
      raise Error("No arguments expected")
    if self.opts.downgrade and not self.opts.no_verify:
      self.opts.no_verify = True

    # Check master name
    if not (self.CheckHostname(self.opts.SSCONF_MASTER_NODE) or
            self.opts.ignore_hostname):
      logging.error("Aborting due to hostname mismatch")
      sys.exit(constants.EXIT_FAILURE)

    self._AskUser()

    # Check whether it's a Ganeti configuration directory
    if not (os.path.isfile(self.opts.CONFIG_DATA_PATH) and
            os.path.isfile(self.opts.SERVER_PEM_PATH) and
            os.path.isfile(self.opts.KNOWN_HOSTS_PATH)):
      raise Error(("%s does not seem to be a Ganeti configuration"
                   " directory") % self.opts.data_dir)

    if not os.path.isdir(self.opts.conf_dir):
      raise Error("Not a directory: %s" % self.opts.conf_dir)

    self.config_data = serializer.LoadJson(utils.ReadFile(
        self.opts.CONFIG_DATA_PATH))

    try:
      config_version = self.config_data["version"]
    except KeyError:
      raise Error("Unable to determine configuration version")

    (config_major, config_minor, config_revision) = \
        version.SplitVersion(config_version)

    logging.info("Found configuration version %s (%d.%d.%d)",
                 config_version, config_major, config_minor, config_revision)

    if "config_version" in self.config_data["cluster"]:
      raise Error("Inconsistent configuration: found config_version in"
                  " configuration file")

    # Downgrade to the previous stable version
    if self.opts.downgrade:
      self._Downgrade(config_major, config_minor, config_version,
                      config_revision)

    # Upgrade from 2.{0..14} to 2.15
    elif config_major == 2 and config_minor in range(0, 15):
      if config_revision != 0:
        logging.warning("Config revision is %s, not 0", config_revision)
      if not self.UpgradeAll():
        raise Error("Upgrade failed:\n%s" % '\n'.join(self.errors))

    elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
      logging.info("No changes necessary")

    else:
      raise Error("Configuration version %d.%d.%d not supported by this tool" %
                  (config_major, config_minor, config_revision))

    try:
      logging.info("Writing configuration file to %s",
                   self.opts.CONFIG_DATA_PATH)
      utils.WriteFile(file_name=self.opts.CONFIG_DATA_PATH,
                      data=serializer.DumpJson(self.config_data),
                      mode=0600,
                      dry_run=self.opts.dry_run,
                      backup=True)

      if not self.opts.dry_run:
        # Make sure all cluster cryptographic artifacts exist; every boolean
        # argument is False, so nothing that is already present gets
        # regenerated, only missing files are created
        bootstrap.GenerateClusterCrypto(
          False, False, False, False, False, False, None,
          nodecert_file=self.opts.SERVER_PEM_PATH,
          rapicert_file=self.opts.RAPI_CERT_FILE,
          spicecert_file=self.opts.SPICE_CERT_FILE,
          spicecacert_file=self.opts.SPICE_CACERT_FILE,
          hmackey_file=self.opts.CONFD_HMAC_KEY,
          cds_file=self.opts.CDS_FILE)

    except Exception:
      logging.critical("Writing configuration failed. It is probably in an"
                       " inconsistent state and needs manual intervention.")
      raise

    self._TestLoadingConfigFile()

  def SetupLogging(self):
234 """Configures the logging module.
235
236 """
237 formatter = logging.Formatter("%(asctime)s: %(message)s")
238
239 stderr_handler = logging.StreamHandler()
240 stderr_handler.setFormatter(formatter)
241 if self.opts.debug:
242 stderr_handler.setLevel(logging.NOTSET)
243 elif self.opts.verbose:
244 stderr_handler.setLevel(logging.INFO)
245 else:
246 stderr_handler.setLevel(logging.WARNING)
247
248 root_logger = logging.getLogger("")
249 root_logger.setLevel(logging.NOTSET)
250 root_logger.addHandler(stderr_handler)
251

  @staticmethod
  def CheckHostname(path):
    """Ensures hostname matches ssconf value.

    @param path: Path to ssconf file

    """
    ssconf_master_node = utils.ReadOneLineFile(path)
    hostname = netutils.GetHostname().name

    if ssconf_master_node == hostname:
      return True

    logging.warning("Warning: ssconf says master node is '%s', but this"
                    " machine's name is '%s'; this tool must be run on"
                    " the master node", ssconf_master_node, hostname)
    return False

  @staticmethod
  def _FillIPolicySpecs(default_ipolicy, ipolicy):
    if "minmax" in ipolicy:
      for (key, spec) in ipolicy["minmax"][0].items():
        for (par, val) in default_ipolicy["minmax"][0][key].items():
          if par not in spec:
            spec[par] = val

  def UpgradeIPolicy(self, ipolicy, default_ipolicy, isgroup):
    minmax_keys = ["min", "max"]
    if any((k in ipolicy) for k in minmax_keys):
      minmax = {}
      for key in minmax_keys:
        if key in ipolicy:
          if ipolicy[key]:
            minmax[key] = ipolicy[key]
          del ipolicy[key]
      if minmax:
        ipolicy["minmax"] = [minmax]
    if isgroup and "std" in ipolicy:
      del ipolicy["std"]
    self._FillIPolicySpecs(default_ipolicy, ipolicy)
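
  # A sketch of the transformation (spec names illustrative): the legacy
  # policy form
  #   {"min": {"disk-size": 128}, "max": {"disk-size": 4096}, "std": {...}}
  # becomes
  #   {"minmax": [{"min": {...}, "max": {...}}], "std": {...}}
  # after which _FillIPolicySpecs copies any missing parameters from the
  # defaults; for node groups the "std" values are dropped entirely.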

  @OrFail("Setting networks")
  def UpgradeNetworks(self):
    assert isinstance(self.config_data, dict)

    networks = self.config_data.get("networks", None)
    if not networks:
      self.config_data["networks"] = {}

  @OrFail("Upgrading cluster")
  def UpgradeCluster(self):
    assert isinstance(self.config_data, dict)

    cluster = self.config_data.get("cluster", None)
    if cluster is None:
      raise Error("Cannot find cluster")
    ipolicy = cluster.setdefault("ipolicy", None)
    if ipolicy:
      self.UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
    ial_params = cluster.get("default_iallocator_params", None)
    if not ial_params:
      cluster["default_iallocator_params"] = {}
    if "candidate_certs" not in cluster:
      cluster["candidate_certs"] = {}
    cluster["instance_communication_network"] = \
      cluster.get("instance_communication_network", "")
    cluster["install_image"] = \
      cluster.get("install_image", "")
    cluster["zeroing_image"] = \
      cluster.get("zeroing_image", "")
    cluster["compression_tools"] = \
      cluster.get("compression_tools", constants.IEC_DEFAULT_TOOLS)
    if "enabled_user_shutdown" not in cluster:
      cluster["enabled_user_shutdown"] = False
    cluster["data_collectors"] = cluster.get("data_collectors", {})
    for name in constants.DATA_COLLECTOR_NAMES:
      # the MonD collection interval is stored in microseconds
      cluster["data_collectors"][name] = \
        cluster["data_collectors"].get(
            name, dict(active=True,
                       interval=constants.MOND_TIME_INTERVAL * 1e6))

  @OrFail("Upgrading groups")
  def UpgradeGroups(self):
    cl_ipolicy = self.config_data["cluster"].get("ipolicy")
    for group in self.config_data["nodegroups"].values():
      networks = group.get("networks", None)
      if not networks:
        group["networks"] = {}
      ipolicy = group.get("ipolicy", None)
      if ipolicy:
        if cl_ipolicy is None:
          raise Error("A group defines an instance policy but there is no"
                      " instance policy at cluster level")
        self.UpgradeIPolicy(ipolicy, cl_ipolicy, True)

  def GetExclusiveStorageValue(self):
    """Return a conservative value of the exclusive_storage flag.

    Return C{True} if the cluster or at least one nodegroup has the flag set.

    """
    ret = False
    cluster = self.config_data["cluster"]
    ndparams = cluster.get("ndparams")
    if ndparams is not None and ndparams.get("exclusive_storage"):
      ret = True
    for group in self.config_data["nodegroups"].values():
      ndparams = group.get("ndparams")
      if ndparams is not None and ndparams.get("exclusive_storage"):
        ret = True
    return ret

  def RemovePhysicalId(self, disk):
    if "children" in disk:
      for d in disk["children"]:
        self.RemovePhysicalId(d)
    if "physical_id" in disk:
      del disk["physical_id"]

  def ChangeDiskDevType(self, disk, dev_type_map):
    """Replaces disk's dev_type attributes according to the given map.

    This can be used both for upgrading and for downgrading the disks.

    """
    if disk["dev_type"] in dev_type_map:
      disk["dev_type"] = dev_type_map[disk["dev_type"]]
    if "children" in disk:
      for child in disk["children"]:
        self.ChangeDiskDevType(child, dev_type_map)
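
  # E.g. self.ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW) turns a legacy
  # "drbd8" disk (and, recursively, its children) into constants.DT_DRBD8.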

  def UpgradeDiskDevType(self, disk):
    """Upgrades the disks' device type."""
    self.ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)

  @staticmethod
  def _ConvertNicNameToUuid(iobj, network2uuid):
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid
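
  # E.g. with network2uuid = {"net1": "beac...."} (value illustrative), a
  # NIC entry {"network": "net1"} is rewritten to {"network": "beac...."}.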

  @classmethod
  def AssignUuid(cls, disk):
    if "uuid" not in disk:
      disk["uuid"] = utils.io.NewUUID()
    if "children" in disk:
      for d in disk["children"]:
        cls.AssignUuid(d)

  def _ConvertDiskAndCheckMissingSpindles(self, iobj, instance):
    missing_spindles = False
    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    if not all(isinstance(d, str) for d in disks):
      # The disks are not top-level citizens yet: they are still embedded
      # objects rather than UUID references
      for idx, dobj in enumerate(disks):
        self.RemovePhysicalId(dobj)

        expected = "disk/%s" % idx
        current = dobj.get("iv_name", "")
        if current != expected:
          logging.warning("Updating iv_name for instance %s/disk %s"
                          " from '%s' to '%s'",
                          instance, idx, current, expected)
          dobj["iv_name"] = expected

        if "dev_type" in dobj:
          self.UpgradeDiskDevType(dobj)

        if "spindles" not in dobj:
          missing_spindles = True

        self.AssignUuid(dobj)
    return missing_spindles

  @OrFail("Upgrading instance with spindles")
  def UpgradeInstances(self):
    """Upgrades the instances' configuration."""

    network2uuid = dict((n["name"], n["uuid"])
                        for n in self.config_data["networks"].values())
    if "instances" not in self.config_data:
      raise Error("Can't find the 'instances' key in the configuration!")

    missing_spindles = False
    for instance, iobj in self.config_data["instances"].items():
      self._ConvertNicNameToUuid(iobj, network2uuid)
      if self._ConvertDiskAndCheckMissingSpindles(iobj, instance):
        missing_spindles = True
      if "admin_state_source" not in iobj:
        iobj["admin_state_source"] = constants.ADMIN_SOURCE

    if self.GetExclusiveStorageValue() and missing_spindles:
      # We cannot be sure whether the instances with missing spindles
      # actually sit on exclusive-storage nodes (checking that here would be
      # costly), so only warn the administrator
      logging.warning("Some instance disks may need their spindles parameter"
                      " updated; you can check by running"
                      " 'gnt-cluster verify', and fix any problem with"
                      " 'gnt-cluster repair-disk-sizes'")

  def UpgradeRapiUsers(self):
    if (os.path.isfile(self.opts.RAPI_USERS_FILE_PRE24) and
        not os.path.islink(self.opts.RAPI_USERS_FILE_PRE24)):
      if os.path.exists(self.opts.RAPI_USERS_FILE):
        raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                    " already exists at %s" %
                    (self.opts.RAPI_USERS_FILE_PRE24,
                     self.opts.RAPI_USERS_FILE))
      logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                   self.opts.RAPI_USERS_FILE_PRE24,
                   self.opts.RAPI_USERS_FILE)
      if not self.opts.dry_run:
        utils.RenameFile(self.opts.RAPI_USERS_FILE_PRE24,
                         self.opts.RAPI_USERS_FILE, mkdir=True,
                         mkdir_mode=0750)

    # If only the new file exists, create a compatibility symlink at the
    # old location
    if (not (os.path.islink(self.opts.RAPI_USERS_FILE_PRE24) or
             os.path.isfile(self.opts.RAPI_USERS_FILE_PRE24)) and
        os.path.isfile(self.opts.RAPI_USERS_FILE)):
      logging.info("Creating symlink from %s to %s",
                   self.opts.RAPI_USERS_FILE_PRE24,
                   self.opts.RAPI_USERS_FILE)
      if not self.opts.dry_run:
        os.symlink(self.opts.RAPI_USERS_FILE,
                   self.opts.RAPI_USERS_FILE_PRE24)

  def UpgradeWatcher(self):
    # Remove the old watcher state file if it exists
    if os.path.exists(self.opts.WATCHER_STATEFILE):
      logging.info("Removing watcher state file %s",
                   self.opts.WATCHER_STATEFILE)
      if not self.opts.dry_run:
        utils.RemoveFile(self.opts.WATCHER_STATEFILE)

  @OrFail("Upgrading file storage paths")
  def UpgradeFileStoragePaths(self):
    # Write the file storage paths whitelist if it is missing
    if not os.path.exists(self.opts.FILE_STORAGE_PATHS_FILE):
      cluster = self.config_data["cluster"]
      file_storage_dir = cluster.get("file_storage_dir")
      shared_file_storage_dir = cluster.get("shared_file_storage_dir")
      del cluster

      logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                   " for file storage; writing existing configuration values"
                   " into '%s'",
                   self.opts.FILE_STORAGE_PATHS_FILE)

      if file_storage_dir:
        logging.info("File storage directory: %s", file_storage_dir)
      if shared_file_storage_dir:
        logging.info("Shared file storage directory: %s",
                     shared_file_storage_dir)

      buf = StringIO()
      buf.write("# List automatically generated from configuration by\n")
      buf.write("# cfgupgrade at %s\n" % time.asctime())
      if file_storage_dir:
        buf.write("%s\n" % file_storage_dir)
      if shared_file_storage_dir:
        buf.write("%s\n" % shared_file_storage_dir)
      utils.WriteFile(file_name=self.opts.FILE_STORAGE_PATHS_FILE,
                      data=buf.getvalue(),
                      mode=0600,
                      dry_run=self.opts.dry_run,
                      backup=True)
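
  # The generated whitelist file looks roughly like this (paths
  # illustrative):
  #
  #   # List automatically generated from configuration by
  #   # cfgupgrade at Mon Jun  1 12:00:00 2015
  #   /srv/ganeti/file-storage
  #   /srv/ganeti/shared-file-storage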

  @staticmethod
  def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
    if old_key not in nodes_by_old_key:
      logging.warning("Can't find node '%s' in configuration, "
                      "assuming that it's already up-to-date", old_key)
      return old_key
    return nodes_by_old_key[old_key][new_key_field]
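
  # E.g. GetNewNodeIndex({"node1": {"uuid": "9f2a...."}}, "node1", "uuid")
  # returns "9f2a...." (values illustrative); keys that cannot be resolved
  # are returned unchanged.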

  def ChangeNodeIndices(self, config_data, old_key_field, new_key_field):
    def ChangeDiskNodeIndices(disk):
      # "drbd8" is the legacy (pre-2.9) name of the DRBD device type (see
      # DEV_TYPE_OLD_NEW above); both the old and the new name have to be
      # handled here
      drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
      if disk["dev_type"] in drbd_disk_types:
        for i in range(0, 2):
          disk["logical_id"][i] = self.GetNewNodeIndex(nodes_by_old_key,
                                                       disk["logical_id"][i],
                                                       new_key_field)
      if "children" in disk:
        for child in disk["children"]:
          ChangeDiskNodeIndices(child)

    nodes_by_old_key = {}
    nodes_by_new_key = {}
    for (_, node) in config_data["nodes"].items():
      nodes_by_old_key[node[old_key_field]] = node
      nodes_by_new_key[node[new_key_field]] = node

    config_data["nodes"] = nodes_by_new_key

    cluster = config_data["cluster"]
    cluster["master_node"] = self.GetNewNodeIndex(nodes_by_old_key,
                                                  cluster["master_node"],
                                                  new_key_field)

    for inst in config_data["instances"].values():
      inst["primary_node"] = self.GetNewNodeIndex(nodes_by_old_key,
                                                  inst["primary_node"],
                                                  new_key_field)

    for disk in config_data["disks"].values():
      ChangeDiskNodeIndices(disk)
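
  # After ChangeNodeIndices(config_data, "name", "uuid") the "nodes" dict is
  # keyed by UUID instead of hostname, and every node reference (master
  # node, instance primary nodes, DRBD logical_id entries) uses the new key,
  # e.g. "node1.example.com" -> "9f2a...." (values illustrative).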

  @staticmethod
  def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
    insts_by_old_key = {}
    insts_by_new_key = {}
    for (_, inst) in config_data["instances"].items():
      insts_by_old_key[inst[old_key_field]] = inst
      insts_by_new_key[inst[new_key_field]] = inst

    config_data["instances"] = insts_by_new_key

  @OrFail("Changing node indices")
  def UpgradeNodeIndices(self):
    self.ChangeNodeIndices(self.config_data, "name", "uuid")

  @OrFail("Changing instance indices")
  def UpgradeInstanceIndices(self):
    self.ChangeInstanceIndices(self.config_data, "name", "uuid")

  @OrFail("Adding filters")
  def UpgradeFilters(self):
    filters = self.config_data.get("filters", None)
    if not filters:
      self.config_data["filters"] = {}

  @OrFail("Set top level disks")
  def UpgradeTopLevelDisks(self):
    """Upgrades the disks as config top level citizens."""
    if "instances" not in self.config_data:
      raise Error("Can't find the 'instances' key in the configuration!")

    if "disks" in self.config_data:
      # Disks are already top-level citizens
      return

    self.config_data["disks"] = dict()
    for iobj in self.config_data["instances"].values():
      disk_uuids = []
      for disk in iobj["disks"]:
        duuid = disk["uuid"]
        disk["serial_no"] = 1
        # The disk objects have no timestamps of their own yet; inherit the
        # creation time of the owning instance
        disk["ctime"] = disk["mtime"] = iobj.get("ctime", 0)
        self.config_data["disks"][duuid] = disk
        disk_uuids.append(duuid)
      iobj["disks"] = disk_uuids
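
  # A sketch of the result: an instance entry
  #   {"disks": [{"uuid": "d1", ...}], ...}
  # becomes
  #   {"disks": ["d1"], ...}
  # while the full disk object now lives in config_data["disks"]["d1"].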

  @OrFail("Removing disk template")
  def UpgradeDiskTemplate(self):
    if "instances" not in self.config_data:
      raise Error("Can't find the 'instances' dictionary in the"
                  " configuration.")
    instances = self.config_data["instances"]
    for inst in instances.values():
      if "disk_template" in inst:
        del inst["disk_template"]
627
629 """Recursively compute nodes given a top device."""
630 nodes = set()
631 if disk["dev_type"] in constants.DTS_DRBD:
632 nodes = set(disk["logical_id"][:2])
633 for child in disk.get("children", []):
634 nodes |= self._ComputeAllNodes(child)
635 return nodes
636
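
  # E.g. for a DRBD disk whose logical_id starts with two node UUIDs, this
  # returns those two UUIDs, unioned with whatever the children report.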

  def _RecursiveUpdateNodes(self, disk, nodes):
    disk["nodes"] = nodes
    for child in disk.get("children", []):
      self._RecursiveUpdateNodes(child, nodes)

  @OrFail("Upgrading disk nodes")
  def UpgradeDiskNodes(self):
    """Specify the nodes from which a disk is accessible in its definition.

    For every disk that is attached to an instance, get the UUIDs of the
    nodes that it's accessible from. There are three main cases:
    1) Internally mirrored disks (DRBD):
       These disks are accessible from two nodes, so the nodes list will
       include these. Their children (data, meta) are also accessible from
       two nodes, therefore they will inherit the nodes of the parent.
    2) Externally mirrored disks (Blockdev, Ext, Gluster, RBD, Shared File):
       These disks should be accessible from any node in the cluster,
       therefore the nodes list will be empty.
    3) Single-node disks (Plain, File):
       These disks are accessible from one node only, therefore the nodes
       list will consist only of the primary instance node.

    """
    disks = self.config_data["disks"]
    for instance in self.config_data["instances"].itervalues():
      # Collect all nodes from which the instance's disks are reachable
      instance_node = set([instance["primary_node"]])
      disk_nodes = set()
      for disk_uuid in instance["disks"]:
        disk_nodes |= self._ComputeAllNodes(disks[disk_uuid])
      all_nodes = list(instance_node | disk_nodes)

      # Populate the "nodes" list of each of the instance's disks
      for disk_uuid in instance["disks"]:
        disk = disks[disk_uuid]
        if "nodes" in disk:
          # The "nodes" field was already set for this disk
          continue

        if disk["dev_type"] in constants.DTS_INT_MIRROR:
          self._RecursiveUpdateNodes(disk, all_nodes)
        elif disk["dev_type"] in (constants.DT_PLAIN, constants.DT_FILE):
          disk["nodes"] = all_nodes
        else:
          disk["nodes"] = []

  def UpgradeAll(self):
    self.config_data["version"] = version.BuildVersion(TARGET_MAJOR,
                                                       TARGET_MINOR, 0)
    self.UpgradeRapiUsers()
    self.UpgradeWatcher()
    steps = [self.UpgradeFileStoragePaths,
             self.UpgradeNetworks,
             self.UpgradeCluster,
             self.UpgradeGroups,
             self.UpgradeInstances,
             self.UpgradeTopLevelDisks,
             self.UpgradeNodeIndices,
             self.UpgradeInstanceIndices,
             self.UpgradeFilters,
             self.UpgradeDiskNodes,
             self.UpgradeDiskTemplate]
    for s in steps:
      s()
    return not self.errors

  def _AskUser(self):
    if not self.opts.force:
      if self.opts.downgrade:
        usertext = ("The configuration is going to be DOWNGRADED to version"
                    " %s.%s. Some configuration data might be removed if it"
                    " doesn't fit in the old format. Please make sure you"
                    " have read the upgrade notes (available in the UPGRADE"
                    " file and included in other documentation formats) to"
                    " understand what they are. Continue with *DOWNGRADING*"
                    " the configuration?" %
                    (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
      else:
        usertext = ("Please make sure you have read the upgrade notes for"
                    " Ganeti %s (available in the UPGRADE file and included"
                    " in other documentation formats). Continue with"
                    " upgrading configuration?" % constants.RELEASE_VERSION)
      if not cli.AskUser(usertext):
        sys.exit(constants.EXIT_FAILURE)

  def _Downgrade(self, config_major, config_minor, config_version,
                 config_revision):
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR)
            or (config_major == DOWNGRADE_MAJOR and
                config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    if not self.DowngradeAll():
      raise Error("Downgrade failed:\n%s" % "\n".join(self.errors))

  def _TestLoadingConfigFile(self):
    # Test loading the config file
    all_ok = True
    if not (self.opts.dry_run or self.opts.no_verify):
      logging.info("Testing the new config file...")
      cfg = config.ConfigWriter(cfg_file=self.opts.CONFIG_DATA_PATH,
                                accept_foreign=self.opts.ignore_hostname,
                                offline=True)
      # If loading succeeded, verify the contents as well
      vrfy = cfg.VerifyConfig()
      if vrfy:
        logging.error("Errors after conversion:")
        for item in vrfy:
          logging.error(" - %s", item)
        all_ok = False
      else:
        logging.info("File loaded successfully after upgrading")
      del cfg

    if self.opts.downgrade:
      action = "downgraded"
      out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
    else:
      action = "upgraded"
      out_ver = constants.RELEASE_VERSION
    if all_ok:
      cli.ToStderr("Configuration successfully %s to version %s.",
                   action, out_ver)
    else:
      cli.ToStderr("Configuration %s to version %s, but there are errors."
                   "\nPlease review the file.", action, out_ver)
800