30 """Library of the tools/cfgupgrade utility.
31
32 This code handles only the types supported by simplejson. As an
33 example, 'set' is a 'list'.
34
35 """

import copy
import os
import os.path
import sys
import logging
import optparse
import time
import functools
from cStringIO import StringIO

from ganeti import cli
from ganeti import constants
from ganeti import serializer
from ganeti import utils
from ganeti import bootstrap
from ganeti import config
from ganeti import pathutils
from ganeti import netutils

from ganeti.utils import version


TARGET_MAJOR = 2

TARGET_MINOR = 18

DOWNGRADE_MAJOR = 2

DOWNGRADE_MINOR = 17
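
# version.SplitVersion/version.BuildVersion convert between these components
# and the single integer stored in the configuration's "version" field (e.g.
# 2.18.0 <-> 2180000, assuming the usual 1000000 * major + 10000 * minor +
# revision encoding).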


DEV_TYPE_OLD_NEW = {"lvm": constants.DT_PLAIN, "drbd8": constants.DT_DRBD8}

DEV_TYPE_NEW_OLD = dict((v, k) for k, v in DEV_TYPE_OLD_NEW.items())
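
# e.g. DEV_TYPE_OLD_NEW["lvm"] == constants.DT_PLAIN when upgrading; the
# inverse map DEV_TYPE_NEW_OLD serves the downgrade direction.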


class Error(Exception):
  """Generic exception"""
  pass


def ParseOptions(args=None):
  parser = optparse.OptionParser(usage="%prog [--debug|--verbose] [--force]")
  parser.add_option("--dry-run", dest="dry_run",
                    action="store_true",
                    help="Try to do the conversion, but don't write"
                         " output file")
  parser.add_option(cli.FORCE_OPT)
  parser.add_option(cli.DEBUG_OPT)
  parser.add_option(cli.VERBOSE_OPT)
  parser.add_option("--ignore-hostname", dest="ignore_hostname",
                    action="store_true", default=False,
                    help="Don't abort if hostname doesn't match")
  parser.add_option("--path", help="Convert configuration in this"
                    " directory instead of '%s'" % pathutils.DATA_DIR,
                    default=pathutils.DATA_DIR, dest="data_dir")
  parser.add_option("--confdir",
                    help=("Use this directory instead of '%s'" %
                          pathutils.CONF_DIR),
                    default=pathutils.CONF_DIR, dest="conf_dir")
  parser.add_option("--no-verify",
                    help="Do not verify configuration after upgrade",
                    action="store_true", dest="no_verify", default=False)
  parser.add_option("--downgrade",
                    help="Downgrade to the previous stable version",
                    action="store_true", dest="downgrade", default=False)
  return parser.parse_args(args=args)
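
# Typical use (sketch, hypothetical argv):
#   (opts, args) = ParseOptions(["--dry-run"])
#   CfgUpgrade(opts, args).Run()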


def OrFail(description=None):
109 """Make failure non-fatal and improve reporting."""
110 def wrapper(f):
111 @functools.wraps(f)
112 def wrapped(self):
113 safety = copy.deepcopy(self.config_data)
114 try:
115 f(self)
116 except BaseException, e:
117 msg = "%s failed:\n%s" % (description or f.func_name, e)
118 logging.exception(msg)
119 self.config_data = safety
120 self.errors.append(msg)
121 return wrapped
122 return wrapper
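
# A usage sketch (hypothetical step name): OrFail makes a failing upgrade
# step non-fatal; the step's mutation of config_data is rolled back and the
# message recorded for later reporting:
#
#   @OrFail("Frobbing the cluster")
#   def UpgradeFrobnication(self):
#     self.config_data["frobnication"] = {}
#
# After a run, self.errors holds the messages of all failed steps.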


class CfgUpgrade(object):
  def __init__(self, opts, args):
    self.opts = opts
    self.args = args
    self.errors = []

  def Run(self):
132 """Main program.
133
134 """
    self._ComposePaths()

    self.SetupLogging()

    # Option checking
    if self.args:
      raise Error("No arguments expected")
    if self.opts.downgrade and not self.opts.no_verify:
      self.opts.no_verify = True

    # Check master name
    if not (self.CheckHostname(self.opts.SSCONF_MASTER_NODE) or
            self.opts.ignore_hostname):
      logging.error("Aborting due to hostname mismatch")
      sys.exit(constants.EXIT_FAILURE)

    self._AskUser()

    # Check whether it's a Ganeti configuration directory
    if not (os.path.isfile(self.opts.CONFIG_DATA_PATH) and
            os.path.isfile(self.opts.SERVER_PEM_PATH) and
            os.path.isfile(self.opts.KNOWN_HOSTS_PATH)):
      raise Error(("%s does not seem to be a Ganeti configuration"
                   " directory") % self.opts.data_dir)

    if not os.path.isdir(self.opts.conf_dir):
      raise Error("Not a directory: %s" % self.opts.conf_dir)

    self.config_data = serializer.LoadJson(utils.ReadFile(
        self.opts.CONFIG_DATA_PATH))

    try:
      config_version = self.config_data["version"]
    except KeyError:
      raise Error("Unable to determine configuration version")

    (config_major, config_minor, config_revision) = \
      version.SplitVersion(config_version)

    logging.info("Found configuration version %s (%d.%d.%d)",
                 config_version, config_major, config_minor, config_revision)

    if "config_version" in self.config_data["cluster"]:
      raise Error("Inconsistent configuration: found config_version in"
                  " configuration file")

    # Downgrade to the previous stable version
    if self.opts.downgrade:
      self._Downgrade(config_major, config_minor, config_version,
                      config_revision)

    # Upgrade from an older 2.x version to the target version
    elif config_major == 2 and config_minor in range(0, TARGET_MINOR):
      if config_revision != 0:
        logging.warning("Config revision is %s, not 0", config_revision)
      if not self.UpgradeAll():
        raise Error("Upgrade failed:\n%s" % '\n'.join(self.errors))

    elif config_major == TARGET_MAJOR and config_minor == TARGET_MINOR:
      logging.info("No changes necessary")

    else:
      raise Error("Configuration version %d.%d.%d not supported by this tool" %
                  (config_major, config_minor, config_revision))

    try:
      logging.info("Writing configuration file to %s",
                   self.opts.CONFIG_DATA_PATH)
      utils.WriteFile(file_name=self.opts.CONFIG_DATA_PATH,
                      data=serializer.DumpJson(self.config_data),
                      mode=0600,
                      dry_run=self.opts.dry_run,
                      backup=True)

      if not self.opts.dry_run:
        # Regenerate the cluster certificates and keys alongside the new
        # configuration, reusing any files that already exist.
        bootstrap.GenerateClusterCrypto(
          False, False, False, False, False, False, None,
          nodecert_file=self.opts.SERVER_PEM_PATH,
          rapicert_file=self.opts.RAPI_CERT_FILE,
          spicecert_file=self.opts.SPICE_CERT_FILE,
          spicecacert_file=self.opts.SPICE_CACERT_FILE,
          hmackey_file=self.opts.CONFD_HMAC_KEY,
          cds_file=self.opts.CDS_FILE)

    except Exception:
      logging.critical("Writing configuration failed. It is probably in an"
                       " inconsistent state and needs manual intervention.")
      raise

    self._TestLoadingConfigFile()

  def SetupLogging(self):
    """Configures the logging module.

    """
    formatter = logging.Formatter("%(asctime)s: %(message)s")

    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if self.opts.debug:
      stderr_handler.setLevel(logging.NOTSET)
    elif self.opts.verbose:
      stderr_handler.setLevel(logging.INFO)
    else:
      stderr_handler.setLevel(logging.WARNING)

    root_logger = logging.getLogger("")
    root_logger.setLevel(logging.NOTSET)
    root_logger.addHandler(stderr_handler)

  @staticmethod
  def CheckHostname(path):
    """Ensures hostname matches ssconf value.

    @param path: Path to ssconf file

    """
    ssconf_master_node = utils.ReadOneLineFile(path)
    hostname = netutils.GetHostname().name

    if ssconf_master_node == hostname:
      return True

    logging.warning("Warning: ssconf says master node is '%s', but this"
                    " machine's name is '%s'; this tool must be run on"
                    " the master node", ssconf_master_node, hostname)
    return False

  @staticmethod
  def _FillIPolicySpecs(default_ipolicy, ipolicy):
    if "minmax" in ipolicy:
      for (key, spec) in ipolicy["minmax"][0].items():
        for (par, val) in default_ipolicy["minmax"][0][key].items():
          if par not in spec:
            spec[par] = val

  def UpgradeIPolicy(self, ipolicy, default_ipolicy, isgroup):
    minmax_keys = ["min", "max"]
    if any((k in ipolicy) for k in minmax_keys):
      minmax = {}
      for key in minmax_keys:
        if key in ipolicy:
          if ipolicy[key]:
            minmax[key] = ipolicy[key]
          del ipolicy[key]
      if minmax:
        ipolicy["minmax"] = [minmax]
    if isgroup and "std" in ipolicy:
      del ipolicy["std"]
    self._FillIPolicySpecs(default_ipolicy, ipolicy)
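
  # A sketch of the transformation (illustrative values): an old-style policy
  #   {"min": {"disk-size": 128}, "max": {"disk-size": 1024}, ...}
  # becomes
  #   {"minmax": [{"min": {"disk-size": 128}, "max": {"disk-size": 1024}}],
  #    ...}
  # with "std" additionally dropped at group level (isgroup=True) and any
  # missing minmax parameters filled in from the default policy.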

  @OrFail("Setting networks")
  def UpgradeNetworks(self):
    assert isinstance(self.config_data, dict)
    networks = self.config_data.get("networks", None)
    if not networks:
      self.config_data["networks"] = {}

  @OrFail("Upgrading cluster")
  def UpgradeCluster(self):
    assert isinstance(self.config_data, dict)

    cluster = self.config_data.get("cluster", None)
    if cluster is None:
      raise Error("Cannot find cluster")

    ipolicy = cluster.setdefault("ipolicy", None)
    if ipolicy:
      self.UpgradeIPolicy(ipolicy, constants.IPOLICY_DEFAULTS, False)
    ial_params = cluster.get("default_iallocator_params", None)

    if not ial_params:
      cluster["default_iallocator_params"] = {}

    if "candidate_certs" not in cluster:
      cluster["candidate_certs"] = {}

    cluster["instance_communication_network"] = \
      cluster.get("instance_communication_network", "")

    cluster["install_image"] = \
      cluster.get("install_image", "")

    cluster["zeroing_image"] = \
      cluster.get("zeroing_image", "")

    cluster["compression_tools"] = \
      cluster.get("compression_tools", constants.IEC_DEFAULT_TOOLS)

    if "enabled_user_shutdown" not in cluster:
      cluster["enabled_user_shutdown"] = False

    cluster["data_collectors"] = cluster.get("data_collectors", {})
    for name in constants.DATA_COLLECTOR_NAMES:
      cluster["data_collectors"][name] = \
        cluster["data_collectors"].get(
            name, dict(active=True,
                       interval=constants.MOND_TIME_INTERVAL * 1e6))
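    # e.g. an entry created above looks like (collector name hypothetical):
    #   cluster["data_collectors"]["cpu-load"] ==
    #     {"active": True, "interval": constants.MOND_TIME_INTERVAL * 1e6}
    # with the interval stored in microseconds.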
    if "diagnose_data_collector_filename" not in cluster:
      cluster["diagnose_data_collector_filename"] = ""

    # SSH key parameters: DSA with 1024 bits are kept as defaults for
    # configurations that predate these settings.
    if "ssh_key_type" not in cluster:
      cluster["ssh_key_type"] = constants.SSHK_DSA

    if "ssh_key_bits" not in cluster:
      cluster["ssh_key_bits"] = 1024

  @OrFail("Upgrading groups")
  def UpgradeGroups(self):
    cl_ipolicy = self.config_data["cluster"].get("ipolicy")
    for group in self.config_data["nodegroups"].values():
      networks = group.get("networks", None)
      if not networks:
        group["networks"] = {}
      ipolicy = group.get("ipolicy", None)
      if ipolicy:
        if cl_ipolicy is None:
          raise Error("A group defines an instance policy but there is no"
                      " instance policy at cluster level")
        self.UpgradeIPolicy(ipolicy, cl_ipolicy, True)

  def GetExclusiveStorageValue(self):
    """Return a conservative value of the exclusive_storage flag.

    Return C{True} if the cluster or at least one nodegroup has the flag set.

    """
    ret = False
    cluster = self.config_data["cluster"]
    ndparams = cluster.get("ndparams")
    if ndparams is not None and ndparams.get("exclusive_storage"):
      ret = True
    for group in self.config_data["nodegroups"].values():
      ndparams = group.get("ndparams")
      if ndparams is not None and ndparams.get("exclusive_storage"):
        ret = True
    return ret

  def RemovePhysicalId(self, disk):
    if "children" in disk:
      for d in disk["children"]:
        self.RemovePhysicalId(d)
    if "physical_id" in disk:
      del disk["physical_id"]

  def ChangeDiskDevType(self, disk, dev_type_map):
    """Replaces disk's dev_type attributes according to the given map.

    This can be used for both upgrading and downgrading the disks.

    """
    if disk["dev_type"] in dev_type_map:
      disk["dev_type"] = dev_type_map[disk["dev_type"]]
    if "children" in disk:
      for child in disk["children"]:
        self.ChangeDiskDevType(child, dev_type_map)

  def UpgradeDiskDevType(self, disk):
    """Upgrades the disks' device type."""
    self.ChangeDiskDevType(disk, DEV_TYPE_OLD_NEW)
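
  # For example, upgrading maps a legacy "drbd8" disk (and, recursively, its
  # children) to constants.DT_DRBD8; a downgrade would apply the inverse map:
  #   self.ChangeDiskDevType(disk, DEV_TYPE_NEW_OLD)
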
  @staticmethod
  def _ConvertNicNameToUuid(iobj, network2uuid):
    for nic in iobj["nics"]:
      name = nic.get("network", None)
      if name:
        uuid = network2uuid.get(name, None)
        if uuid:
          print("NIC with network name %s found."
                " Substituting with uuid %s." % (name, uuid))
          nic["network"] = uuid

  @classmethod
  def AssignUuid(cls, disk):
    if "uuid" not in disk:
      disk["uuid"] = utils.io.NewUUID()
    if "children" in disk:
      for d in disk["children"]:
        cls.AssignUuid(d)

  def _ConvertDiskAndCheckMissingSpindles(self, iobj, instance):
    missing_spindles = False
    if "disks" not in iobj:
      raise Error("Instance '%s' doesn't have a disks entry?!" % instance)
    disks = iobj["disks"]
    if not all(isinstance(d, str) for d in disks):
      # Disks are still full dicts here, not top-level UUID references
      for idx, dobj in enumerate(disks):
        self.RemovePhysicalId(dobj)

        expected = "disk/%s" % idx
        current = dobj.get("iv_name", "")
        if current != expected:
          logging.warning("Updating iv_name for instance %s/disk %s"
                          " from '%s' to '%s'",
                          instance, idx, current, expected)
          dobj["iv_name"] = expected

        if "dev_type" in dobj:
          self.UpgradeDiskDevType(dobj)

        if "spindles" not in dobj:
          missing_spindles = True

        self.AssignUuid(dobj)
    return missing_spindles

  @OrFail("Upgrading instance with spindles")
  def UpgradeInstances(self):
    """Upgrades the instances' configuration."""

    network2uuid = dict((n["name"], n["uuid"])
                        for n in self.config_data["networks"].values())
    if "instances" not in self.config_data:
      raise Error("Can't find the 'instances' key in the configuration!")

    missing_spindles = False
    for instance, iobj in self.config_data["instances"].items():
      self._ConvertNicNameToUuid(iobj, network2uuid)
      if self._ConvertDiskAndCheckMissingSpindles(iobj, instance):
        missing_spindles = True
      if "admin_state_source" not in iobj:
        iobj["admin_state_source"] = constants.ADMIN_SOURCE

    if self.GetExclusiveStorageValue() and missing_spindles:
      # Whether the missing spindles are a real problem depends on the node
      # parameters, so point the user at the checks instead of failing here
      logging.warning("Some instance disks may need the spindles parameter"
                      " to be updated; you can check by running"
                      " 'gnt-cluster verify', and fix any problem with"
                      " 'gnt-cluster repair-disk-sizes'")

  def UpgradeRapiUsers(self):
    if (os.path.isfile(self.opts.RAPI_USERS_FILE_PRE24) and
        not os.path.islink(self.opts.RAPI_USERS_FILE_PRE24)):
      if os.path.exists(self.opts.RAPI_USERS_FILE):
        raise Error("Found pre-2.4 RAPI users file at %s, but another file"
                    " already exists at %s" %
                    (self.opts.RAPI_USERS_FILE_PRE24,
                     self.opts.RAPI_USERS_FILE))
      logging.info("Found pre-2.4 RAPI users file at %s, renaming to %s",
                   self.opts.RAPI_USERS_FILE_PRE24, self.opts.RAPI_USERS_FILE)
      if not self.opts.dry_run:
        utils.RenameFile(self.opts.RAPI_USERS_FILE_PRE24,
                         self.opts.RAPI_USERS_FILE, mkdir=True,
                         mkdir_mode=0750)

    # Create a symlink at the pre-2.4 location pointing to the new file
    if (not (os.path.islink(self.opts.RAPI_USERS_FILE_PRE24) or
             os.path.isfile(self.opts.RAPI_USERS_FILE_PRE24)) and
        os.path.isfile(self.opts.RAPI_USERS_FILE)):
      logging.info("Creating symlink from %s to %s",
                   self.opts.RAPI_USERS_FILE_PRE24, self.opts.RAPI_USERS_FILE)
      if not self.opts.dry_run:
        os.symlink(self.opts.RAPI_USERS_FILE, self.opts.RAPI_USERS_FILE_PRE24)

  def UpgradeWatcher(self):
    # Remove the old watcher state file if it exists
    if os.path.exists(self.opts.WATCHER_STATEFILE):
      logging.info("Removing watcher state file %s",
                   self.opts.WATCHER_STATEFILE)
      if not self.opts.dry_run:
        utils.RemoveFile(self.opts.WATCHER_STATEFILE)

  @OrFail("Upgrading file storage paths")
  def UpgradeFileStoragePaths(self):
    # Write file storage paths
    if not os.path.exists(self.opts.FILE_STORAGE_PATHS_FILE):
      cluster = self.config_data["cluster"]
      file_storage_dir = cluster.get("file_storage_dir")
      shared_file_storage_dir = cluster.get("shared_file_storage_dir")
      del cluster

      logging.info("Ganeti 2.7 and later only allow whitelisted directories"
                   " for file storage; writing existing configuration values"
                   " into '%s'",
                   self.opts.FILE_STORAGE_PATHS_FILE)

      if file_storage_dir:
        logging.info("File storage directory: %s", file_storage_dir)
      if shared_file_storage_dir:
        logging.info("Shared file storage directory: %s",
                     shared_file_storage_dir)

      buf = StringIO()
      buf.write("# List automatically generated from configuration by\n")
      buf.write("# cfgupgrade at %s\n" % time.asctime())
      if file_storage_dir:
        buf.write("%s\n" % file_storage_dir)
      if shared_file_storage_dir:
        buf.write("%s\n" % shared_file_storage_dir)
      utils.WriteFile(file_name=self.opts.FILE_STORAGE_PATHS_FILE,
                      data=buf.getvalue(),
                      mode=0600,
                      dry_run=self.opts.dry_run,
                      backup=True)
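
  # The generated whitelist is plain text, e.g. (illustrative paths):
  #   # List automatically generated from configuration by
  #   # cfgupgrade at Mon Jan  1 00:00:00 2024
  #   /srv/ganeti/file-storage
  #   /srv/ganeti/shared-file-storage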

  @staticmethod
  def GetNewNodeIndex(nodes_by_old_key, old_key, new_key_field):
    if old_key not in nodes_by_old_key:
      logging.warning("Can't find node '%s' in configuration, "
                      "assuming that it's already up-to-date", old_key)
      return old_key
    return nodes_by_old_key[old_key][new_key_field]
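
  # Example (hypothetical data): with nodes_by_old_key mapping node names to
  # node objects, GetNewNodeIndex(nodes_by_old_key, "node1.example.com",
  # "uuid") returns that node's UUID; unknown keys are assumed to already be
  # new-style and are returned unchanged.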

  def ChangeNodeIndices(self, config_data, old_key_field, new_key_field):
    def ChangeDiskNodeIndices(disk):
      # Note: "drbd8" is the legacy name of the DRBD device type;
      # configurations that have not yet been through UpgradeDiskDevType
      # may still use it
      drbd_disk_types = set(["drbd8"]) | constants.DTS_DRBD
      if disk["dev_type"] in drbd_disk_types:
        for i in range(0, 2):
          disk["logical_id"][i] = self.GetNewNodeIndex(nodes_by_old_key,
                                                       disk["logical_id"][i],
                                                       new_key_field)
      if "children" in disk:
        for child in disk["children"]:
          ChangeDiskNodeIndices(child)

    nodes_by_old_key = {}
    nodes_by_new_key = {}
    for (_, node) in config_data["nodes"].items():
      nodes_by_old_key[node[old_key_field]] = node
      nodes_by_new_key[node[new_key_field]] = node

    config_data["nodes"] = nodes_by_new_key

    cluster = config_data["cluster"]
    cluster["master_node"] = self.GetNewNodeIndex(nodes_by_old_key,
                                                  cluster["master_node"],
                                                  new_key_field)

    for inst in config_data["instances"].values():
      inst["primary_node"] = self.GetNewNodeIndex(nodes_by_old_key,
                                                  inst["primary_node"],
                                                  new_key_field)

    for disk in config_data["disks"].values():
      ChangeDiskNodeIndices(disk)

  @staticmethod
  def ChangeInstanceIndices(config_data, old_key_field, new_key_field):
    insts_by_old_key = {}
    insts_by_new_key = {}
    for (_, inst) in config_data["instances"].items():
      insts_by_old_key[inst[old_key_field]] = inst
      insts_by_new_key[inst[new_key_field]] = inst

    config_data["instances"] = insts_by_new_key

  @OrFail("Changing node indices")
  def UpgradeNodeIndices(self):
    self.ChangeNodeIndices(self.config_data, "name", "uuid")

  @OrFail("Changing instance indices")
  def UpgradeInstanceIndices(self):
    self.ChangeInstanceIndices(self.config_data, "name", "uuid")

  @OrFail("Adding filters")
  def UpgradeFilters(self):
    filters = self.config_data.get("filters", None)
    if not filters:
      self.config_data["filters"] = {}

  @OrFail("Set top level disks")
  def UpgradeTopLevelDisks(self):
    """Upgrades the disks as config top level citizens."""
    if "instances" not in self.config_data:
      raise Error("Can't find the 'instances' key in the configuration!")

    if "disks" in self.config_data:
      # Disks are already top-level citizens
      return

    self.config_data["disks"] = dict()
    for iobj in self.config_data["instances"].values():
      disk_uuids = []
      for disk in iobj["disks"]:
        duuid = disk["uuid"]
        disk["serial_no"] = 1
        # The instance entry might not have a ctime value; default to 0 so
        # that the disk gets well-defined creation/modification times
        disk["ctime"] = disk["mtime"] = iobj.get("ctime", 0)
        self.config_data["disks"][duuid] = disk
        disk_uuids.append(duuid)
      iobj["disks"] = disk_uuids
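
  # Sketch of the change (illustrative UUID): an instance entry
  #   {"disks": [{"uuid": "d1", ...}], ...}
  # becomes {"disks": ["d1"], ...} while config_data["disks"]["d1"] holds
  # the full disk object.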

  @OrFail("Removing disk template")
  def UpgradeDiskTemplate(self):
    if "instances" not in self.config_data:
      raise Error("Can't find the 'instances' dictionary in the"
                  " configuration.")
    instances = self.config_data["instances"]
    for inst in instances.values():
      if "disk_template" in inst:
        del inst["disk_template"]

  def _ComputeAllNodes(self, disk):
    """Recursively compute nodes given a top device."""
    nodes = set()
    if disk["dev_type"] in constants.DTS_DRBD:
      nodes = set(disk["logical_id"][:2])
    for child in disk.get("children", []):
      nodes |= self._ComputeAllNodes(child)
    return nodes

  def _RecursiveUpdateNodes(self, disk, nodes):
    # Children of an internally mirrored disk inherit the nodes of the
    # parent (see UpgradeDiskNodes below)
    disk["nodes"] = nodes
    for child in disk.get("children", []):
      self._RecursiveUpdateNodes(child, nodes)

  @OrFail("Upgrading disk nodes")
  def UpgradeDiskNodes(self):
    """Specify the nodes from which a disk is accessible in its definition.

    For every disk that is attached to an instance, get the UUIDs of the
    nodes that it's accessible from. There are three main cases:
    1) Internally mirrored disks (DRBD):
       These disks are accessible from two nodes, so the nodes list will
       include these. Their children (data, meta) are also accessible from
       two nodes, therefore they will inherit the nodes of the parent.
    2) Externally mirrored disks (Blockdev, Ext, Gluster, RBD, Shared File):
       These disks should be accessible from any node in the cluster,
       therefore the nodes list will be empty.
    3) Single-node disks (Plain, File):
       These disks are accessible from one node only, therefore the nodes
       list will consist only of the primary instance node.

    """
    disks = self.config_data["disks"]
    for instance in self.config_data["instances"].itervalues():
      # Gather the nodes of the instance: the primary node plus every node
      # computed from its disks
      instance_node = set([instance["primary_node"]])
      disk_nodes = set()
      for disk_uuid in instance["disks"]:
        disk_nodes |= self._ComputeAllNodes(disks[disk_uuid])
      all_nodes = list(instance_node | disk_nodes)

      # Populate the "nodes" list of the instance's disks
      for disk_uuid in instance["disks"]:
        disk = disks[disk_uuid]
        if "nodes" in disk:
          # The "nodes" field has already been added for this disk
          continue

        if disk["dev_type"] in constants.DTS_INT_MIRROR:
          self._RecursiveUpdateNodes(disk, all_nodes)
        elif disk["dev_type"] in (constants.DT_PLAIN, constants.DT_FILE):
          disk["nodes"] = all_nodes
        else:
          disk["nodes"] = []

  @OrFail("Upgrading maintenance data")
  def UpgradeMaintenance(self):
    maintenance = self.config_data.get("maintenance", None)
    if maintenance is None:
      self.config_data["maintenance"] = {}

  def UpgradeAll(self):
    self.config_data["version"] = version.BuildVersion(TARGET_MAJOR,
                                                       TARGET_MINOR, 0)
    self.UpgradeRapiUsers()
    self.UpgradeWatcher()
    steps = [self.UpgradeFileStoragePaths,
             self.UpgradeNetworks,
             self.UpgradeCluster,
             self.UpgradeGroups,
             self.UpgradeInstances,
             self.UpgradeTopLevelDisks,
             self.UpgradeNodeIndices,
             self.UpgradeInstanceIndices,
             self.UpgradeFilters,
             self.UpgradeDiskNodes,
             self.UpgradeDiskTemplate,
             self.UpgradeMaintenance]
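    # Each step is wrapped by OrFail, so a failing step is logged, rolled
    # back, and recorded in self.errors instead of aborting the run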
    for s in steps:
      s()
    return not self.errors

  def _AskUser(self):
    if not self.opts.force:
      if self.opts.downgrade:
        usertext = ("The configuration is going to be DOWNGRADED to version"
                    " %s.%s. Some configuration data might be removed if it"
                    " doesn't fit in the old format. Please make sure you"
                    " have read the upgrade notes (available in the UPGRADE"
                    " file and included in other documentation formats) to"
                    " understand what they are. Continue with *DOWNGRADING*"
                    " the configuration?" %
                    (DOWNGRADE_MAJOR, DOWNGRADE_MINOR))
      else:
        usertext = ("Please make sure you have read the upgrade notes for"
                    " Ganeti %s (available in the UPGRADE file and included"
                    " in other documentation formats). Continue with"
                    " upgrading configuration?" % constants.RELEASE_VERSION)
      if not cli.AskUser(usertext):
        sys.exit(constants.EXIT_FAILURE)

  def _Downgrade(self, config_major, config_minor, config_version,
                 config_revision):
    # Only the target version and the previous stable version may be
    # downgraded; anything else is rejected outright
    if not ((config_major == TARGET_MAJOR and config_minor == TARGET_MINOR) or
            (config_major == DOWNGRADE_MAJOR and
             config_minor == DOWNGRADE_MINOR)):
      raise Error("Downgrade supported only from the latest version (%s.%s),"
                  " found %s (%s.%s.%s) instead" %
                  (TARGET_MAJOR, TARGET_MINOR, config_version, config_major,
                   config_minor, config_revision))
    if not self.DowngradeAll():
      raise Error("Downgrade failed:\n%s" % "\n".join(self.errors))

  def _TestLoadingConfigFile(self):
    # Test loading the config file
    all_ok = True
    if not (self.opts.dry_run or self.opts.no_verify):
      logging.info("Testing the new config file...")
      cfg = config.ConfigWriter(cfg_file=self.opts.CONFIG_DATA_PATH,
                                accept_foreign=self.opts.ignore_hostname,
                                offline=True)
      vrfy = cfg.VerifyConfig()
      if vrfy:
        logging.error("Errors after conversion:")
        for item in vrfy:
          logging.error(" - %s", item)
        all_ok = False
      else:
        logging.info("File loaded successfully after upgrading")
      del cfg

    if self.opts.downgrade:
      action = "downgraded"
      out_ver = "%s.%s" % (DOWNGRADE_MAJOR, DOWNGRADE_MINOR)
    else:
      action = "upgraded"
      out_ver = constants.RELEASE_VERSION
    if all_ok:
      cli.ToStderr("Configuration successfully %s to version %s.",
                   action, out_ver)
    else:
      cli.ToStderr("Configuration %s to version %s, but there are errors."
                   "\nPlease review the file.", action, out_ver)