Package ganeti :: Module bootstrap

Source Code for Module ganeti.bootstrap

#
#

# Copyright (C) 2006, 2007, 2008, 2010, 2011, 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


"""Functions to bootstrap a new cluster.

"""

import os
import os.path
import re
import logging
import time
import tempfile

from ganeti.cmdlib import cluster
import ganeti.rpc.node as rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import hypervisor
from ganeti.storage import drbd
from ganeti.storage import filestorage
from ganeti import netutils
from ganeti import luxi
from ganeti import jstore
from ganeti import pathutils
from ganeti import runtime


# ec_id for InitConfig's temporary reservation manager
_INITCONF_ECID = "initconfig-ecid"

#: Time (in seconds) within which a daemon must become responsive
_DAEMON_READY_TIMEOUT = 10.0


def _InitSSHSetup():
  """Set up the SSH configuration for the cluster.

  This generates a DSA keypair for root, adds the public key to the
  permitted hosts and adds the hostkey to its own known hosts.

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.SSH_LOGIN_USER)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))


def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @type file_name: str
  @param file_name: Path to output file

  """
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
                  backup=True)
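
# Illustrative usage sketch (editor's addition, not part of the original
# module; the path below is hypothetical). The helper writes one freshly
# generated secret, newline-terminated, with mode 0400, backing up any
# existing file:
#
#   GenerateHmacKey("/var/lib/ganeti/hmac.key")
#   key = utils.ReadFile("/var/lib/ganeti/hmac.key").rstrip("\n")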


# pylint: disable=R0913
def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert,
                          new_confd_hmac_key, new_cds,
                          rapi_cert_pem=None, spice_cert_pem=None,
                          spice_cacert_pem=None, cds=None,
                          nodecert_file=pathutils.NODED_CERT_FILE,
                          rapicert_file=pathutils.RAPI_CERT_FILE,
                          spicecert_file=pathutils.SPICE_CERT_FILE,
                          spicecacert_file=pathutils.SPICE_CACERT_FILE,
                          hmackey_file=pathutils.CONFD_HMAC_KEY,
                          cds_file=pathutils.CLUSTER_DOMAIN_SECRET_FILE):
  """Updates the cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type new_spice_cert: bool
  @param new_spice_cert: Whether to generate a new SPICE certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type rapi_cert_pem: string
  @param rapi_cert_pem: New RAPI certificate in PEM format
  @type spice_cert_pem: string
  @param spice_cert_pem: New SPICE certificate in PEM format
  @type spice_cacert_pem: string
  @param spice_cacert_pem: Certificate of the CA that signed the SPICE
    certificate, in PEM format
  @type cds: string
  @param cds: New cluster domain secret
  @type nodecert_file: string
  @param nodecert_file: optional override of the node cert file path
  @type rapicert_file: string
  @param rapicert_file: optional override of the rapi cert file path
  @type spicecert_file: string
  @param spicecert_file: optional override of the spice cert file path
  @type spicecacert_file: string
  @param spicecacert_file: optional override of the spice CA cert file path
  @type hmackey_file: string
  @param hmackey_file: optional override of the hmac key file path
  @type cds_file: string
  @param cds_file: optional override of the cluster domain secret file path

  """
  # pylint: disable=R0913
  # noded SSL certificate
  utils.GenerateNewSslCert(
    new_cluster_cert, nodecert_file, 1,
    "Generating new cluster certificate at %s" % nodecert_file)

  # confd HMAC key
  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)

  if rapi_cert_pem:
    # Assume rapi_cert_pem contains a valid PEM-formatted certificate and key
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)

  else:
    utils.GenerateNewSslCert(
      new_rapi_cert, rapicert_file, 1,
      "Generating new RAPI certificate at %s" % rapicert_file)

  # SPICE
  spice_cert_exists = os.path.exists(spicecert_file)
  spice_cacert_exists = os.path.exists(spicecacert_file)
  if spice_cert_pem:
    # spice_cert_pem implies also spice_cacert_pem
    logging.debug("Writing SPICE certificate at %s", spicecert_file)
    utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True)
    logging.debug("Writing SPICE CA certificate at %s", spicecacert_file)
    utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True)
  elif new_spice_cert or not spice_cert_exists:
    if spice_cert_exists:
      utils.CreateBackup(spicecert_file)
    if spice_cacert_exists:
      utils.CreateBackup(spicecacert_file)

    logging.debug("Generating new self-signed SPICE certificate at %s",
                  spicecert_file)
    (_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file, 1)

    # Self-signed certificate -> the public certificate is also the CA public
    # certificate
    logging.debug("Writing the public certificate to %s",
                  spicecacert_file)
    utils.io.WriteFile(spicecacert_file, mode=0400, data=cert_pem)

  # Cluster domain secret
  if cds:
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)

  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)
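
# Illustrative sketch (editor's addition): the five leading booleans select
# which artifacts to regenerate. For example, to regenerate only the RAPI
# certificate while keeping the node certificate, the SPICE material, the
# confd HMAC key and the cluster domain secret:
#
#   GenerateClusterCrypto(False, True, False, False, False)
#
# Passing rapi_cert_pem instead would install a caller-provided PEM rather
# than generating one.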


def _InitGanetiServerSetup(master_name):
  """Set up the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster, generates the SSL certificate and starts the node daemon.

  @type master_name: str
  @param master_name: Name of the master node

  """
  # Generate cluster secrets
  GenerateClusterCrypto(True, False, False, False, False)

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start", constants.NODED])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForNodeDaemon(master_name)


def _WaitForNodeDaemon(node_name):
  """Wait for node daemon to become responsive.

  """
  def _CheckNodeDaemon():
    # Pylint bug <http://www.logilab.org/ticket/35642>
    # pylint: disable=E1101
    result = rpc.BootstrapRunner().call_version([node_name])[node_name]
    if result.fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckNodeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " %s seconds" % (node_name, _DAEMON_READY_TIMEOUT))
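
# Editor's note: the _WaitFor* helpers in this module share one retry
# contract: the inner check raises utils.RetryAgain to request another
# attempt, and utils.Retry raises utils.RetryTimeout once the overall
# deadline (_DAEMON_READY_TIMEOUT) expires. A minimal sketch of the same
# idiom, with a hypothetical readiness predicate:
#
#   def _CheckSomething():
#     if not _SomethingIsReady():
#       raise utils.RetryAgain()
#
#   utils.Retry(_CheckSomething, 1.0, _DAEMON_READY_TIMEOUT)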


def _WaitForMasterDaemon():
  """Wait for master daemon to become responsive.

  """
  def _CheckMasterDaemon():
    try:
      cl = luxi.Client()
      (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
    except Exception:
      raise utils.RetryAgain()

    logging.debug("Received cluster name %s from master", cluster_name)

  try:
    utils.Retry(_CheckMasterDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Master daemon didn't answer queries within"
                             " %s seconds" % _DAEMON_READY_TIMEOUT)


def _WaitForSshDaemon(hostname, port):
  """Wait for SSH daemon to become responsive.

  """
  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  hostip = netutils.GetHostname(name=hostname, family=family).ip

  def _CheckSshDaemon():
    if netutils.TcpPing(hostip, port, timeout=1.0, live_port_needed=True):
      logging.debug("SSH daemon on %s:%s (IP address %s) has become"
                    " responsive", hostname, port, hostip)
    else:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckSshDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("SSH daemon on %s:%s (IP address %s) didn't"
                             " become responsive within %s seconds" %
                             (hostname, port, hostip, _DAEMON_READY_TIMEOUT))


def RunNodeSetupCmd(cluster_name, node, basecmd, debug, verbose,
                    use_cluster_key, ask_key, strict_host_check,
                    port, data):
  """Runs a command to configure something on a remote machine.

  @type cluster_name: string
  @param cluster_name: Cluster name
  @type node: string
  @param node: Node name
  @type basecmd: string
  @param basecmd: Base command (path on the remote machine)
  @type debug: bool
  @param debug: Enable debug output
  @type verbose: bool
  @param verbose: Enable verbose output
  @type use_cluster_key: bool
  @param use_cluster_key: See L{ssh.SshRunner.BuildCmd}
  @type ask_key: bool
  @param ask_key: See L{ssh.SshRunner.BuildCmd}
  @type strict_host_check: bool
  @param strict_host_check: See L{ssh.SshRunner.BuildCmd}
  @type port: int
  @param port: The SSH port of the remote machine or None for the default
  @param data: JSON-serializable input data for script (passed to stdin)

  """
  cmd = [basecmd]

  # Pass --debug/--verbose to the external script if set on our invocation
  if debug:
    cmd.append("--debug")

  if verbose:
    cmd.append("--verbose")

  logging.debug("Node setup command: %s", cmd)

  version = constants.DIR_VERSION
  all_cmds = [["test", "-d", os.path.join(pathutils.PKGLIBDIR, version)]]
  if constants.HAS_GNU_LN:
    all_cmds.extend([["ln", "-s", "-f", "-T",
                      os.path.join(pathutils.PKGLIBDIR, version),
                      os.path.join(pathutils.SYSCONFDIR, "ganeti/lib")],
                     ["ln", "-s", "-f", "-T",
                      os.path.join(pathutils.SHAREDIR, version),
                      os.path.join(pathutils.SYSCONFDIR, "ganeti/share")]])
  else:
    all_cmds.extend([["rm", "-f",
                      os.path.join(pathutils.SYSCONFDIR, "ganeti/lib")],
                     ["ln", "-s", "-f",
                      os.path.join(pathutils.PKGLIBDIR, version),
                      os.path.join(pathutils.SYSCONFDIR, "ganeti/lib")],
                     ["rm", "-f",
                      os.path.join(pathutils.SYSCONFDIR, "ganeti/share")],
                     ["ln", "-s", "-f",
                      os.path.join(pathutils.SHAREDIR, version),
                      os.path.join(pathutils.SYSCONFDIR, "ganeti/share")]])
  all_cmds.append(cmd)

  if port is None:
    port = netutils.GetDaemonPort(constants.SSH)

  srun = ssh.SshRunner(cluster_name)
  scmd = srun.BuildCmd(node, constants.SSH_LOGIN_USER,
                       utils.ShellQuoteArgs(
                         utils.ShellCombineCommands(all_cmds)),
                       batch=False, ask_key=ask_key, quiet=False,
                       strict_host_check=strict_host_check,
                       use_cluster_key=use_cluster_key,
                       port=port)

  tempfh = tempfile.TemporaryFile()
  try:
    tempfh.write(serializer.DumpJson(data))
    tempfh.seek(0)

    result = utils.RunCmd(scmd, interactive=True, input_fd=tempfh)
  finally:
    tempfh.close()

  if result.failed:
    raise errors.OpExecError("Command '%s' failed: %s" %
                             (result.cmd, result.fail_reason))

  _WaitForSshDaemon(node, port)
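
# Illustrative invocation sketch (editor's addition; host names and the
# payload are hypothetical). This mirrors how SetupNodeDaemon below drives
# the node daemon setup script over SSH, feeding `data` to its stdin as
# JSON:
#
#   RunNodeSetupCmd("cluster.example.com", "node2.example.com",
#                   pathutils.NODE_DAEMON_SETUP,
#                   False, True,         # debug, verbose
#                   True, False, True,   # use_cluster_key, ask_key,
#                                        # strict_host_check
#                   None,                # default SSH port
#                   {"some": "payload"})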


def _InitFileStorageDir(file_storage_dir):
  """Initialize the file storage directory, if needed.

  @param file_storage_dir: the user-supplied value
  @return: either empty string (if file storage was disabled at build
      time) or the normalized path to the storage directory

  """
  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("File storage directory '%s' is not an absolute"
                               " path" % file_storage_dir, errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)

  return file_storage_dir
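
# Behavior sketch (editor's addition; paths are hypothetical):
#
#   _InitFileStorageDir("/srv/ganeti/file-storage")
#   # -> creates the directory (mode 0750) if missing and returns the
#   #    normalized path
#   _InitFileStorageDir("relative/path")
#   # -> raises errors.OpPrereqError, since the path is not absolute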


def _PrepareFileBasedStorage(
    enabled_disk_templates, file_storage_dir,
    default_dir, file_disk_template,
    init_fn=_InitFileStorageDir, acceptance_fn=None):
  """Checks if a file-based storage type is enabled and inits the dir.

  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of enabled disk templates
  @type file_storage_dir: string
  @param file_storage_dir: the file storage directory
  @type default_dir: string
  @param default_dir: default file storage directory when C{file_storage_dir}
      is 'None'
  @type file_disk_template: string
  @param file_disk_template: a disk template whose storage type is 'ST_FILE' or
      'ST_SHARED_FILE'
  @rtype: string
  @returns: the name of the actual file storage directory

  """
  assert (file_disk_template in utils.storage.GetDiskTemplatesOfStorageTypes(
            constants.ST_FILE, constants.ST_SHARED_FILE
          ))

  if file_storage_dir is None:
    file_storage_dir = default_dir
  if not acceptance_fn:
    acceptance_fn = \
        lambda path: filestorage.CheckFileStoragePathAcceptance(
            path, exact_match_ok=True)

  cluster.CheckFileStoragePathVsEnabledDiskTemplates(
      logging.warning, file_storage_dir, enabled_disk_templates)

  file_storage_enabled = file_disk_template in enabled_disk_templates
  if file_storage_enabled:
    try:
      acceptance_fn(file_storage_dir)
    except errors.FileStoragePathError as e:
      raise errors.OpPrereqError(str(e))
    result_file_storage_dir = init_fn(file_storage_dir)
  else:
    result_file_storage_dir = file_storage_dir
  return result_file_storage_dir


def _PrepareFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if file storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      pathutils.DEFAULT_FILE_STORAGE_DIR, constants.DT_FILE,
      init_fn=init_fn, acceptance_fn=acceptance_fn)


def _PrepareSharedFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if shared file storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR, constants.DT_SHARED_FILE,
      init_fn=init_fn, acceptance_fn=acceptance_fn)


def _PrepareGlusterStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if Gluster storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      pathutils.DEFAULT_GLUSTER_STORAGE_DIR, constants.DT_GLUSTER,
      init_fn=init_fn, acceptance_fn=acceptance_fn)
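
# Usage sketch for the wrappers above (editor's addition; values are
# hypothetical). When the matching disk template is enabled, the directory
# is checked for acceptance and initialized; otherwise the value is passed
# through untouched:
#
#   storage_dir = _PrepareFileStorage(
#     [constants.DT_FILE, constants.DT_DRBD8], "/srv/ganeti/file-storage")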


def _InitCheckEnabledDiskTemplates(enabled_disk_templates):
  """Checks the sanity of the enabled disk templates.

  """
  if not enabled_disk_templates:
    raise errors.OpPrereqError("Enabled disk templates list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_disk_templates = \
    set(enabled_disk_templates) - constants.DISK_TEMPLATES
  if invalid_disk_templates:
    raise errors.OpPrereqError("Enabled disk templates list contains invalid"
                               " entries: %s" % invalid_disk_templates,
                               errors.ECODE_INVAL)


def _RestrictIpolicyToEnabledDiskTemplates(ipolicy, enabled_disk_templates):
  """Restricts the ipolicy's disk templates to the enabled ones.

  This function removes from the ipolicy's list of allowed disk
  templates those that are not enabled by the cluster.

  @type ipolicy: dict
  @param ipolicy: the instance policy
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: the list of cluster-wide enabled disk
    templates

  """
  assert constants.IPOLICY_DTS in ipolicy
  allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
  restricted_disk_templates = list(set(allowed_disk_templates)
                                   .intersection(set(enabled_disk_templates)))
  ipolicy[constants.IPOLICY_DTS] = restricted_disk_templates
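
# Worked example (editor's addition): only the intersection of the allowed
# and the enabled templates survives; the resulting order is unspecified,
# because sets are used internally:
#
#   ipolicy = {constants.IPOLICY_DTS: [constants.DT_DRBD8,
#                                      constants.DT_FILE]}
#   _RestrictIpolicyToEnabledDiskTemplates(ipolicy, [constants.DT_DRBD8])
#   assert ipolicy[constants.IPOLICY_DTS] == [constants.DT_DRBD8]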


def _InitCheckDrbdHelper(drbd_helper, drbd_enabled):
  """Checks the DRBD usermode helper.

  @type drbd_helper: string
  @param drbd_helper: name of the DRBD usermode helper that the system should
    use
  @type drbd_enabled: bool
  @param drbd_enabled: whether the DRBD disk template is enabled; if not,
    the check is skipped

  """
  if not drbd_enabled:
    return

  if drbd_helper is not None:
    try:
      curr_helper = drbd.DRBD8.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      raise errors.OpPrereqError("Error while checking drbd helper"
                                 " (disable drbd with --enabled-disk-templates"
                                 " if you are not using drbd): %s" % str(err),
                                 errors.ECODE_ENVIRON)
    if drbd_helper != curr_helper:
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
                                 " is the current helper" % (drbd_helper,
                                                             curr_helper),
                                 errors.ECODE_INVAL)


def InitCluster(cluster_name, mac_prefix, # pylint: disable=R0913, R0914
                master_netmask, master_netdev, file_storage_dir,
                shared_file_storage_dir, gluster_storage_dir,
                candidate_pool_size, secondary_ip=None,
                vg_name=None, beparams=None, nicparams=None, ndparams=None,
                hvparams=None, diskparams=None, enabled_hypervisors=None,
                modify_etc_hosts=True, modify_ssh_setup=True,
                maintain_node_health=False, drbd_helper=None, uid_pool=None,
                default_iallocator=None, default_iallocator_params=None,
                primary_ip_version=None, ipolicy=None,
                prealloc_wipe_disks=False, use_external_mip_script=False,
                hv_state=None, disk_state=None, enabled_disk_templates=None,
                enabled_user_shutdown=False):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of disk_templates to be used in this
    cluster

  @type enabled_user_shutdown: bool
  @param enabled_user_shutdown: whether user shutdown is enabled cluster
    wide

  """
  # TODO: complete the docstring
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  _InitCheckEnabledDiskTemplates(enabled_disk_templates)

  try:
    ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
  except errors.ProgrammerError:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version, errors.ECODE_INVAL)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." % (hostname.ip, primary_ip_version),
                               errors.ECODE_INVAL)

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, pathutils.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if master_netmask is not None:
    if not ipcls.ValidateNetmask(master_netmask):
      raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s " %
                                 (master_netmask, primary_ip_version),
                                 errors.ECODE_INVAL)
  else:
    master_netmask = ipcls.iplen

  if vg_name:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus, errors.ECODE_INVAL)

  drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
  _InitCheckDrbdHelper(drbd_helper, drbd_enabled)

  logging.debug("Stopping daemons (if any are running)")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
  if result.failed:
    raise errors.OpExecError("Could not stop daemons, command %s"
                             " had exitcode %s and error '%s'" %
                             (result.cmd, result.exit_code, result.output))

  file_storage_dir = _PrepareFileStorage(enabled_disk_templates,
                                         file_storage_dir)
  shared_file_storage_dir = _PrepareSharedFileStorage(enabled_disk_templates,
                                                      shared_file_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  if not nicparams.get('mode', None) == constants.NIC_MODE_OVS:
    # Do not do this check if mode=openvswitch, since the openvswitch is not
    # created yet
    result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (master_netdev,
                                  result.output.strip()), errors.ECODE_INVAL)

  dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  objects.UpgradeBeParams(beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  objects.NIC.CheckParameterSyntax(nicparams)

  full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
  _RestrictIpolicyToEnabledDiskTemplates(full_ipolicy, enabled_disk_templates)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
  else:
    ndparams = dict(constants.NDC_DEFAULTS)

  # This is ugly, as we modify the dict itself
  # FIXME: Make utils.ForceDictType pure functional or write a wrapper
  # around it
  if hv_state:
    for hvname, hvs_data in hv_state.items():
      utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
      hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
  else:
    hv_state = dict((hvname, constants.HVST_DEFAULTS)
                    for hvname in enabled_hypervisors)

  # FIXME: disk_state has no default values yet
  if disk_state:
    for storage, ds_data in disk_state.items():
      if storage not in constants.DS_VALID_TYPES:
        raise errors.OpPrereqError("Invalid storage type in disk state: %s" %
                                   storage, errors.ECODE_INVAL)
      for ds_name, state in ds_data.items():
        utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
        ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)

  # hvparams is a mapping of hypervisor->hvparams dict
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # diskparams is a mapping of disk-template->diskparams dict
  for template, dt_params in diskparams.items():
    param_keys = set(dt_params.keys())
    default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
    if not (param_keys <= default_param_keys):
      unknown_params = param_keys - default_param_keys
      raise errors.OpPrereqError("Invalid parameters for disk template %s:"
                                 " %s" % (template,
                                          utils.CommaJoin(unknown_params)),
                                 errors.ECODE_INVAL)
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
    if template == constants.DT_DRBD8 and vg_name is not None:
      # The default METAVG value is equal to the VG name set at init time,
      # if provided
      dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name

  try:
    utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
  except errors.OpPrereqError, err:
    raise errors.OpPrereqError("While verifying diskparam options: %s" % err,
                               errors.ECODE_INVAL)

  # set up ssh config and /etc/hosts
  rsa_sshkey = ""
  dsa_sshkey = ""
  if os.path.isfile(pathutils.SSH_HOST_RSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
    rsa_sshkey = sshline.split(" ")[1]
  if os.path.isfile(pathutils.SSH_HOST_DSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_DSA_PUB)
    dsa_sshkey = sshline.split(" ")[1]
  if not rsa_sshkey and not dsa_sshkey:
    raise errors.OpPrereqError("Failed to find SSH public keys",
                               errors.ECODE_ENVIRON)

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    _InitSSHSetup()

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)
  else:
    # default to htools
    if utils.FindFile(constants.IALLOC_HAIL,
                      constants.IALLOCATOR_SEARCH_PATH,
                      os.path.isfile):
      default_iallocator = constants.IALLOC_HAIL

  # check if we have all the users we need
  try:
    runtime.GetEnts()
  except errors.ConfigurationError, err:
    raise errors.OpPrereqError("Required system user/group missing: %s" %
                               err, errors.ECODE_ENVIRON)

  candidate_certs = {}

  now = time.time()

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=rsa_sshkey,
    dsahostkeypub=dsa_sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_ip=clustername.ip,
    master_netmask=master_netmask,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    shared_file_storage_dir=shared_file_storage_dir,
    gluster_storage_dir=gluster_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    ndparams=ndparams,
    hvparams=hvparams,
    diskparams=diskparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    maintain_node_health=maintain_node_health,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    default_iallocator_params=default_iallocator_params,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    use_external_mip_script=use_external_mip_script,
    ipolicy=full_ipolicy,
    hv_state_static=hv_state,
    disk_state_static=disk_state,
    enabled_disk_templates=enabled_disk_templates,
    candidate_certs=candidate_certs,
    enabled_user_shutdown=enabled_user_shutdown,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    ctime=now, mtime=now,
                                    )
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

  # set up the inter-node password and certificate
  _InitGanetiServerSetup(hostname.name)

  logging.debug("Starting daemons")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
  if result.failed:
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForMasterDaemon()
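
# Minimal invocation sketch (editor's addition; argument values are
# hypothetical and omit many optional parameters that gnt-cluster would
# normally fill in, e.g. a complete ipolicy and per-hypervisor hvparams):
#
#   InitCluster("cluster.example.com", "aa:00:00",
#               None, "eth0",            # master_netmask, master_netdev
#               "", "", "",              # file/shared/gluster storage dirs
#               10,                      # candidate_pool_size
#               vg_name="xenvg",
#               beparams={}, nicparams={}, hvparams={},
#               diskparams={},
#               enabled_hypervisors=[constants.HT_XEN_PVM],
#               primary_ip_version=constants.IP4_VERSION,
#               enabled_disk_templates=[constants.DT_DRBD8])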


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=pathutils.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  uuid_generator = config.TemporaryReservationManager()
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                _INITCONF_ECID)
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                    _INITCONF_ECID)
  cluster_config.master_node = master_node_config.uuid
  nodes = {
    master_node_config.uuid: master_node_config,
    }
  default_nodegroup = objects.NodeGroup(
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
    name=constants.INITIAL_NODE_GROUP_NAME,
    members=[master_node_config.uuid],
    diskparams={},
    )
  nodegroups = {
    default_nodegroup.uuid: default_nodegroup,
    }
  now = time.time()
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodegroups=nodegroups,
                                   nodes=nodes,
                                   instances={},
                                   networks={},
                                   serial_no=1,
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0600)
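
# Editor's note: InitCluster above calls this as
#
#   InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
#
# which writes the very first configuration file; from then on the regular
# config.ConfigWriter machinery takes over.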


def FinalizeClusterDestroy(master_uuid):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  cfg = config.ConfigWriter()
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  runner = rpc.BootstrapRunner()

  master_name = cfg.GetNodeName(master_uuid)

  master_params = cfg.GetMasterNetworkParameters()
  master_params.uuid = master_uuid
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(master_name, master_params,
                                                 ems)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(master_name)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)

  result = runner.call_node_leave_cluster(master_name, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shut down the node daemon and clean up"
                    " the node: %s", msg)


def SetupNodeDaemon(opts, cluster_name, node, ssh_port):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_port: the SSH port of the new node

  """
  data = {
    constants.NDS_CLUSTER_NAME: cluster_name,
    constants.NDS_NODE_DAEMON_CERTIFICATE:
      utils.ReadFile(pathutils.NODED_CERT_FILE),
    constants.NDS_SSCONF: ssconf.SimpleStore().ReadAll(),
    constants.NDS_START_NODE_DAEMON: True,
    }

  RunNodeSetupCmd(cluster_name, node, pathutils.NODE_DAEMON_SETUP,
                  opts.debug, opts.verbose,
                  True, opts.ssh_key_check, opts.ssh_key_check,
                  ssh_port, data)

  _WaitForNodeDaemon(node)


def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
      (dangerous)

  @returns: the pair of an exit code and warnings to display
  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_names = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ("\n".join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = GatherMasterVotes(node_names)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)
  # end checks

  rcode = 0
  warnings = []

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # instantiate a real config writer, as we now know we have the
    # configuration data
    cfg = config.ConfigWriter(accept_foreign=True)

    old_master_node = cfg.GetNodeInfoByName(old_master)
    if old_master_node is None:
      raise errors.OpPrereqError("Could not find old master node '%s' in"
                                 " cluster configuration." % old_master,
                                 errors.ECODE_NOENT)

    cluster_info = cfg.GetClusterInfo()
    new_master_node = cfg.GetNodeInfoByName(new_master)
    if new_master_node is None:
      raise errors.OpPrereqError("Could not find new master node '%s' in"
                                 " cluster configuration." % new_master,
                                 errors.ECODE_NOENT)

    cluster_info.master_node = new_master_node.uuid
    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)
  except errors.ConfigurationError, err:
    logging.error("Error while trying to set the new master: %s",
                  str(err))
    return 1

  # if cfg.Update worked, then it means the old master daemon won't be
  # able now to write its own config file (we rely on locking in both
  # backend.UploadFile() and ConfigWriter._Write()); hence the next
  # step is to kill the old master

  logging.info("Stopping the master daemon on node %s", old_master)

  runner = rpc.BootstrapRunner()
  master_params = cfg.GetMasterNetworkParameters()
  master_params.uuid = old_master_node.uuid
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(old_master,
                                                 master_params, ems)

  msg = result.fail_msg
  if msg:
    warning = "Could not disable the master IP: %s" % (msg,)
    logging.warning("%s", warning)
    warnings.append(warning)

  result = runner.call_node_stop_master(old_master)
  msg = result.fail_msg
  if msg:
    warning = ("Could not disable the master role on the old master"
               " %s, please disable manually: %s" % (old_master, msg))
    logging.error("%s", warning)
    warnings.append(warning)

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30

  # Here we have a phase where no master should be running
  def _check_ip(expected):
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT) != expected:
      raise utils.RetryAgain()

  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout, args=[False])
  except utils.RetryTimeout:
    warning = ("The master IP is still reachable after %s seconds,"
               " continuing but activating the master IP on the current"
               " node will probably fail" % total_timeout)
    logging.warning("%s", warning)
    warnings.append(warning)
    rcode = 1

  if jstore.CheckDrainFlag():
    logging.info("Undraining job queue")
    jstore.SetDrainFlag(False)

  logging.info("Starting the master daemons on the new master")

  result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
                                                                no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  # Finally verify that the new master managed to set up the master IP
  # and warn if it didn't.
  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout, args=[True])
  except utils.RetryTimeout:
    warning = ("The master IP did not come up within %s seconds; the"
               " cluster should still be working and reachable via %s,"
               " but not via the master IP address"
               % (total_timeout, new_master))
    logging.warning("%s", warning)
    warnings.append(warning)
    rcode = 1

  logging.info("Master failed over from %s to %s", old_master, new_master)
  return rcode, warnings
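
# Usage sketch (editor's addition): run on the node that should become the
# new master. The return value pairs an exit code with the warnings
# collected along the way:
#
#   rcode, warnings = MasterFailover()
#   for warning in warnings:
#     logging.warning("%s", warning)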


def GetMaster():
  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of importing ssconf directly, it's better
  to abstract it in bootstrap, where we do use ssconf in other
  functions too.

  """
  sstore = ssconf.SimpleStore()

  old_master, _ = ssconf.GetMasterAndMyself(sstore)

  return old_master


def GatherMasterVotes(node_names):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  @type node_names: list
  @param node_names: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  if not node_names:
    # no nodes
    return []
  results = rpc.BootstrapRunner().call_master_node_name(node_names)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_names))]
  votes = {}
  for node_name in results:
    nres = results[node_name]
    msg = nres.fail_msg

    if msg:
      logging.warning("Error contacting node %s: %s", node_name, msg)
      node = None
    else:
      node = nres.payload

    if node not in votes:
      votes[node] = 1
    else:
      votes[node] += 1

  vote_list = [v for v in votes.items()]
  # sort first on number of votes then on name, since we want None
  # sorted later if we have half of the nodes not responding, and
  # half voting all for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list
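
# Interpretation sketch (editor's addition): with five nodes, one of them
# unreachable and the remaining four agreeing, the result might look like
#
#   [("node1.example.com", 4), (None, 1)]
#
# i.e. the first entry is the consensus master, and a leading None means
# that most nodes did not respond.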