31 """Functions to bootstrap a new cluster.
32
33 """
34
35 import os
36 import os.path
37 import re
38 import logging
39 import time
40
41 from ganeti.cmdlib import cluster
42 import ganeti.rpc.node as rpc
43 from ganeti import ssh
44 from ganeti import utils
45 from ganeti import errors
46 from ganeti import config
47 from ganeti import constants
48 from ganeti import objects
49 from ganeti import ssconf
50 from ganeti import serializer
51 from ganeti import hypervisor
52 from ganeti.storage import drbd
53 from ganeti.storage import filestorage
54 from ganeti import netutils
55 from ganeti import luxi
56 from ganeti import jstore
57 from ganeti import pathutils
58 from ganeti import runtime
59 from ganeti import vcluster
60
61
62
63 _INITCONF_ECID = "initconfig-ecid"
64
65
66 _DAEMON_READY_TIMEOUT = 10.0


def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @type file_name: str
  @param file_name: Path to output file

  """
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
                  backup=True)
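
# Illustrative use (the path is the standard Ganeti location; adjust for a
# non-default LOCALSTATEDIR build):
#
#   GenerateHmacKey(pathutils.CONFD_HMAC_KEY)
#
# writes a fresh secret with mode 0400, backing up any previous key file.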


def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert,
                          new_confd_hmac_key, new_cds, new_client_cert,
                          master_name,
                          rapi_cert_pem=None, spice_cert_pem=None,
                          spice_cacert_pem=None, cds=None,
                          nodecert_file=pathutils.NODED_CERT_FILE,
                          clientcert_file=pathutils.NODED_CLIENT_CERT_FILE,
                          rapicert_file=pathutils.RAPI_CERT_FILE,
                          spicecert_file=pathutils.SPICE_CERT_FILE,
                          spicecacert_file=pathutils.SPICE_CACERT_FILE,
                          hmackey_file=pathutils.CONFD_HMAC_KEY,
                          cds_file=pathutils.CLUSTER_DOMAIN_SECRET_FILE):
  """Updates the cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type new_spice_cert: bool
  @param new_spice_cert: Whether to generate a new SPICE certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type new_client_cert: bool
  @param new_client_cert: Whether to generate a new client certificate
  @type master_name: string
  @param master_name: FQDN of the master node
  @type rapi_cert_pem: string
  @param rapi_cert_pem: New RAPI certificate in PEM format
  @type spice_cert_pem: string
  @param spice_cert_pem: New SPICE certificate in PEM format
  @type spice_cacert_pem: string
  @param spice_cacert_pem: Certificate of the CA that signed the SPICE
    certificate, in PEM format
  @type cds: string
  @param cds: New cluster domain secret
  @type nodecert_file: string
  @param nodecert_file: optional override of the node cert file path
  @type clientcert_file: string
  @param clientcert_file: optional override of the node client cert file path
  @type rapicert_file: string
  @param rapicert_file: optional override of the rapi cert file path
  @type spicecert_file: string
  @param spicecert_file: optional override of the spice cert file path
  @type spicecacert_file: string
  @param spicecacert_file: optional override of the spice CA cert file path
  @type hmackey_file: string
  @param hmackey_file: optional override of the hmac key file path
  @type cds_file: string
  @param cds_file: optional override of the cluster domain secret file path

  """
  # noded SSL certificate
  utils.GenerateNewSslCert(
    new_cluster_cert, nodecert_file, 1,
    "Generating new cluster certificate at %s" % nodecert_file)

  # If the cluster certificate was renewed, the client certificate has to be
  # renewed and resigned as well
  if new_cluster_cert or new_client_cert:
    utils.GenerateNewClientSslCert(clientcert_file, nodecert_file,
                                   master_name)

  # confd HMAC key
  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)

  if rapi_cert_pem:
    # Assume rapi_cert_pem contains a valid PEM-formatted certificate and key
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)
  else:
    utils.GenerateNewSslCert(
      new_rapi_cert, rapicert_file, 1,
      "Generating new RAPI certificate at %s" % rapicert_file)

  # SPICE
  spice_cert_exists = os.path.exists(spicecert_file)
  spice_cacert_exists = os.path.exists(spicecacert_file)
  if spice_cert_pem:
    # spice_cert_pem implies also spice_cacert_pem
    logging.debug("Writing SPICE certificate at %s", spicecert_file)
    utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True)
    logging.debug("Writing SPICE CA certificate at %s", spicecacert_file)
    utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True)
  elif new_spice_cert or not spice_cert_exists:
    if spice_cert_exists:
      utils.CreateBackup(spicecert_file)
    if spice_cacert_exists:
      utils.CreateBackup(spicecacert_file)

    logging.debug("Generating new self-signed SPICE certificate at %s",
                  spicecert_file)
    (_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file, 1)

    # Self-signed certificate -> the public certificate is also the CA public
    # certificate
    logging.debug("Writing the public certificate to %s",
                  spicecacert_file)
    utils.io.WriteFile(spicecacert_file, mode=0400, data=cert_pem)

  # Cluster domain secret
  if cds:
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)
  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)
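
# Illustrative call: renew only the RAPI certificate, leaving the cluster
# certificate, SPICE material, HMAC key and cluster domain secret untouched
# ("master.example.com" is a placeholder master name):
#
#   GenerateClusterCrypto(False, True, False, False, False, False,
#                         "master.example.com")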


def _InitGanetiServerSetup(master_name, cfg):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster, generates the SSL certificate and starts the node daemon.

  @type master_name: str
  @param master_name: Name of the master node
  @type cfg: ConfigWriter
  @param cfg: the configuration writer

  """
  # Generate cluster secrets
  GenerateClusterCrypto(True, False, False, False, False, False, master_name)

  # Register the master's certificate digest as a master candidate cert
  master_uuid = cfg.GetMasterNode()
  master_digest = utils.GetCertificateDigest()
  cfg.AddNodeToCandidateCerts(master_uuid, master_digest)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

  if not os.path.exists(
      os.path.join(pathutils.DATA_DIR,
                   "%s%s" % (constants.SSCONF_FILEPREFIX,
                             constants.SS_MASTER_CANDIDATES_CERTS))):
    raise errors.OpExecError("Ssconf file for master candidate certificates"
                             " was not written.")

  if not os.path.exists(pathutils.NODED_CERT_FILE):
    raise errors.OpExecError("The server certificate was not created"
                             " properly.")

  if not os.path.exists(pathutils.NODED_CLIENT_CERT_FILE):
    raise errors.OpExecError("The client certificate was not created"
                             " properly.")

  # Start the node daemon so the rest of the bootstrap can use RPC
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start", constants.NODED])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForNodeDaemon(master_name)


def _WaitForNodeDaemon(node_name):
  """Wait for the node daemon on the given node to become responsive.

  """
  def _CheckNodeDaemon():
    # An RPC failure here means the daemon is not (yet) responsive
    result = rpc.BootstrapRunner().call_version([node_name])[node_name]
    if result.fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckNodeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " %s seconds" % (node_name, _DAEMON_READY_TIMEOUT))


def _WaitForMasterDaemon():
  """Wait for the master daemon to become responsive.

  """
  def _CheckMasterDaemon():
    try:
      cl = luxi.Client()
      (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
    except Exception:
      raise utils.RetryAgain()

    logging.debug("Received cluster name %s from master", cluster_name)

  try:
    utils.Retry(_CheckMasterDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Master daemon didn't answer queries within"
                             " %s seconds" % _DAEMON_READY_TIMEOUT)


def _WaitForSshDaemon(hostname, port):
  """Wait for the SSH daemon on the given host to become responsive.

  """
  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  hostip = netutils.GetHostname(name=hostname, family=family).ip

  def _CheckSshDaemon():
    if netutils.TcpPing(hostip, port, timeout=1.0, live_port_needed=True):
      logging.debug("SSH daemon on %s:%s (IP address %s) has become"
                    " responsive", hostname, port, hostip)
    else:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckSshDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("SSH daemon on %s:%s (IP address %s) didn't"
                             " become responsive within %s seconds" %
                             (hostname, port, hostip, _DAEMON_READY_TIMEOUT))


def _InitFileStorageDir(file_storage_dir):
  """Initialize if needed the file storage.

  @param file_storage_dir: the user-supplied value
  @return: either empty string (if file storage was disabled at build
      time) or the normalized path to the storage directory

  """
  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("File storage directory '%s' is not an absolute"
                               " path" % file_storage_dir, errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)

  return file_storage_dir


def _PrepareFileBasedStorage(
    enabled_disk_templates, file_storage_dir,
    default_dir, file_disk_template,
    _storage_path_acceptance_fn,
    init_fn=_InitFileStorageDir, acceptance_fn=None):
  """Checks if a file-based storage type is enabled and inits the dir.

  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of enabled disk templates
  @type file_storage_dir: string
  @param file_storage_dir: the file storage directory
  @type default_dir: string
  @param default_dir: default file storage directory when C{file_storage_dir}
      is 'None'
  @type file_disk_template: string
  @param file_disk_template: a disk template whose storage type is 'ST_FILE',
      'ST_SHARED_FILE' or 'ST_GLUSTER'
  @type _storage_path_acceptance_fn: function
  @param _storage_path_acceptance_fn: checks whether the given file-based
      storage directory is acceptable
  @see: C{cluster.CheckFileBasedStoragePathVsEnabledDiskTemplates} for details

  @rtype: string
  @returns: the name of the actual file storage directory

  """
  assert (file_disk_template in utils.storage.GetDiskTemplatesOfStorageTypes(
            constants.ST_FILE, constants.ST_SHARED_FILE, constants.ST_GLUSTER
         ))

  if file_storage_dir is None:
    file_storage_dir = default_dir
  if not acceptance_fn:
    acceptance_fn = \
        lambda path: filestorage.CheckFileStoragePathAcceptance(
            path, exact_match_ok=True)

  _storage_path_acceptance_fn(logging.warning, file_storage_dir,
                              enabled_disk_templates)

  file_storage_enabled = file_disk_template in enabled_disk_templates
  if file_storage_enabled:
    try:
      acceptance_fn(file_storage_dir)
    except errors.FileStoragePathError as e:
      raise errors.OpPrereqError(str(e))
    result_file_storage_dir = init_fn(file_storage_dir)
  else:
    result_file_storage_dir = file_storage_dir
  return result_file_storage_dir


def _PrepareFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if file storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      pathutils.DEFAULT_FILE_STORAGE_DIR, constants.DT_FILE,
      cluster.CheckFileStoragePathVsEnabledDiskTemplates,
      init_fn=init_fn, acceptance_fn=acceptance_fn)


def _PrepareSharedFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if shared file storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR, constants.DT_SHARED_FILE,
      cluster.CheckSharedFileStoragePathVsEnabledDiskTemplates,
      init_fn=init_fn, acceptance_fn=acceptance_fn)


def _PrepareGlusterStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if gluster storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      pathutils.DEFAULT_GLUSTER_STORAGE_DIR, constants.DT_GLUSTER,
      cluster.CheckGlusterStoragePathVsEnabledDiskTemplates,
      init_fn=init_fn, acceptance_fn=acceptance_fn)


def _InitCheckEnabledDiskTemplates(enabled_disk_templates):
425 """Checks the sanity of the enabled disk templates.
426
427 """
428 if not enabled_disk_templates:
429 raise errors.OpPrereqError("Enabled disk templates list must contain at"
430 " least one member", errors.ECODE_INVAL)
431 invalid_disk_templates = \
432 set(enabled_disk_templates) - constants.DISK_TEMPLATES
433 if invalid_disk_templates:
434 raise errors.OpPrereqError("Enabled disk templates list contains invalid"
435 " entries: %s" % invalid_disk_templates,
436 errors.ECODE_INVAL)
437


def _RestrictIpolicyToEnabledDiskTemplates(ipolicy, enabled_disk_templates):
  """Restricts the ipolicy's disk templates to the enabled ones.

  This function clears the ipolicy's list of allowed disk templates from the
  ones that are not enabled by the cluster.

  @type ipolicy: dict
  @param ipolicy: the instance policy
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: the list of cluster-wide enabled disk
    templates

  """
  assert constants.IPOLICY_DTS in ipolicy
  allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
  restricted_disk_templates = list(set(allowed_disk_templates)
                                   .intersection(set(enabled_disk_templates)))
  ipolicy[constants.IPOLICY_DTS] = restricted_disk_templates
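
# Worked example (hypothetical values): with
# ipolicy[constants.IPOLICY_DTS] == ["drbd", "plain", "file"] and
# enabled_disk_templates == ["plain", "drbd"], the function above narrows the
# allowed templates to the intersection {"drbd", "plain"} (order is not
# preserved, since sets are used).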


def _InitCheckDrbdHelper(drbd_helper, drbd_enabled):
  """Checks the DRBD usermode helper.

  @type drbd_helper: string
  @param drbd_helper: name of the DRBD usermode helper that the system should
    use

  """
  if not drbd_enabled:
    return

  if drbd_helper is not None:
    try:
      curr_helper = drbd.DRBD8.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      raise errors.OpPrereqError("Error while checking drbd helper"
                                 " (disable drbd with --enabled-disk-templates"
                                 " if you are not using drbd): %s" % str(err),
                                 errors.ECODE_ENVIRON)
    if drbd_helper != curr_helper:
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
                                 " is the current helper" % (drbd_helper,
                                                             curr_helper),
                                 errors.ECODE_INVAL)


def InitCluster(cluster_name, mac_prefix,
                master_netmask, master_netdev, file_storage_dir,
                shared_file_storage_dir, gluster_storage_dir,
                candidate_pool_size, secondary_ip=None,
                vg_name=None, beparams=None, nicparams=None, ndparams=None,
                hvparams=None, diskparams=None, enabled_hypervisors=None,
                modify_etc_hosts=True, modify_ssh_setup=True,
                maintain_node_health=False, drbd_helper=None, uid_pool=None,
                default_iallocator=None, default_iallocator_params=None,
                primary_ip_version=None, ipolicy=None,
                prealloc_wipe_disks=False, use_external_mip_script=False,
                hv_state=None, disk_state=None, enabled_disk_templates=None,
                install_image=None, zeroing_image=None, compression_tools=None,
                enabled_user_shutdown=False):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of disk_templates to be used in this
    cluster

  @type enabled_user_shutdown: bool
  @param enabled_user_shutdown: whether user shutdown is enabled cluster
    wide

  """
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  data_dir = vcluster.AddNodePrefix(pathutils.DATA_DIR)
  queue_dir = vcluster.AddNodePrefix(pathutils.QUEUE_DIR)
  archive_dir = vcluster.AddNodePrefix(pathutils.JOB_QUEUE_ARCHIVE_DIR)
  for ddir in [queue_dir, data_dir, archive_dir]:
    if os.path.isdir(ddir):
      for entry in os.listdir(ddir):
        if not os.path.isdir(os.path.join(ddir, entry)):
          raise errors.OpPrereqError(
            "%s contains non-directory entries like %s. Remove left-overs of"
            " an old cluster before initialising a new one" % (ddir, entry),
            errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  _InitCheckEnabledDiskTemplates(enabled_disk_templates)

  try:
    ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
  except errors.ProgrammerError:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version, errors.ECODE_INVAL)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." % (hostname.ip, primary_ip_version),
                               errors.ECODE_INVAL)

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, pathutils.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if master_netmask is not None:
    if not ipcls.ValidateNetmask(master_netmask):
      raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s" %
                                 (master_netmask, primary_ip_version),
                                 errors.ECODE_INVAL)
  else:
    master_netmask = ipcls.iplen

  if vg_name:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus, errors.ECODE_INVAL)

  drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
  _InitCheckDrbdHelper(drbd_helper, drbd_enabled)

  logging.debug("Stopping daemons (if any are running)")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
  if result.failed:
    raise errors.OpExecError("Could not stop daemons, command %s"
                             " had exitcode %s and error '%s'" %
                             (result.cmd, result.exit_code, result.output))

  file_storage_dir = _PrepareFileStorage(enabled_disk_templates,
                                         file_storage_dir)
  shared_file_storage_dir = _PrepareSharedFileStorage(enabled_disk_templates,
                                                      shared_file_storage_dir)
  gluster_storage_dir = _PrepareGlusterStorage(enabled_disk_templates,
                                               gluster_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)
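
  # The regex above accepts exactly three lower-case alphanumeric octet
  # pairs, e.g. "aa:00:00"; an upper-case prefix such as "AA:00:00" is
  # rejected, so normalize user input to lower case beforehand.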

  if not nicparams.get('mode', None) == constants.NIC_MODE_OVS:
    # Do not do this check if mode=openvswitch, since the openvswitch is not
    # created yet
    result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (master_netdev,
                                  result.output.strip()), errors.ECODE_INVAL)

  dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  objects.UpgradeBeParams(beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  objects.NIC.CheckParameterSyntax(nicparams)

  full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
  _RestrictIpolicyToEnabledDiskTemplates(full_ipolicy, enabled_disk_templates)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
  else:
    ndparams = dict(constants.NDC_DEFAULTS)

  # Note: hv_state and disk_state are filled in place below
  if hv_state:
    for hvname, hvs_data in hv_state.items():
      utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
      hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
  else:
    hv_state = dict((hvname, constants.HVST_DEFAULTS)
                    for hvname in enabled_hypervisors)

  if disk_state:
    for storage, ds_data in disk_state.items():
      if storage not in constants.DS_VALID_TYPES:
        raise errors.OpPrereqError("Invalid storage type in disk state: %s" %
                                   storage, errors.ECODE_INVAL)
      for ds_name, state in ds_data.items():
        utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
        ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)

  # Check the syntax of each hypervisor's parameters
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # Check that the disk template parameters are only known ones
  for template, dt_params in diskparams.items():
    param_keys = set(dt_params.keys())
    default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
    if not (param_keys <= default_param_keys):
      unknown_params = param_keys - default_param_keys
      raise errors.OpPrereqError("Invalid parameters for disk template %s:"
                                 " %s" % (template,
                                          utils.CommaJoin(unknown_params)),
                                 errors.ECODE_INVAL)
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
    if template == constants.DT_DRBD8 and vg_name is not None:
      # The default METAVG value is equal to the VG name set at init time,
      # if provided
      dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name

  try:
    utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
  except errors.OpPrereqError, err:
    raise errors.OpPrereqError("While verifying diskparams options: %s" % err,
                               errors.ECODE_INVAL)

  # set up ssh config and /etc/hosts
  rsa_sshkey = ""
  dsa_sshkey = ""
  if os.path.isfile(pathutils.SSH_HOST_RSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
    rsa_sshkey = sshline.split(" ")[1]
  if os.path.isfile(pathutils.SSH_HOST_DSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_DSA_PUB)
    dsa_sshkey = sshline.split(" ")[1]
  if not rsa_sshkey and not dsa_sshkey:
    raise errors.OpPrereqError("Failed to find SSH public keys",
                               errors.ECODE_ENVIRON)

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    ssh.InitSSHSetup()

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)
  else:
    # default to htools
    if utils.FindFile(constants.IALLOC_HAIL,
                      constants.IALLOCATOR_SEARCH_PATH,
                      os.path.isfile):
      default_iallocator = constants.IALLOC_HAIL

  # check that the required system users/groups exist
  try:
    runtime.GetEnts()
  except errors.ConfigurationError, err:
    raise errors.OpPrereqError("Required system user/group missing: %s" %
                               err, errors.ECODE_ENVIRON)

  candidate_certs = {}

  now = time.time()

  if compression_tools is not None:
    cluster.CheckCompressionTools(compression_tools)

  initial_dc_config = dict(active=True,
                           interval=int(constants.MOND_TIME_INTERVAL * 1e6))
  data_collectors = dict(
    (name, initial_dc_config.copy())
    for name in constants.DATA_COLLECTOR_NAMES)

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=rsa_sshkey,
    dsahostkeypub=dsa_sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_ip=clustername.ip,
    master_netmask=master_netmask,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    shared_file_storage_dir=shared_file_storage_dir,
    gluster_storage_dir=gluster_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    ndparams=ndparams,
    hvparams=hvparams,
    diskparams=diskparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    maintain_node_health=maintain_node_health,
    data_collectors=data_collectors,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    default_iallocator_params=default_iallocator_params,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    use_external_mip_script=use_external_mip_script,
    ipolicy=full_ipolicy,
    hv_state_static=hv_state,
    disk_state_static=disk_state,
    enabled_disk_templates=enabled_disk_templates,
    candidate_certs=candidate_certs,
    osparams={},
    osparams_private_cluster={},
    install_image=install_image,
    zeroing_image=zeroing_image,
    compression_tools=compression_tools,
    enabled_user_shutdown=enabled_user_shutdown,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    ctime=now, mtime=now,
                                    )
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

  master_uuid = cfg.GetMasterNode()
  if modify_ssh_setup:
    ssh.InitPubKeyFile(master_uuid)

  _InitGanetiServerSetup(hostname.name, cfg)

  logging.debug("Starting daemons")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
  if result.failed:
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForMasterDaemon()


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=pathutils.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  uuid_generator = config.TemporaryReservationManager()
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                _INITCONF_ECID)
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                    _INITCONF_ECID)
  cluster_config.master_node = master_node_config.uuid
  nodes = {
    master_node_config.uuid: master_node_config,
    }
  default_nodegroup = objects.NodeGroup(
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
    name=constants.INITIAL_NODE_GROUP_NAME,
    members=[master_node_config.uuid],
    diskparams={},
    )
  nodegroups = {
    default_nodegroup.uuid: default_nodegroup,
    }
  now = time.time()
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodegroups=nodegroups,
                                   nodes=nodes,
                                   instances={},
                                   networks={},
                                   disks={},
                                   filters={},
                                   serial_no=1,
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0600)


def FinalizeClusterDestroy(master_uuid):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  livelock = utils.livelock.LiveLock("bootstrap_destroy")
  cfg = config.GetConfig(None, livelock)
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  runner = rpc.BootstrapRunner()

  master_name = cfg.GetNodeName(master_uuid)

  master_params = cfg.GetMasterNetworkParameters()
  master_params.uuid = master_uuid
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(master_name, master_params,
                                                 ems)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(master_name)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)

  result = runner.call_node_leave_cluster(master_name, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shutdown the node daemon and cleanup"
                    " the node: %s", msg)


def SetupNodeDaemon(opts, cluster_name, node, ssh_port):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param opts: the command line options
  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_port: the SSH port of the new node

  """
  data = {
    constants.NDS_CLUSTER_NAME: cluster_name,
    constants.NDS_NODE_DAEMON_CERTIFICATE:
      utils.ReadFile(pathutils.NODED_CERT_FILE),
    constants.NDS_SSCONF: ssconf.SimpleStore().ReadAll(),
    constants.NDS_START_NODE_DAEMON: True,
    constants.NDS_NODE_NAME: node,
    }

  ssh.RunSshCmdWithStdin(cluster_name, node, pathutils.NODE_DAEMON_SETUP,
                         ssh_port, data,
                         debug=opts.debug, verbose=opts.verbose,
                         use_cluster_key=True, ask_key=opts.ssh_key_check,
                         strict_host_check=opts.ssh_key_check,
                         ensure_version=True)

  _WaitForSshDaemon(node, ssh_port)
  _WaitForNodeDaemon(node)


def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
    (dangerous)

  @returns: the pair of an exit code and warnings to display

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_names = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ("\n".join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = GatherMasterVotes(node_names)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)

  rcode = 0
  warnings = []

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # Forcefully start WConfd so that we can access the configuration
    result = utils.RunCmd([pathutils.DAEMON_UTIL,
                           "start", constants.WCONFD, "--force-node",
                           "--no-voting", "--yes-do-it"])
    if result.failed:
      raise errors.OpPrereqError("Could not start the configuration daemon,"
                                 " command %s had exitcode %s and error %s" %
                                 (result.cmd, result.exit_code, result.output),
                                 errors.ECODE_NOENT)

    # Instantiate a config writer directly, accepting the (still foreign)
    # configuration written by the old master
    livelock = utils.livelock.LiveLock("bootstrap_failover")
    cfg = config.GetConfig(None, livelock, accept_foreign=True)

    old_master_node = cfg.GetNodeInfoByName(old_master)
    if old_master_node is None:
      raise errors.OpPrereqError("Could not find old master node '%s' in"
                                 " cluster configuration." % old_master,
                                 errors.ECODE_NOENT)

    cluster_info = cfg.GetClusterInfo()
    new_master_node = cfg.GetNodeInfoByName(new_master)
    if new_master_node is None:
      raise errors.OpPrereqError("Could not find new master node '%s' in"
                                 " cluster configuration." % new_master,
                                 errors.ECODE_NOENT)

    cluster_info.master_node = new_master_node.uuid

    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)

    # With the configuration updated, the old master daemon must no longer
    # act as master; stop its daemons next

    logging.info("Stopping the master daemon on node %s", old_master)

    runner = rpc.BootstrapRunner()
    master_params = cfg.GetMasterNetworkParameters()
    master_params.uuid = old_master_node.uuid
    ems = cfg.GetUseExternalMipScript()
    result = runner.call_node_deactivate_master_ip(old_master,
                                                   master_params, ems)

    msg = result.fail_msg
    if msg:
      warning = "Could not disable the master IP: %s" % (msg,)
      logging.warning("%s", warning)
      warnings.append(warning)

    result = runner.call_node_stop_master(old_master)
    msg = result.fail_msg
    if msg:
      warning = ("Could not disable the master role on the old master"
                 " %s, please disable manually: %s" % (old_master, msg))
      logging.error("%s", warning)
      warnings.append(warning)
  except errors.ConfigurationError, err:
    logging.error("Error while trying to set the new master: %s",
                  str(err))
    return 1, warnings
  finally:
    # stop WConfd again
    result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop", constants.WCONFD])
    if result.failed:
      warning = ("Could not stop the configuration daemon,"
                 " command %s had exitcode %s and error %s"
                 % (result.cmd, result.exit_code, result.output))
      logging.error("%s", warning)
      rcode = 1

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30

  # Here we have a phase where no master should be running
  def _check_ip(expected):
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT) != expected:
      raise utils.RetryAgain()
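
  # Assumption about the retry schedule used below: utils.Retry's tuple delay
  # form is (start, factor, maximum), i.e. poll after 1 second, backing off
  # by a factor of 1.5 up to 5 seconds between attempts, until total_timeout
  # is reached.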

  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout, args=[False])
  except utils.RetryTimeout:
    warning = ("The master IP is still reachable after %s seconds,"
               " continuing but activating the master IP on the current"
               " node will probably fail" % total_timeout)
    logging.warning("%s", warning)
    warnings.append(warning)
    rcode = 1

  if jstore.CheckDrainFlag():
    logging.info("Undraining job queue")
    jstore.SetDrainFlag(False)

  logging.info("Starting the master daemons on the new master")

  result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
                                                                no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  # Finally verify that the new master managed to set up the master IP
  # and warn if not
  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout, args=[True])
  except utils.RetryTimeout:
    warning = ("The master IP did not come up within %s seconds; the"
               " cluster should still be working and reachable via %s,"
               " but not via the master IP address"
               % (total_timeout, new_master))
    logging.warning("%s", warning)
    warnings.append(warning)
    rcode = 1

  logging.info("Master failed over from %s to %s", old_master, new_master)
  return rcode, warnings


def GetMaster():
  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of importing directly ssconf, it's better
  to abstract it in bootstrap, where we do use ssconf in other
  functions too.

  """
  sstore = ssconf.SimpleStore()

  old_master, _ = ssconf.GetMasterAndMyself(sstore)

  return old_master


def GatherMasterVotes(node_names):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  @type node_names: list
  @param node_names: the list of nodes to query for master info
  @rtype: list
  @return: list of (node, votes)

  """
  if not node_names:
    # no nodes to query
    return []
  results = rpc.BootstrapRunner().call_master_node_name(node_names)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_names))]
  votes = {}
  for node_name in results:
    nres = results[node_name]
    msg = nres.fail_msg

    if msg:
      logging.warning("Error contacting node %s: %s", node_name, msg)
      node = None
    else:
      node = nres.payload

    if node not in votes:
      votes[node] = 1
    else:
      votes[node] += 1

  vote_list = list(votes.items())
  # sort first on number of votes then on name, since we want None
  # (representing unreachable nodes) sorted later if half of the nodes do
  # not respond while the other half votes for the same master
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list
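
# Illustrative result shape: querying five nodes of which four name "node1"
# as master and one fails to answer yields
#   [("node1", 4), (None, 1)]
# where the None entry aggregates the unreachable nodes.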


def MajorityHealthy():
  """Check if the majority of nodes is healthy.

  Gather master votes from all nodes known to this node;
  return True if a strict majority of nodes is reachable and
  has some opinion on which node is master. Note that this will
  not guarantee any node to win an election but it ensures that
  a standard master-failover is still possible.

  """
  node_names = ssconf.SimpleStore().GetNodeList()
  node_count = len(node_names)
  vote_list = GatherMasterVotes(node_names)
  if vote_list is None:
    return False
  total_votes = sum([count for (node, count) in vote_list if node is not None])
  logging.info("Total %d nodes, %d votes: %s", node_count, total_votes,
               vote_list)
  return 2 * total_votes > node_count
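
# Worked example of the strict-majority check above (hypothetical counts):
# with node_count == 5, "2 * total_votes > node_count" requires at least 3
# votes; with node_count == 4, two reachable nodes give 2 * 2 == 4, which is
# not > 4, so a 50% split is not considered healthy.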