"""Functions to bootstrap a new cluster.

"""

import os
import os.path
import re
import logging
import time

from ganeti.cmdlib import cluster
import ganeti.rpc.node as rpc
from ganeti import ssh
from ganeti import utils
from ganeti import errors
from ganeti import config
from ganeti import constants
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import hypervisor
from ganeti.storage import drbd
from ganeti.storage import filestorage
from ganeti import netutils
from ganeti import luxi
from ganeti import jstore
from ganeti import pathutils
from ganeti import runtime
from ganeti import vcluster


_INITCONF_ECID = "initconfig-ecid"

#: After how many seconds a newly started daemon must be responsive
_DAEMON_READY_TIMEOUT = 10.0


def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @type file_name: str
  @param file_name: Path to output file

  """
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
                  backup=True)
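
# Example (hypothetical path): GenerateHmacKey("/var/lib/ganeti/hmac.key")
# writes a freshly generated secret, readable only by its owner (mode 0400),
# and backs up any pre-existing key file.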


def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert,
                          new_confd_hmac_key, new_cds, new_client_cert,
                          master_name,
                          rapi_cert_pem=None, spice_cert_pem=None,
                          spice_cacert_pem=None, cds=None,
                          nodecert_file=pathutils.NODED_CERT_FILE,
                          clientcert_file=pathutils.NODED_CLIENT_CERT_FILE,
                          rapicert_file=pathutils.RAPI_CERT_FILE,
                          spicecert_file=pathutils.SPICE_CERT_FILE,
                          spicecacert_file=pathutils.SPICE_CACERT_FILE,
                          hmackey_file=pathutils.CONFD_HMAC_KEY,
                          cds_file=pathutils.CLUSTER_DOMAIN_SECRET_FILE):
  """Updates the cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type new_spice_cert: bool
  @param new_spice_cert: Whether to generate a new SPICE certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type new_client_cert: bool
  @param new_client_cert: Whether to generate a new client certificate
  @type master_name: string
  @param master_name: FQDN of the master node
  @type rapi_cert_pem: string
  @param rapi_cert_pem: New RAPI certificate in PEM format
  @type spice_cert_pem: string
  @param spice_cert_pem: New SPICE certificate in PEM format
  @type spice_cacert_pem: string
  @param spice_cacert_pem: Certificate of the CA that signed the SPICE
      certificate, in PEM format
  @type cds: string
  @param cds: New cluster domain secret
  @type nodecert_file: string
  @param nodecert_file: optional override of the node cert file path
  @type clientcert_file: string
  @param clientcert_file: optional override of the node client cert file path
  @type rapicert_file: string
  @param rapicert_file: optional override of the rapi cert file path
  @type spicecert_file: string
  @param spicecert_file: optional override of the spice cert file path
  @type spicecacert_file: string
  @param spicecacert_file: optional override of the spice CA cert file path
  @type hmackey_file: string
  @param hmackey_file: optional override of the hmac key file path
  @type cds_file: string
  @param cds_file: optional override of the cluster domain secret file path

  """
  # noded SSL certificate
  utils.GenerateNewSslCert(
    new_cluster_cert, nodecert_file, 1,
    "Generating new cluster certificate at %s" % nodecert_file)

  # If the cluster certificate was renewed, the client certificate must be
  # renewed too, as it is signed by the cluster certificate
  if new_cluster_cert or new_client_cert:
    utils.GenerateNewClientSslCert(clientcert_file, nodecert_file,
                                   master_name)

  # confd HMAC key
  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)

  if rapi_cert_pem:
    # Assume rapi_cert_pem contains a valid PEM-formatted certificate and key
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)
  else:
    utils.GenerateNewSslCert(
      new_rapi_cert, rapicert_file, 1,
      "Generating new RAPI certificate at %s" % rapicert_file)

  # SPICE
  spice_cert_exists = os.path.exists(spicecert_file)
  spice_cacert_exists = os.path.exists(spicecacert_file)
  if spice_cert_pem:
    # spice_cert_pem implies also spice_cacert_pem
    logging.debug("Writing SPICE certificate at %s", spicecert_file)
    utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True)
    logging.debug("Writing SPICE CA certificate at %s", spicecacert_file)
    utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True)
  elif new_spice_cert or not spice_cert_exists:
    if spice_cert_exists:
      utils.CreateBackup(spicecert_file)
    if spice_cacert_exists:
      utils.CreateBackup(spicecacert_file)

    logging.debug("Generating new self-signed SPICE certificate at %s",
                  spicecert_file)
    (_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file, 1)

    # Self-signed certificate -> the public certificate is also the CA public
    # certificate
    logging.debug("Writing the public certificate to %s",
                  spicecacert_file)
    utils.io.WriteFile(spicecacert_file, mode=0400, data=cert_pem)

  # Cluster domain secret
  if cds:
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)
  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)
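
# Example (hypothetical host name): renew only the RAPI certificate while
# keeping all existing secrets (missing ones are still created):
#
#   GenerateClusterCrypto(False, True, False, False, False, False,
#                         "master.example.com")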


def _InitGanetiServerSetup(master_name, cfg):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster, generates the SSL certificate and starts the node daemon.

  @type master_name: str
  @param master_name: Name of the master node
  @type cfg: ConfigWriter
  @param cfg: the configuration writer

  """
  # Generate cluster secrets
  GenerateClusterCrypto(True, False, False, False, False, False, master_name)

  # Add the master's SSL certificate digest to the configuration
  master_uuid = cfg.GetMasterNode()
  master_digest = utils.GetCertificateDigest()
  cfg.AddNodeToCandidateCerts(master_uuid, master_digest)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

  if not os.path.exists(
      os.path.join(pathutils.DATA_DIR,
                   "%s%s" % (constants.SSCONF_FILEPREFIX,
                             constants.SS_MASTER_CANDIDATES_CERTS))):
    raise errors.OpExecError("Ssconf file for master candidate certificates"
                             " was not written.")

  if not os.path.exists(pathutils.NODED_CERT_FILE):
    raise errors.OpExecError("The server certificate was not created"
                             " properly.")

  if not os.path.exists(pathutils.NODED_CLIENT_CERT_FILE):
    raise errors.OpExecError("The client certificate was not created"
                             " properly.")

  # start the node daemon
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start", constants.NODED])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForNodeDaemon(master_name)


def _WaitForNodeDaemon(node_name):
  """Wait for the node daemon on the given node to become responsive.

  """
  def _CheckNodeDaemon():
    result = rpc.BootstrapRunner().call_version([node_name])[node_name]
    if result.fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckNodeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " %s seconds" % (node_name, _DAEMON_READY_TIMEOUT))
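
# _WaitForNodeDaemon, _WaitForMasterDaemon and _WaitForSshDaemon all follow
# the same pattern: poll a cheap liveness check once per second via
# utils.Retry, and give up with an OpExecError once _DAEMON_READY_TIMEOUT
# (ten seconds) has passed.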


def _WaitForMasterDaemon():
  """Wait for the master daemon to become responsive.

  """
  def _CheckMasterDaemon():
    try:
      cl = luxi.Client()
      (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
    except Exception:
      raise utils.RetryAgain()

    logging.debug("Received cluster name %s from master", cluster_name)

  try:
    utils.Retry(_CheckMasterDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Master daemon didn't answer queries within"
                             " %s seconds" % _DAEMON_READY_TIMEOUT)


def _WaitForSshDaemon(hostname, port):
  """Wait for the SSH daemon on the given host and port to become responsive.

  """
  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  hostip = netutils.GetHostname(name=hostname, family=family).ip

  def _CheckSshDaemon():
    if netutils.TcpPing(hostip, port, timeout=1.0, live_port_needed=True):
      logging.debug("SSH daemon on %s:%s (IP address %s) has become"
                    " responsive", hostname, port, hostip)
    else:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckSshDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("SSH daemon on %s:%s (IP address %s) didn't"
                             " become responsive within %s seconds" %
                             (hostname, port, hostip, _DAEMON_READY_TIMEOUT))


def _InitFileStorageDir(file_storage_dir):
  """Initialize the file storage directory, if needed.

  @param file_storage_dir: the user-supplied value
  @return: either empty string (if file storage was disabled at build
      time) or the normalized path to the storage directory

  """
  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("File storage directory '%s' is not an absolute"
                               " path" % file_storage_dir, errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)

  return file_storage_dir
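
# Example (hypothetical path): _InitFileStorageDir("/srv/ganeti/file-storage")
# creates the directory with mode 0750 if it is missing and returns the
# normalized path; a relative path raises OpPrereqError instead.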


def _PrepareFileBasedStorage(
    enabled_disk_templates, file_storage_dir,
    default_dir, file_disk_template,
    _storage_path_acceptance_fn,
    init_fn=_InitFileStorageDir, acceptance_fn=None):
  """Checks if a file-based storage type is enabled and inits the dir.

  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of enabled disk templates
  @type file_storage_dir: string
  @param file_storage_dir: the file storage directory
  @type default_dir: string
  @param default_dir: default file storage directory when C{file_storage_dir}
      is 'None'
  @type file_disk_template: string
  @param file_disk_template: a disk template whose storage type is 'ST_FILE',
      'ST_SHARED_FILE' or 'ST_GLUSTER'
  @type _storage_path_acceptance_fn: function
  @param _storage_path_acceptance_fn: checks whether the given file-based
      storage directory is acceptable
  @see: C{cluster.CheckFileBasedStoragePathVsEnabledDiskTemplates} for details

  @rtype: string
  @returns: the name of the actual file storage directory

  """
  assert (file_disk_template in utils.storage.GetDiskTemplatesOfStorageTypes(
            constants.ST_FILE, constants.ST_SHARED_FILE, constants.ST_GLUSTER
          ))

  if file_storage_dir is None:
    file_storage_dir = default_dir
  if not acceptance_fn:
    acceptance_fn = \
      lambda path: filestorage.CheckFileStoragePathAcceptance(
        path, exact_match_ok=True)

  _storage_path_acceptance_fn(logging.warning, file_storage_dir,
                              enabled_disk_templates)

  file_storage_enabled = file_disk_template in enabled_disk_templates
  if file_storage_enabled:
    try:
      acceptance_fn(file_storage_dir)
    except errors.FileStoragePathError as e:
      raise errors.OpPrereqError(str(e))
    result_file_storage_dir = init_fn(file_storage_dir)
  else:
    result_file_storage_dir = file_storage_dir
  return result_file_storage_dir


def _PrepareFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if file storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      pathutils.DEFAULT_FILE_STORAGE_DIR, constants.DT_FILE,
      cluster.CheckFileStoragePathVsEnabledDiskTemplates,
      init_fn=init_fn, acceptance_fn=acceptance_fn)


def _PrepareSharedFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if shared file storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR, constants.DT_SHARED_FILE,
      cluster.CheckSharedFileStoragePathVsEnabledDiskTemplates,
      init_fn=init_fn, acceptance_fn=acceptance_fn)


def _PrepareGlusterStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if gluster storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      pathutils.DEFAULT_GLUSTER_STORAGE_DIR, constants.DT_GLUSTER,
      cluster.CheckGlusterStoragePathVsEnabledDiskTemplates,
      init_fn=init_fn, acceptance_fn=acceptance_fn)


def _InitCheckEnabledDiskTemplates(enabled_disk_templates):
  """Checks the sanity of the enabled disk templates.

  """
  if not enabled_disk_templates:
    raise errors.OpPrereqError("Enabled disk templates list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_disk_templates = \
    set(enabled_disk_templates) - constants.DISK_TEMPLATES
  if invalid_disk_templates:
    raise errors.OpPrereqError("Enabled disk templates list contains invalid"
                               " entries: %s" % invalid_disk_templates,
                               errors.ECODE_INVAL)


def _RestrictIpolicyToEnabledDiskTemplates(ipolicy, enabled_disk_templates):
  """Restricts the ipolicy's disk templates to the enabled ones.

  This function removes from the ipolicy's list of allowed disk
  templates all templates that are not enabled by the cluster.

  @type ipolicy: dict
  @param ipolicy: the instance policy
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: the list of cluster-wide enabled disk
      templates

  """
  assert constants.IPOLICY_DTS in ipolicy
  allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
  restricted_disk_templates = list(set(allowed_disk_templates)
                                   .intersection(set(enabled_disk_templates)))
  ipolicy[constants.IPOLICY_DTS] = restricted_disk_templates
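
# Worked example: if the ipolicy allows ["drbd", "plain", "file"] but only
# ["plain", "file"] are enabled cluster-wide, the allowed list shrinks to
# ["plain", "file"] (element order may change, since the intersection goes
# through a set).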


def _InitCheckDrbdHelper(drbd_helper, drbd_enabled):
  """Checks the DRBD usermode helper.

  @type drbd_helper: string
  @param drbd_helper: name of the DRBD usermode helper that the system should
      use
  @type drbd_enabled: bool
  @param drbd_enabled: whether the DRBD disk template is enabled

  """
  if not drbd_enabled:
    return

  if drbd_helper is not None:
    try:
      curr_helper = drbd.DRBD8.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      raise errors.OpPrereqError("Error while checking drbd helper"
                                 " (disable drbd with --enabled-disk-templates"
                                 " if you are not using drbd): %s" % str(err),
                                 errors.ECODE_ENVIRON)
    if drbd_helper != curr_helper:
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
                                 " is the current helper" % (drbd_helper,
                                                             curr_helper),
                                 errors.ECODE_INVAL)
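
# Example (hypothetical helper paths): with DRBD enabled and the system
# currently reporting "/bin/true" as its usermode helper, a call like
#   _InitCheckDrbdHelper("/bin/false", True)
# raises OpPrereqError, since the requested helper differs from the current
# one.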


def InitCluster(cluster_name, mac_prefix,
                master_netmask, master_netdev, file_storage_dir,
                shared_file_storage_dir, gluster_storage_dir,
                candidate_pool_size, ssh_key_type, ssh_key_bits,
                secondary_ip=None, vg_name=None, beparams=None, nicparams=None,
                ndparams=None, hvparams=None, diskparams=None,
                enabled_hypervisors=None, modify_etc_hosts=True,
                modify_ssh_setup=True, maintain_node_health=False,
                drbd_helper=None, uid_pool=None, default_iallocator=None,
                default_iallocator_params=None, primary_ip_version=None,
                ipolicy=None, prealloc_wipe_disks=False,
                use_external_mip_script=False, hv_state=None, disk_state=None,
                enabled_disk_templates=None, install_image=None,
                zeroing_image=None, compression_tools=None,
                enabled_user_shutdown=False):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of disk templates to be used in this
      cluster

  @type enabled_user_shutdown: bool
  @param enabled_user_shutdown: whether user shutdown is enabled cluster-wide

  """
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  data_dir = vcluster.AddNodePrefix(pathutils.DATA_DIR)
  queue_dir = vcluster.AddNodePrefix(pathutils.QUEUE_DIR)
  archive_dir = vcluster.AddNodePrefix(pathutils.JOB_QUEUE_ARCHIVE_DIR)
  for ddir in [queue_dir, data_dir, archive_dir]:
    if os.path.isdir(ddir):
      for entry in os.listdir(ddir):
        if not os.path.isdir(os.path.join(ddir, entry)):
          raise errors.OpPrereqError(
            "%s contains non-directory entries like %s. Remove left-overs of an"
            " old cluster before initialising a new one" % (ddir, entry),
            errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  _InitCheckEnabledDiskTemplates(enabled_disk_templates)

  try:
    ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
  except errors.ProgrammerError:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version, errors.ECODE_INVAL)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." % (hostname.ip, primary_ip_version),
                               errors.ECODE_INVAL)

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, pathutils.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if master_netmask is not None:
    if not ipcls.ValidateNetmask(master_netmask):
      raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s " %
                                 (master_netmask, primary_ip_version),
                                 errors.ECODE_INVAL)
  else:
    master_netmask = ipcls.iplen

  if vg_name:
    # Check if volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus, errors.ECODE_INVAL)

  drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
  _InitCheckDrbdHelper(drbd_helper, drbd_enabled)

  logging.debug("Stopping daemons (if any are running)")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
  if result.failed:
    raise errors.OpExecError("Could not stop daemons, command %s"
                             " had exitcode %s and error '%s'" %
                             (result.cmd, result.exit_code, result.output))

  file_storage_dir = _PrepareFileStorage(enabled_disk_templates,
                                         file_storage_dir)
  shared_file_storage_dir = _PrepareSharedFileStorage(enabled_disk_templates,
                                                      shared_file_storage_dir)
  gluster_storage_dir = _PrepareGlusterStorage(enabled_disk_templates,
                                               gluster_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)
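
  # Note: the MAC prefix regex above only accepts lowercase characters, so a
  # prefix like "aa:00:00" passes while "AA:00:00" is rejected.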

  if nicparams.get('mode', None) != constants.NIC_MODE_OVS:
    # Do not do this check if mode=openvswitch, since the openvswitch is not
    # created yet
    result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (master_netdev,
                                  result.output.strip()), errors.ECODE_INVAL)

  dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  objects.UpgradeBeParams(beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  objects.NIC.CheckParameterSyntax(nicparams)

  full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
  _RestrictIpolicyToEnabledDiskTemplates(full_ipolicy, enabled_disk_templates)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
  else:
    ndparams = dict(constants.NDC_DEFAULTS)

  # This is ugly, as we modify the dict itself
  # FIXME: Make utils.ForceDictType pure functional or write a wrapper
  # around it
  if hv_state:
    for hvname, hvs_data in hv_state.items():
      utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
      hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
  else:
    hv_state = dict((hvname, constants.HVST_DEFAULTS)
                    for hvname in enabled_hypervisors)

  # disk_state has no defaults; only validate and fill what was given
  if disk_state:
    for storage, ds_data in disk_state.items():
      if storage not in constants.DS_VALID_TYPES:
        raise errors.OpPrereqError("Invalid storage type in disk state: %s" %
                                   storage, errors.ECODE_INVAL)
      for ds_name, state in ds_data.items():
        utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
        ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)

  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # diskparams is a mapping of disk-template->diskparams dict
  for template, dt_params in diskparams.items():
    param_keys = set(dt_params.keys())
    default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
    if not (param_keys <= default_param_keys):
      unknown_params = param_keys - default_param_keys
      raise errors.OpPrereqError("Invalid parameters for disk template %s:"
                                 " %s" % (template,
                                          utils.CommaJoin(unknown_params)),
                                 errors.ECODE_INVAL)
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
    if template == constants.DT_DRBD8 and vg_name is not None:
      # The default METAVG value is equal to the VG name set at init time,
      # if provided
      dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name

  try:
    utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
  except errors.OpPrereqError, err:
    raise errors.OpPrereqError("While verifying disk parameter options: %s" %
                               err, errors.ECODE_INVAL)

  # set up ssh config and /etc/hosts
  rsa_sshkey = ""
  dsa_sshkey = ""
  if os.path.isfile(pathutils.SSH_HOST_RSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
    rsa_sshkey = sshline.split(" ")[1]
  if os.path.isfile(pathutils.SSH_HOST_DSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_DSA_PUB)
    dsa_sshkey = sshline.split(" ")[1]
  if not rsa_sshkey and not dsa_sshkey:
    raise errors.OpPrereqError("Failed to find SSH public keys",
                               errors.ECODE_ENVIRON)

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    ssh.InitSSHSetup(ssh_key_type, ssh_key_bits)

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)
  else:
    # default to htools
    if utils.FindFile(constants.IALLOC_HAIL,
                      constants.IALLOCATOR_SEARCH_PATH,
                      os.path.isfile):
      default_iallocator = constants.IALLOC_HAIL

  # check if we have all the users we need
  try:
    runtime.GetEnts()
  except errors.ConfigurationError, err:
    raise errors.OpPrereqError("Required system user/group missing: %s" %
                               err, errors.ECODE_ENVIRON)

  candidate_certs = {}

  now = time.time()

  if compression_tools is not None:
    cluster.CheckCompressionTools(compression_tools)

  initial_dc_config = dict(active=True,
                           interval=int(constants.MOND_TIME_INTERVAL * 1e6))
  data_collectors = dict(
    (name, initial_dc_config.copy())
    for name in constants.DATA_COLLECTOR_NAMES)

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=rsa_sshkey,
    dsahostkeypub=dsa_sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_ip=clustername.ip,
    master_netmask=master_netmask,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    shared_file_storage_dir=shared_file_storage_dir,
    gluster_storage_dir=gluster_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    ndparams=ndparams,
    hvparams=hvparams,
    diskparams=diskparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    maintain_node_health=maintain_node_health,
    data_collectors=data_collectors,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    default_iallocator_params=default_iallocator_params,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    use_external_mip_script=use_external_mip_script,
    ipolicy=full_ipolicy,
    hv_state_static=hv_state,
    disk_state_static=disk_state,
    enabled_disk_templates=enabled_disk_templates,
    candidate_certs=candidate_certs,
    osparams={},
    osparams_private_cluster={},
    install_image=install_image,
    zeroing_image=zeroing_image,
    compression_tools=compression_tools,
    enabled_user_shutdown=enabled_user_shutdown,
    ssh_key_type=ssh_key_type,
    ssh_key_bits=ssh_key_bits,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    ctime=now, mtime=now,
                                    )
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

  master_uuid = cfg.GetMasterNode()
  if modify_ssh_setup:
    ssh.InitPubKeyFile(master_uuid, ssh_key_type)

  _InitGanetiServerSetup(hostname.name, cfg)

  logging.debug("Starting daemons")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
  if result.failed:
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForMasterDaemon()


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=pathutils.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  uuid_generator = config.TemporaryReservationManager()
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                _INITCONF_ECID)
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                    _INITCONF_ECID)
  cluster_config.master_node = master_node_config.uuid
  nodes = {
    master_node_config.uuid: master_node_config,
    }
  default_nodegroup = objects.NodeGroup(
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
    name=constants.INITIAL_NODE_GROUP_NAME,
    members=[master_node_config.uuid],
    diskparams={},
    )
  nodegroups = {
    default_nodegroup.uuid: default_nodegroup,
    }
  now = time.time()
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodegroups=nodegroups,
                                   nodes=nodes,
                                   instances={},
                                   networks={},
                                   disks={},
                                   filters={},
                                   serial_no=1,
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0600)


def FinalizeClusterDestroy(master_uuid):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  livelock = utils.livelock.LiveLock("bootstrap_destroy")
  cfg = config.GetConfig(None, livelock)
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  runner = rpc.BootstrapRunner()

  master_name = cfg.GetNodeName(master_uuid)

  master_params = cfg.GetMasterNetworkParameters()
  master_params.uuid = master_uuid
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(master_name, master_params,
                                                 ems)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(master_name)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)

  result = runner.call_node_leave_cluster(master_name, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shutdown the node daemon and cleanup"
                    " the node: %s", msg)


def SetupNodeDaemon(opts, cluster_name, node, ssh_port):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param opts: the command line options
  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_port: the SSH port of the new node

  """
  data = {
    constants.NDS_CLUSTER_NAME: cluster_name,
    constants.NDS_NODE_DAEMON_CERTIFICATE:
      utils.ReadFile(pathutils.NODED_CERT_FILE),
    constants.NDS_SSCONF: ssconf.SimpleStore().ReadAll(),
    constants.NDS_START_NODE_DAEMON: True,
    constants.NDS_NODE_NAME: node,
    }

  ssh.RunSshCmdWithStdin(cluster_name, node, pathutils.NODE_DAEMON_SETUP,
                         ssh_port, data,
                         debug=opts.debug, verbose=opts.verbose,
                         use_cluster_key=True, ask_key=opts.ssh_key_check,
                         strict_host_check=opts.ssh_key_check,
                         ensure_version=True)

  _WaitForSshDaemon(node, ssh_port)
  _WaitForNodeDaemon(node)


def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
      (dangerous)

  @returns: the pair of an exit code and warnings to display

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_names = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ("\n".join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = GatherMasterVotes(node_names)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)

  # end checks

  rcode = 0
  warnings = []

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # Forcefully start WConfd so that we can access the configuration
    result = utils.RunCmd([pathutils.DAEMON_UTIL,
                           "start", constants.WCONFD, "--force-node",
                           "--no-voting", "--yes-do-it"])
    if result.failed:
      raise errors.OpPrereqError("Could not start the configuration daemon,"
                                 " command %s had exitcode %s and error %s" %
                                 (result.cmd, result.exit_code, result.output),
                                 errors.ECODE_NOENT)

    # Open the configuration directly; accept_foreign is needed because this
    # node is not (yet) the master according to that configuration
    livelock = utils.livelock.LiveLock("bootstrap_failover")
    cfg = config.GetConfig(None, livelock, accept_foreign=True)

    old_master_node = cfg.GetNodeInfoByName(old_master)
    if old_master_node is None:
      raise errors.OpPrereqError("Could not find old master node '%s' in"
                                 " cluster configuration." % old_master,
                                 errors.ECODE_NOENT)

    cluster_info = cfg.GetClusterInfo()
    new_master_node = cfg.GetNodeInfoByName(new_master)
    if new_master_node is None:
      raise errors.OpPrereqError("Could not find new master node '%s' in"
                                 " cluster configuration." % new_master,
                                 errors.ECODE_NOENT)

    cluster_info.master_node = new_master_node.uuid
    # this also regenerates the ssconf files, since the cluster info changed
    cfg.Update(cluster_info, logging.error)

    # if cfg.Update worked, then it means the old master daemon won't be
    # able now to write its own config file (we rely on locking in both
    # backend.UploadFile() and ConfigWriter._Write(); hence the next
    # step is to kill the old master

    logging.info("Stopping the master daemon on node %s", old_master)

    runner = rpc.BootstrapRunner()
    master_params = cfg.GetMasterNetworkParameters()
    master_params.uuid = old_master_node.uuid
    ems = cfg.GetUseExternalMipScript()
    result = runner.call_node_deactivate_master_ip(old_master,
                                                   master_params, ems)

    msg = result.fail_msg
    if msg:
      warning = "Could not disable the master IP: %s" % (msg,)
      logging.warning("%s", warning)
      warnings.append(warning)

    result = runner.call_node_stop_master(old_master)
    msg = result.fail_msg
    if msg:
      warning = ("Could not disable the master role on the old master"
                 " %s, please disable manually: %s" % (old_master, msg))
      logging.error("%s", warning)
      warnings.append(warning)
  except errors.ConfigurationError, err:
    logging.error("Error while trying to set the new master: %s",
                  str(err))
    return 1, warnings
  finally:
    # stop WConfd again:
    result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop", constants.WCONFD])
    if result.failed:
      warning = ("Could not stop the configuration daemon,"
                 " command %s had exitcode %s and error %s"
                 % (result.cmd, result.exit_code, result.output))
      logging.error("%s", warning)
      rcode = 1

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30

  # Here we have a phase where no master should be running
  def _check_ip(expected):
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT) != expected:
      raise utils.RetryAgain()

  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout, args=[False])
  except utils.RetryTimeout:
    warning = ("The master IP is still reachable after %s seconds,"
               " continuing but activating the master IP on the current"
               " node will probably fail" % total_timeout)
    logging.warning("%s", warning)
    warnings.append(warning)
    rcode = 1

  if jstore.CheckDrainFlag():
    logging.info("Undraining job queue")
    jstore.SetDrainFlag(False)

  logging.info("Starting the master daemons on the new master")

  result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
                                                                no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  # Finally, verify that the new master managed to set up the master IP
  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout, args=[True])
  except utils.RetryTimeout:
    warning = ("The master IP did not come up within %s seconds; the"
               " cluster should still be working and reachable via %s,"
               " but not via the master IP address"
               % (total_timeout, new_master))
    logging.warning("%s", warning)
    warnings.append(warning)
    rcode = 1

  logging.info("Master failed over from %s to %s", old_master, new_master)
  return rcode, warnings


def GetMaster():
  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of importing directly ssconf, it's better
  to abstract it in bootstrap, where we do use ssconf in other
  functions too.

  """
  sstore = ssconf.SimpleStore()

  old_master, _ = ssconf.GetMasterAndMyself(sstore)

  return old_master


def GatherMasterVotes(node_names):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  @type node_names: list
  @param node_names: the list of nodes to query for master info
  @rtype: list
  @return: list of (node, votes)

  """
  if not node_names:
    # no nodes
    return []
  results = rpc.BootstrapRunner().call_master_node_name(node_names)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_names))]
  votes = {}
  for node_name in results:
    nres = results[node_name]
    msg = nres.fail_msg

    if msg:
      logging.warning("Error contacting node %s: %s", node_name, msg)
      node = None
    else:
      node = nres.payload

    if node not in votes:
      votes[node] = 1
    else:
      votes[node] += 1

  # sort the list by the number of votes (descending); for equal vote counts
  # the node name breaks the tie, keeping the result deterministic
  vote_list = list(votes.items())
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list
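
# Interpreting the result (hypothetical node names): a return value of
#   [("node1", 2), (None, 1)]
# means that two nodes consider node1 to be the master and one node could not
# be contacted; all errors are accumulated under the None key.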


def MajorityHealthy():
  """Check if the majority of nodes is healthy.

  Gather master votes from all nodes known to this node;
  return True if a strict majority of nodes is reachable and
  has some opinion on which node is master. Note that this will
  not guarantee any node to win an election but it ensures that
  a standard master-failover is still possible.

  """
  node_names = ssconf.SimpleStore().GetNodeList()
  node_count = len(node_names)
  vote_list = GatherMasterVotes(node_names)
  if vote_list is None:
    return False
  total_votes = sum([count for (node, count) in vote_list if node is not None])
  logging.info("Total %d nodes, %d votes: %s", node_count, total_votes,
               vote_list)
  return 2 * total_votes > node_count
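
# Worked example: with 5 known nodes, at least 3 must cast a vote for
# 2 * total_votes > node_count to hold (2 * 3 = 6 > 5); with 4 nodes, 3
# responders are likewise required, since 2 * 2 = 4 is not greater than 4.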