31 """Functions to bootstrap a new cluster.
32
33 """
34
35 import os
36 import os.path
37 import re
38 import logging
39 import time
40
41 from ganeti.cmdlib import cluster
42 import ganeti.rpc.node as rpc
43 from ganeti import ssh
44 from ganeti import utils
45 from ganeti import errors
46 from ganeti import config
47 from ganeti import constants
48 from ganeti import objects
49 from ganeti import ssconf
50 from ganeti import serializer
51 from ganeti import hypervisor
52 from ganeti.storage import drbd
53 from ganeti.storage import filestorage
54 from ganeti import netutils
55 from ganeti import luxi
56 from ganeti import jstore
57 from ganeti import pathutils
58 from ganeti import runtime
59 from ganeti import vcluster
60
61
62
63 _INITCONF_ECID = "initconfig-ecid"
64
65
66 _DAEMON_READY_TIMEOUT = 10.0
67

def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @type file_name: str
  @param file_name: Path to output file

  """
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
                  backup=True)


def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_spice_cert,
                          new_confd_hmac_key, new_cds, new_client_cert,
                          master_name,
                          rapi_cert_pem=None, spice_cert_pem=None,
                          spice_cacert_pem=None, cds=None,
                          nodecert_file=pathutils.NODED_CERT_FILE,
                          clientcert_file=pathutils.NODED_CLIENT_CERT_FILE,
                          rapicert_file=pathutils.RAPI_CERT_FILE,
                          spicecert_file=pathutils.SPICE_CERT_FILE,
                          spicecacert_file=pathutils.SPICE_CACERT_FILE,
                          hmackey_file=pathutils.CONFD_HMAC_KEY,
                          cds_file=pathutils.CLUSTER_DOMAIN_SECRET_FILE):
93 """Updates the cluster certificates, keys and secrets.
94
95 @type new_cluster_cert: bool
96 @param new_cluster_cert: Whether to generate a new cluster certificate
97 @type new_rapi_cert: bool
98 @param new_rapi_cert: Whether to generate a new RAPI certificate
99 @type new_spice_cert: bool
100 @param new_spice_cert: Whether to generate a new SPICE certificate
101 @type new_confd_hmac_key: bool
102 @param new_confd_hmac_key: Whether to generate a new HMAC key
103 @type new_cds: bool
104 @param new_cds: Whether to generate a new cluster domain secret
105 @type new_client_cert: bool
106 @param new_client_cert: Whether to generate a new client certificate
107 @type master_name: string
108 @param master_name: FQDN of the master node
109 @type rapi_cert_pem: string
110 @param rapi_cert_pem: New RAPI certificate in PEM format
111 @type spice_cert_pem: string
112 @param spice_cert_pem: New SPICE certificate in PEM format
113 @type spice_cacert_pem: string
114 @param spice_cacert_pem: Certificate of the CA that signed the SPICE
115 certificate, in PEM format
116 @type cds: string
117 @param cds: New cluster domain secret
118 @type nodecert_file: string
119 @param nodecert_file: optional override of the node cert file path
120 @type rapicert_file: string
121 @param rapicert_file: optional override of the rapi cert file path
122 @type spicecert_file: string
123 @param spicecert_file: optional override of the spice cert file path
124 @type spicecacert_file: string
125 @param spicecacert_file: optional override of the spice CA cert file path
126 @type hmackey_file: string
127 @param hmackey_file: optional override of the hmac key file path
128
129 """
  # noded SSL certificate
  utils.GenerateNewSslCert(
    new_cluster_cert, nodecert_file, 1,
    "Generating new cluster certificate at %s" % nodecert_file)

  # If the cluster certificate was renewed, the client certificate has to
  # be renewed as well, because it is signed by the cluster certificate
  if new_cluster_cert or new_client_cert:
    utils.GenerateNewClientSslCert(clientcert_file, nodecert_file,
                                   master_name)

  # confd HMAC key
  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)

  if rapi_cert_pem:
    # Assume the given PEM data contains both the certificate and the key
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)

  else:
    utils.GenerateNewSslCert(
      new_rapi_cert, rapicert_file, 1,
      "Generating new RAPI certificate at %s" % rapicert_file)

  # SPICE
  spice_cert_exists = os.path.exists(spicecert_file)
  spice_cacert_exists = os.path.exists(spicecacert_file)
  if spice_cert_pem:
    # spice_cert_pem implies also spice_cacert_pem
    logging.debug("Writing SPICE certificate at %s", spicecert_file)
    utils.WriteFile(spicecert_file, data=spice_cert_pem, backup=True)
    logging.debug("Writing SPICE CA certificate at %s", spicecacert_file)
    utils.WriteFile(spicecacert_file, data=spice_cacert_pem, backup=True)
  elif new_spice_cert or not spice_cert_exists:
    if spice_cert_exists:
      utils.CreateBackup(spicecert_file)
    if spice_cacert_exists:
      utils.CreateBackup(spicecacert_file)

    logging.debug("Generating new self-signed SPICE certificate at %s",
                  spicecert_file)
    (_, cert_pem) = utils.GenerateSelfSignedSslCert(spicecert_file, 1)

    # Self-signed certificate -> the public certificate is also the CA
    # public certificate
    logging.debug("Writing the public certificate to %s",
                  spicecert_file)
    utils.io.WriteFile(spicecacert_file, mode=0400, data=cert_pem)

  # Cluster domain secret
  if cds:
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)

  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)

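# Illustrative usage sketch (an assumption, not part of the original module):
# renewing only the RAPI certificate on the master, leaving all other
# credentials in place, would look like this (the hostname is a placeholder):
#
#   GenerateClusterCrypto(new_cluster_cert=False, new_rapi_cert=True,
#                         new_spice_cert=False, new_confd_hmac_key=False,
#                         new_cds=False, new_client_cert=False,
#                         master_name="master.example.com")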

def _InitGanetiServerSetup(master_name, cfg):
  """Setup the necessary configuration for the initial node daemon.

  This creates the nodepass file containing the shared password for
  the cluster, generates the SSL certificate and starts the node daemon.

  @type master_name: str
  @param master_name: Name of the master node
  @type cfg: ConfigWriter
  @param cfg: the configuration writer

  """
  # Generate cluster secrets
  GenerateClusterCrypto(True, False, False, False, False, False, master_name)

  # Register the master's certificate digest as a master candidate cert
  master_uuid = cfg.GetMasterNode()
  master_digest = utils.GetCertificateDigest()
  cfg.AddNodeToCandidateCerts(master_uuid, master_digest)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

  if not os.path.exists(
      os.path.join(pathutils.DATA_DIR,
                   "%s%s" % (constants.SSCONF_FILEPREFIX,
                             constants.SS_MASTER_CANDIDATES_CERTS))):
    raise errors.OpExecError("Ssconf file for master candidate certificates"
                             " was not written.")

  if not os.path.exists(pathutils.NODED_CERT_FILE):
    raise errors.OpExecError("The server certificate was not created"
                             " properly.")

  if not os.path.exists(pathutils.NODED_CLIENT_CERT_FILE):
    raise errors.OpExecError("The client certificate was not created"
                             " properly.")

  # Start the node daemon so the rest of the bootstrap can use RPC
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start", constants.NODED])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForNodeDaemon(master_name)


def _WaitForNodeDaemon(node_name):
  """Wait for node daemon to become responsive.

  """
  def _CheckNodeDaemon():
    # Probe the daemon via its version RPC; any failure means it is not
    # (yet) answering
    result = rpc.BootstrapRunner().call_version([node_name])[node_name]
    if result.fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckNodeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " %s seconds" %
                             (node_name, _DAEMON_READY_TIMEOUT))

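# The helpers above and below share one retry pattern: utils.Retry invokes
# the check function repeatedly (here with a 1.0 second interval) until it
# returns without raising utils.RetryAgain, or raises utils.RetryTimeout
# once _DAEMON_READY_TIMEOUT (10 seconds) has elapsed. A minimal sketch,
# with _something_ready as a hypothetical placeholder:
#
#   def _CheckSomething():
#     if not _something_ready():
#       raise utils.RetryAgain()
#
#   utils.Retry(_CheckSomething, 1.0, _DAEMON_READY_TIMEOUT)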

def _WaitForMasterDaemon():
  """Wait for master daemon to become responsive.

  """
  def _CheckMasterDaemon():
    # A successful luxi query means the master daemon is up and answering
    try:
      cl = luxi.Client()
      (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
    except Exception:
      raise utils.RetryAgain()

    logging.debug("Received cluster name %s from master", cluster_name)

  try:
    utils.Retry(_CheckMasterDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Master daemon didn't answer queries within"
                             " %s seconds" % _DAEMON_READY_TIMEOUT)


def _WaitForSshDaemon(hostname, port):
  """Wait for SSH daemon to become responsive.

  """
  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  hostip = netutils.GetHostname(name=hostname, family=family).ip

  def _CheckSshDaemon():
    if netutils.TcpPing(hostip, port, timeout=1.0, live_port_needed=True):
      logging.debug("SSH daemon on %s:%s (IP address %s) has become"
                    " responsive", hostname, port, hostip)
    else:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckSshDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("SSH daemon on %s:%s (IP address %s) didn't"
                             " become responsive within %s seconds" %
                             (hostname, port, hostip, _DAEMON_READY_TIMEOUT))


def _InitFileStorageDir(file_storage_dir):
  """Initialize if needed the file storage.

  @param file_storage_dir: the user-supplied value
  @return: either empty string (if file storage was disabled at build
      time) or the normalized path to the storage directory

  """
  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("File storage directory '%s' is not an absolute"
                               " path" % file_storage_dir, errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)

  return file_storage_dir


def _PrepareFileBasedStorage(
    enabled_disk_templates, file_storage_dir,
    default_dir, file_disk_template, _storage_path_acceptance_fn,
    init_fn=_InitFileStorageDir, acceptance_fn=None):
  """Checks if a file-based storage type is enabled and inits the dir.

  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of enabled disk templates
  @type file_storage_dir: string
  @param file_storage_dir: the file storage directory
  @type default_dir: string
  @param default_dir: default file storage directory when C{file_storage_dir}
      is 'None'
  @type file_disk_template: string
  @param file_disk_template: a disk template whose storage type is 'ST_FILE',
      'ST_SHARED_FILE' or 'ST_GLUSTER'
  @type _storage_path_acceptance_fn: function
  @param _storage_path_acceptance_fn: checks whether the given file-based
      storage directory is acceptable
  @see: C{cluster.CheckFileBasedStoragePathVsEnabledDiskTemplates} for details

  @rtype: string
  @returns: the name of the actual file storage directory

  """
  assert (file_disk_template in utils.storage.GetDiskTemplatesOfStorageTypes(
      constants.ST_FILE, constants.ST_SHARED_FILE, constants.ST_GLUSTER
  ))

  file_storage_enabled = file_disk_template in enabled_disk_templates

  if file_storage_dir is None:
    if file_storage_enabled:
      file_storage_dir = default_dir
    else:
      file_storage_dir = ""

  if not acceptance_fn:
    acceptance_fn = \
      lambda path: filestorage.CheckFileStoragePathAcceptance(
          path, exact_match_ok=True)

  _storage_path_acceptance_fn(logging.warning, file_storage_dir,
                              enabled_disk_templates)

  if file_storage_enabled:
    try:
      acceptance_fn(file_storage_dir)
    except errors.FileStoragePathError as e:
      raise errors.OpPrereqError(str(e))
    result_file_storage_dir = init_fn(file_storage_dir)
  else:
    result_file_storage_dir = file_storage_dir
  return result_file_storage_dir


def _PrepareFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if file storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      pathutils.DEFAULT_FILE_STORAGE_DIR, constants.DT_FILE,
      cluster.CheckFileStoragePathVsEnabledDiskTemplates,
      init_fn=init_fn, acceptance_fn=acceptance_fn)


def _PrepareSharedFileStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if shared file storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR, constants.DT_SHARED_FILE,
      cluster.CheckSharedFileStoragePathVsEnabledDiskTemplates,
      init_fn=init_fn, acceptance_fn=acceptance_fn)


def _PrepareGlusterStorage(
    enabled_disk_templates, file_storage_dir, init_fn=_InitFileStorageDir,
    acceptance_fn=None):
  """Checks if gluster storage is enabled and inits the dir.

  @see: C{_PrepareFileBasedStorage}

  """
  return _PrepareFileBasedStorage(
      enabled_disk_templates, file_storage_dir,
      pathutils.DEFAULT_GLUSTER_STORAGE_DIR, constants.DT_GLUSTER,
      cluster.CheckGlusterStoragePathVsEnabledDiskTemplates,
      init_fn=init_fn, acceptance_fn=acceptance_fn)


def _InitCheckEnabledDiskTemplates(enabled_disk_templates):
  """Checks the sanity of the enabled disk templates.

  """
  if not enabled_disk_templates:
    raise errors.OpPrereqError("Enabled disk templates list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_disk_templates = \
    set(enabled_disk_templates) - constants.DISK_TEMPLATES
  if invalid_disk_templates:
    raise errors.OpPrereqError("Enabled disk templates list contains invalid"
                               " entries: %s" % invalid_disk_templates,
                               errors.ECODE_INVAL)


def _RestrictIpolicyToEnabledDiskTemplates(ipolicy, enabled_disk_templates):
  """Restricts the ipolicy's disk templates to the enabled ones.

  This function removes from the ipolicy's list of allowed disk
  templates those that are not enabled by the cluster.

  @type ipolicy: dict
  @param ipolicy: the instance policy
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: the list of cluster-wide enabled disk
      templates

  """
  assert constants.IPOLICY_DTS in ipolicy
  allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
  restricted_disk_templates = list(set(allowed_disk_templates)
                                   .intersection(set(enabled_disk_templates)))
  ipolicy[constants.IPOLICY_DTS] = restricted_disk_templates

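# Worked example for the restriction above (values are illustrative): with
# ipolicy[constants.IPOLICY_DTS] == ["drbd", "plain", "file"] and
# enabled_disk_templates == ["plain", "drbd"], the list is narrowed in
# place to the intersection:
#
#   ipolicy[constants.IPOLICY_DTS]  # -> ["drbd", "plain"] (order may vary)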

def _InitCheckDrbdHelper(drbd_helper, drbd_enabled):
  """Checks the DRBD usermode helper.

  @type drbd_helper: string
  @param drbd_helper: name of the DRBD usermode helper that the system should
      use

  """
  if not drbd_enabled:
    return

  if drbd_helper is not None:
    try:
      curr_helper = drbd.DRBD8.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      raise errors.OpPrereqError("Error while checking drbd helper"
                                 " (disable drbd with --enabled-disk-templates"
                                 " if you are not using drbd): %s" % str(err),
                                 errors.ECODE_ENVIRON)
    if drbd_helper != curr_helper:
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
                                 " is the current helper" % (drbd_helper,
                                                             curr_helper),
                                 errors.ECODE_INVAL)


def InitCluster(cluster_name, mac_prefix,
                master_netmask, master_netdev, file_storage_dir,
                shared_file_storage_dir, gluster_storage_dir,
                candidate_pool_size, ssh_key_type, ssh_key_bits,
                secondary_ip=None, vg_name=None, beparams=None, nicparams=None,
                ndparams=None, hvparams=None, diskparams=None,
                enabled_hypervisors=None, modify_etc_hosts=True,
                modify_ssh_setup=True, maintain_node_health=False,
                drbd_helper=None, uid_pool=None, default_iallocator=None,
                default_iallocator_params=None, primary_ip_version=None,
                ipolicy=None, prealloc_wipe_disks=False,
                use_external_mip_script=False, hv_state=None, disk_state=None,
                enabled_disk_templates=None, install_image=None,
                zeroing_image=None, compression_tools=None,
                enabled_user_shutdown=False):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of disk_templates to be used in this
      cluster

  @type enabled_user_shutdown: bool
  @param enabled_user_shutdown: whether user shutdown is enabled cluster
      wide

  """
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  data_dir = vcluster.AddNodePrefix(pathutils.DATA_DIR)
  queue_dir = vcluster.AddNodePrefix(pathutils.QUEUE_DIR)
  archive_dir = vcluster.AddNodePrefix(pathutils.JOB_QUEUE_ARCHIVE_DIR)
  for ddir in [queue_dir, data_dir, archive_dir]:
    if os.path.isdir(ddir):
      for entry in os.listdir(ddir):
        if not os.path.isdir(os.path.join(ddir, entry)):
          raise errors.OpPrereqError(
            "%s contains non-directory entries like %s. Remove left-overs of an"
            " old cluster before initialising a new one" % (ddir, entry),
            errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  _InitCheckEnabledDiskTemplates(enabled_disk_templates)

  try:
    ipcls = netutils.IPAddress.GetClassFromIpVersion(primary_ip_version)
  except errors.ProgrammerError:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version, errors.ECODE_INVAL)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." % (hostname.ip, primary_ip_version),
                               errors.ECODE_INVAL)

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, pathutils.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if master_netmask is not None:
    if not ipcls.ValidateNetmask(master_netmask):
      raise errors.OpPrereqError("CIDR netmask (%s) not valid for IPv%s" %
                                 (master_netmask, primary_ip_version),
                                 errors.ECODE_INVAL)
  else:
    master_netmask = ipcls.iplen

  if vg_name:
    # Check if the volume group is valid
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s" % vgstatus, errors.ECODE_INVAL)

  drbd_enabled = constants.DT_DRBD8 in enabled_disk_templates
  _InitCheckDrbdHelper(drbd_helper, drbd_enabled)

  logging.debug("Stopping daemons (if any are running)")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-all"])
  if result.failed:
    raise errors.OpExecError("Could not stop daemons, command %s"
                             " had exitcode %s and error '%s'" %
                             (result.cmd, result.exit_code, result.output))

  file_storage_dir = _PrepareFileStorage(enabled_disk_templates,
                                         file_storage_dir)
  shared_file_storage_dir = _PrepareSharedFileStorage(enabled_disk_templates,
                                                      shared_file_storage_dir)
  gluster_storage_dir = _PrepareGlusterStorage(enabled_disk_templates,
                                               gluster_storage_dir)

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)
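  # For illustration: the pattern above accepts a prefix like "aa:00:00",
  # but rejects "AA:00:00" (only lowercase hex digits match) and
  # "aa:00:00:00" (exactly three octets are required).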

  if not nicparams.get('mode', None) == constants.NIC_MODE_OVS:
    # Do not do this check if mode=openvswitch, since the openvswitch is not
    # created yet
    result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
    if result.failed:
      raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                                 (master_netdev,
                                  result.output.strip()), errors.ECODE_INVAL)

  dirs = [(pathutils.RUN_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  objects.UpgradeBeParams(beparams)
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)

  objects.NIC.CheckParameterSyntax(nicparams)

  full_ipolicy = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy)
  _RestrictIpolicyToEnabledDiskTemplates(full_ipolicy, enabled_disk_templates)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
  else:
    ndparams = dict(constants.NDC_DEFAULTS)

  # hv_state is a mapping of hypervisor->hv_state dict; fill in defaults
  # for every enabled hypervisor that has no explicit settings
  if hv_state:
    for hvname, hvs_data in hv_state.items():
      utils.ForceDictType(hvs_data, constants.HVSTS_PARAMETER_TYPES)
      hv_state[hvname] = objects.Cluster.SimpleFillHvState(hvs_data)
  else:
    hv_state = dict((hvname, constants.HVST_DEFAULTS)
                    for hvname in enabled_hypervisors)

  # disk_state is a mapping of storage type->disk state dict
  if disk_state:
    for storage, ds_data in disk_state.items():
      if storage not in constants.DS_VALID_TYPES:
        raise errors.OpPrereqError("Invalid storage type in disk state: %s" %
                                   storage, errors.ECODE_INVAL)
      for ds_name, state in ds_data.items():
        utils.ForceDictType(state, constants.DSS_PARAMETER_TYPES)
        ds_data[ds_name] = objects.Cluster.SimpleFillDiskState(state)

  # check the syntax of each hypervisor's parameters
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

  # check the disk template parameters against the known keys
  for template, dt_params in diskparams.items():
    param_keys = set(dt_params.keys())
    default_param_keys = set(constants.DISK_DT_DEFAULTS[template].keys())
    if not (param_keys <= default_param_keys):
      unknown_params = param_keys - default_param_keys
      raise errors.OpPrereqError("Invalid parameters for disk template %s:"
                                 " %s" % (template,
                                          utils.CommaJoin(unknown_params)),
                                 errors.ECODE_INVAL)
    utils.ForceDictType(dt_params, constants.DISK_DT_TYPES)
    if template == constants.DT_DRBD8 and vg_name is not None:
      # The default METAVG value is equal to the VG name set at init time,
      # if provided
      dt_params[constants.DRBD_DEFAULT_METAVG] = vg_name

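  # Illustrative consequence of the loop above (an assumption for clarity):
  # initialising with vg_name "xenvg" and DRBD enabled leaves the DRBD
  # diskparams with dt_params[constants.DRBD_DEFAULT_METAVG] == "xenvg",
  # i.e. DRBD metadata volumes default to the cluster volume group.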
  try:
    utils.VerifyDictOptions(diskparams, constants.DISK_DT_DEFAULTS)
  except errors.OpPrereqError, err:
    raise errors.OpPrereqError("While verifying diskparams options: %s" % err,
                               errors.ECODE_INVAL)

  # read the SSH host keys that will be stored in the cluster configuration
  rsa_sshkey = ""
  dsa_sshkey = ""
  if os.path.isfile(pathutils.SSH_HOST_RSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_RSA_PUB)
    rsa_sshkey = sshline.split(" ")[1]
  if os.path.isfile(pathutils.SSH_HOST_DSA_PUB):
    sshline = utils.ReadFile(pathutils.SSH_HOST_DSA_PUB)
    dsa_sshkey = sshline.split(" ")[1]
  if not rsa_sshkey and not dsa_sshkey:
    raise errors.OpPrereqError("Failed to find SSH public keys",
                               errors.ECODE_ENVIRON)

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    ssh.InitSSHSetup(ssh_key_type, ssh_key_bits)

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)
  else:
    # default to htools
    if utils.FindFile(constants.IALLOC_HAIL,
                      constants.IALLOCATOR_SEARCH_PATH,
                      os.path.isfile):
      default_iallocator = constants.IALLOC_HAIL

  # check if we have all the users we need
  try:
    runtime.GetEnts()
  except errors.ConfigurationError, err:
    raise errors.OpPrereqError("Required system user/group missing: %s" %
                               err, errors.ECODE_ENVIRON)

  candidate_certs = {}

  now = time.time()

  if compression_tools is not None:
    cluster.CheckCompressionTools(compression_tools)

  initial_dc_config = dict(active=True,
                           interval=int(constants.MOND_TIME_INTERVAL * 1e6))
  data_collectors = dict(
    (name, initial_dc_config.copy())
    for name in constants.DATA_COLLECTOR_NAMES)

  # init of cluster config file
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=rsa_sshkey,
    dsahostkeypub=dsa_sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_ip=clustername.ip,
    master_netmask=master_netmask,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    shared_file_storage_dir=shared_file_storage_dir,
    gluster_storage_dir=gluster_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    ndparams=ndparams,
    hvparams=hvparams,
    diskparams=diskparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    maintain_node_health=maintain_node_health,
    data_collectors=data_collectors,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    default_iallocator_params=default_iallocator_params,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    use_external_mip_script=use_external_mip_script,
    ipolicy=full_ipolicy,
    hv_state_static=hv_state,
    disk_state_static=disk_state,
    enabled_disk_templates=enabled_disk_templates,
    candidate_certs=candidate_certs,
    osparams={},
    osparams_private_cluster={},
    install_image=install_image,
    zeroing_image=zeroing_image,
    compression_tools=compression_tools,
    enabled_user_shutdown=enabled_user_shutdown,
    ssh_key_type=ssh_key_type,
    ssh_key_bits=ssh_key_bits,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    ctime=now, mtime=now,
                                    )
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, pathutils.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  ssconf.WriteSsconfFiles(cfg.GetSsconfValues())

  master_uuid = cfg.GetMasterNode()
  if modify_ssh_setup:
    ssh.InitPubKeyFile(master_uuid, ssh_key_type)

  # set up the inter-node password and certificate, and start noded
  _InitGanetiServerSetup(hostname.name, cfg)

  logging.debug("Starting daemons")
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-all"])
  if result.failed:
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForMasterDaemon()


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=pathutils.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  uuid_generator = config.TemporaryReservationManager()
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                _INITCONF_ECID)
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                    _INITCONF_ECID)
  cluster_config.master_node = master_node_config.uuid
  nodes = {
    master_node_config.uuid: master_node_config,
  }
  default_nodegroup = objects.NodeGroup(
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
    name=constants.INITIAL_NODE_GROUP_NAME,
    members=[master_node_config.uuid],
    diskparams={},
  )
  nodegroups = {
    default_nodegroup.uuid: default_nodegroup,
  }
  now = time.time()
  maintenance = objects.Maintenance(serial_no=1, ctime=now, mtime=now)
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodegroups=nodegroups,
                                   nodes=nodes,
                                   instances={},
                                   networks={},
                                   disks={},
                                   filters={},
                                   maintenance=maintenance,
                                   serial_no=1,
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0600)


def FinalizeClusterDestroy(master_uuid):
  """Execute the last steps of cluster destroy

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  livelock = utils.livelock.LiveLock("bootstrap_destroy")
  cfg = config.GetConfig(None, livelock)
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  runner = rpc.BootstrapRunner()

  master_name = cfg.GetNodeName(master_uuid)

  master_params = cfg.GetMasterNetworkParameters()
  master_params.uuid = master_uuid
  ems = cfg.GetUseExternalMipScript()
  result = runner.call_node_deactivate_master_ip(master_name, master_params,
                                                 ems)

  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master IP: %s", msg)

  result = runner.call_node_stop_master(master_name)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)

  result = runner.call_node_leave_cluster(master_name, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shutdown the node daemon and cleanup"
                    " the node: %s", msg)


def SetupNodeDaemon(opts, cluster_name, node, ssh_port):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param opts: the parsed command-line options
  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_port: the SSH port of the new node

  """
  data = {
    constants.NDS_CLUSTER_NAME: cluster_name,
    constants.NDS_NODE_DAEMON_CERTIFICATE:
      utils.ReadFile(pathutils.NODED_CERT_FILE),
    constants.NDS_HMAC:
      utils.ReadFile(pathutils.CONFD_HMAC_KEY),
    constants.NDS_SSCONF: ssconf.SimpleStore().ReadAll(),
    constants.NDS_START_NODE_DAEMON: True,
    constants.NDS_NODE_NAME: node,
  }

  ssh.RunSshCmdWithStdin(cluster_name, node, pathutils.NODE_DAEMON_SETUP,
                         ssh_port, data,
                         debug=opts.debug, verbose=opts.verbose,
                         use_cluster_key=True, ask_key=opts.ssh_key_check,
                         strict_host_check=opts.ssh_key_check,
                         ensure_version=True)

  _WaitForSshDaemon(node, ssh_port)
  _WaitForNodeDaemon(node)

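# Rough sketch of what SetupNodeDaemon amounts to (an assumption based on
# the helper names, not a literal transcript): an SSH invocation of the
# node daemon setup tool on the new node, roughly
#
#   ssh -p <ssh_port> root@<node> <pathutils.NODE_DAEMON_SETUP>
#
# with the serialized "data" dictionary piped to the tool's standard input,
# after which both the SSH and node daemons are polled until responsive.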

def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and the non-master to become
  new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
      (dangerous)

  @returns: the pair of an exit code and warnings to display

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_names = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ("\n".join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = GatherMasterVotes(node_names)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)

  # end checks
  rcode = 0
  warnings = []

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
    # Start the configuration daemon (WConfd) on this node, even though it
    # is not (yet) the master, so that the configuration can be modified
    result = utils.RunCmd([pathutils.DAEMON_UTIL,
                           "start", constants.WCONFD, "--force-node",
                           "--no-voting", "--yes-do-it"])
    if result.failed:
      raise errors.OpPrereqError("Could not start the configuration daemon,"
                                 " command %s had exitcode %s and error %s" %
                                 (result.cmd, result.exit_code, result.output),
                                 errors.ECODE_NOENT)

    # Accessing the configuration has to work even though this node is not
    # (yet) the master, hence accept_foreign=True
    livelock = utils.livelock.LiveLock("bootstrap_failover")
    cfg = config.GetConfig(None, livelock, accept_foreign=True)

    old_master_node = cfg.GetNodeInfoByName(old_master)
    if old_master_node is None:
      raise errors.OpPrereqError("Could not find old master node '%s' in"
                                 " cluster configuration." % old_master,
                                 errors.ECODE_NOENT)

    cluster_info = cfg.GetClusterInfo()
    new_master_node = cfg.GetNodeInfoByName(new_master)
    if new_master_node is None:
      raise errors.OpPrereqError("Could not find new master node '%s' in"
                                 " cluster configuration." % new_master,
                                 errors.ECODE_NOENT)

    cluster_info.master_node = new_master_node.uuid
    # this will also regenerate the ssconf files, since we updated the
    # cluster info
    cfg.Update(cluster_info, logging.error)

    # From this point on the configuration names this node as master;
    # tell the old master to stand down
    logging.info("Stopping the master daemon on node %s", old_master)

    runner = rpc.BootstrapRunner()
    master_params = cfg.GetMasterNetworkParameters()
    master_params.uuid = old_master_node.uuid
    ems = cfg.GetUseExternalMipScript()
    result = runner.call_node_deactivate_master_ip(old_master,
                                                   master_params, ems)

    msg = result.fail_msg
    if msg:
      warning = "Could not disable the master IP: %s" % (msg,)
      logging.warning("%s", warning)
      warnings.append(warning)

    result = runner.call_node_stop_master(old_master)
    msg = result.fail_msg
    if msg:
      warning = ("Could not disable the master role on the old master"
                 " %s, please disable manually: %s" % (old_master, msg))
      logging.error("%s", warning)
      warnings.append(warning)
  except errors.ConfigurationError, err:
    logging.error("Error while trying to set the new master: %s",
                  str(err))
    return 1, warnings
  finally:
    # stop WConfd again
    result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop", constants.WCONFD])
    if result.failed:
      warning = ("Could not stop the configuration daemon,"
                 " command %s had exitcode %s and error %s"
                 % (result.cmd, result.exit_code, result.output))
      logging.error("%s", warning)
      rcode = 1

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30

  # Here we have a phase where no master daemon should be running
  def _check_ip(expected):
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT) != expected:
      raise utils.RetryAgain()

  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout, args=[False])
  except utils.RetryTimeout:
    warning = ("The master IP is still reachable after %s seconds,"
               " continuing but activating the master IP on the current"
               " node will probably fail" % total_timeout)
    logging.warning("%s", warning)
    warnings.append(warning)
    rcode = 1

  if jstore.CheckDrainFlag():
    logging.info("Undraining job queue")
    jstore.SetDrainFlag(False)

  logging.info("Starting the master daemons on the new master")

  result = rpc.BootstrapRunner().call_node_start_master_daemons(new_master,
                                                                no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  # Finally verify that the new master managed to set up the master IP
  # and warn if it didn't
  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout, args=[True])
  except utils.RetryTimeout:
    warning = ("The master IP did not come up within %s seconds; the"
               " cluster should still be working and reachable via %s,"
               " but not via the master IP address"
               % (total_timeout, new_master))
    logging.warning("%s", warning)
    warnings.append(warning)
    rcode = 1

  logging.info("Master failed over from %s to %s", old_master, new_master)
  return rcode, warnings


def GetMaster():
  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of importing directly ssconf, it's better
  to abstract it in bootstrap, where we do use ssconf in other
  functions too.

  """
  sstore = ssconf.SimpleStore()

  old_master, _ = ssconf.GetMasterAndMyself(sstore)

  return old_master


def GatherMasterVotes(node_names):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  @type node_names: list
  @param node_names: the list of nodes to query for master info
  @rtype: list
  @return: list of (node, votes)

  """
  if not node_names:
    # no nodes
    return []
  results = rpc.BootstrapRunner().call_master_node_name(node_names)
  if not isinstance(results, dict):
    # this should not happen (unless internal error in rpc)
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_names))]
  votes = {}
  for node_name in results:
    nres = results[node_name]
    msg = nres.fail_msg

    if msg:
      logging.warning("Error contacting node %s: %s", node_name, msg)
      node = None
    else:
      node = nres.payload

    if node not in votes:
      votes[node] = 1
    else:
      votes[node] += 1

  vote_list = list(votes.items())
  # sort the list descending by number of votes, breaking ties by node
  # name, so that the result is deterministic
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list

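# Illustrative return value for GatherMasterVotes (hostnames are
# placeholders): querying five nodes of which four agree and one is
# unreachable could yield
#
#   [("node1.example.com", 4), (None, 1)]
#
# i.e. four votes for node1 and one error entry under the None key.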

def MajorityHealthy():
  """Check if the majority of nodes is healthy

  Gather master votes from all nodes known to this node;
  return True if a strict majority of nodes is reachable and
  has some opinion on which node is master. Note that this will
  not guarantee any node to win an election but it ensures that
  a standard master-failover is still possible.

  """
  node_names = ssconf.SimpleStore().GetNodeList()
  node_count = len(node_names)
  vote_list = GatherMasterVotes(node_names)
  if vote_list is None:
    return False
  total_votes = sum([count for (node, count) in vote_list if node is not None])
  logging.info("Total %d nodes, %d votes: %s", node_count, total_votes,
               vote_list)
  return 2 * total_votes > node_count
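# Worked example for the majority check above (values are illustrative):
# with node_count == 5 and vote_list == [("node1", 2), ("node2", 1),
# (None, 2)], total_votes is 3 (the two error entries under None are
# discarded) and 2 * 3 > 5 holds, so MajorityHealthy() returns True.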