22 """Functions to bootstrap a new cluster.
23
24 """
25
26 import os
27 import os.path
28 import re
29 import logging
30 import time
31
32 from ganeti import rpc
33 from ganeti import ssh
34 from ganeti import utils
35 from ganeti import errors
36 from ganeti import config
37 from ganeti import constants
38 from ganeti import objects
39 from ganeti import ssconf
40 from ganeti import serializer
41 from ganeti import hypervisor
42 from ganeti import bdev
43 from ganeti import netutils
44 from ganeti import backend
45 from ganeti import luxi
46 from ganeti import jstore
47
48
49
50 _INITCONF_ECID = "initconfig-ecid"
51
52
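#: How long (in seconds) to wait for a freshly started daemon to answer queries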
_DAEMON_READY_TIMEOUT = 10.0


def _InitSSHSetup():
  """Setup the SSH configuration for the cluster.

  This generates a dsa keypair for root, adds the public key to the
  list of authorized keys and adds the host key to its own known hosts.

  """
  priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)

  for name in priv_key, pub_key:
    if os.path.exists(name):
      utils.CreateBackup(name)
    utils.RemoveFile(name)

  result = utils.RunCmd(["ssh-keygen", "-t", "dsa",
                         "-f", priv_key,
                         "-q", "-N", ""])
  if result.failed:
    raise errors.OpExecError("Could not generate ssh keypair, error %s" %
                             result.output)

  utils.AddAuthorizedKey(auth_keys, utils.ReadFile(pub_key))


def GenerateHmacKey(file_name):
  """Writes a new HMAC key.

  @type file_name: str
  @param file_name: Path to output file

  """
  utils.WriteFile(file_name, data="%s\n" % utils.GenerateSecret(), mode=0400,
                  backup=True)


def GenerateClusterCrypto(new_cluster_cert, new_rapi_cert, new_confd_hmac_key,
                          new_cds, rapi_cert_pem=None, cds=None,
                          nodecert_file=constants.NODED_CERT_FILE,
                          rapicert_file=constants.RAPI_CERT_FILE,
                          hmackey_file=constants.CONFD_HMAC_KEY,
                          cds_file=constants.CLUSTER_DOMAIN_SECRET_FILE):
  """Updates the cluster certificates, keys and secrets.

  @type new_cluster_cert: bool
  @param new_cluster_cert: Whether to generate a new cluster certificate
  @type new_rapi_cert: bool
  @param new_rapi_cert: Whether to generate a new RAPI certificate
  @type new_confd_hmac_key: bool
  @param new_confd_hmac_key: Whether to generate a new HMAC key
  @type new_cds: bool
  @param new_cds: Whether to generate a new cluster domain secret
  @type rapi_cert_pem: string
  @param rapi_cert_pem: New RAPI certificate in PEM format
  @type cds: string
  @param cds: New cluster domain secret
  @type nodecert_file: string
  @param nodecert_file: optional override of the node cert file path
  @type rapicert_file: string
  @param rapicert_file: optional override of the rapi cert file path
  @type hmackey_file: string
  @param hmackey_file: optional override of the hmac key file path
  @type cds_file: string
  @param cds_file: optional override of the cluster domain secret file path

  """
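  # noded SSL certificate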
  cluster_cert_exists = os.path.exists(nodecert_file)
  if new_cluster_cert or not cluster_cert_exists:
    if cluster_cert_exists:
      utils.CreateBackup(nodecert_file)

    logging.debug("Generating new cluster certificate at %s", nodecert_file)
    utils.GenerateSelfSignedSslCert(nodecert_file)

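  # confd HMAC key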
  if new_confd_hmac_key or not os.path.exists(hmackey_file):
    logging.debug("Writing new confd HMAC key to %s", hmackey_file)
    GenerateHmacKey(hmackey_file)

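  # RAPI certificate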
  rapi_cert_exists = os.path.exists(rapicert_file)

  if rapi_cert_pem:
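    # Assume rapi_cert_pem contains a valid PEM-formatted certificate and key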
    logging.debug("Writing RAPI certificate at %s", rapicert_file)
    utils.WriteFile(rapicert_file, data=rapi_cert_pem, backup=True)

  elif new_rapi_cert or not rapi_cert_exists:
    if rapi_cert_exists:
      utils.CreateBackup(rapicert_file)

    logging.debug("Generating new RAPI certificate at %s", rapicert_file)
    utils.GenerateSelfSignedSslCert(rapicert_file)

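  # Cluster domain secret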
  if cds:
    logging.debug("Writing cluster domain secret to %s", cds_file)
    utils.WriteFile(cds_file, data=cds, backup=True)

  elif new_cds or not os.path.exists(cds_file):
    logging.debug("Generating new cluster domain secret at %s", cds_file)
    GenerateHmacKey(cds_file)


def _InitGanetiServerSetup(master_name):
  """Setup the necessary configuration for the initial node daemon.

  This generates the cluster SSL certificate and starts the node
  daemon, so that the master can contact this node via normal RPC
  calls.

  @type master_name: str
  @param master_name: Name of the master node

  """
  GenerateClusterCrypto(True, False, False, False)

  result = utils.RunCmd([constants.DAEMON_UTIL, "start", constants.NODED])
  if result.failed:
    raise errors.OpExecError("Could not start the node daemon, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForNodeDaemon(master_name)


def _WaitForNodeDaemon(node_name):
  """Wait for node daemon to become responsive.

  """
  def _CheckNodeDaemon():
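    # Query the node daemon's version as a cheap liveness check; any
    # failure means it is not yet ready to answer queries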
    result = rpc.RpcRunner.call_version([node_name])[node_name]
    if result.fail_msg:
      raise utils.RetryAgain()

  try:
    utils.Retry(_CheckNodeDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Node daemon on %s didn't answer queries within"
                             " %s seconds" % (node_name, _DAEMON_READY_TIMEOUT))


def _WaitForMasterDaemon():
  """Wait for master daemon to become responsive.

  """
  def _CheckMasterDaemon():
    # Ask the master daemon (via luxi) for the cluster name; any error
    # means it is not yet able to answer queries
    try:
      cl = luxi.Client()
      (cluster_name, ) = cl.QueryConfigValues(["cluster_name"])
    except Exception:
      raise utils.RetryAgain()

    logging.debug("Received cluster name %s from master", cluster_name)

  try:
    utils.Retry(_CheckMasterDaemon, 1.0, _DAEMON_READY_TIMEOUT)
  except utils.RetryTimeout:
    raise errors.OpExecError("Master daemon didn't answer queries within"
                             " %s seconds" % _DAEMON_READY_TIMEOUT)


def _InitFileStorage(file_storage_dir):
  """Initialize the file storage, if needed.

  @param file_storage_dir: the user-supplied value
  @return: the normalized path to the storage directory

  """
  file_storage_dir = os.path.normpath(file_storage_dir)

  if not os.path.isabs(file_storage_dir):
    raise errors.OpPrereqError("File storage directory '%s' is not an absolute"
                               " path" % file_storage_dir, errors.ECODE_INVAL)

  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0750)
    except OSError, err:
      raise errors.OpPrereqError("Cannot create file storage directory"
                                 " '%s': %s" % (file_storage_dir, err),
                                 errors.ECODE_ENVIRON)

  if not os.path.isdir(file_storage_dir):
    raise errors.OpPrereqError("The file storage directory '%s' is not"
                               " a directory." % file_storage_dir,
                               errors.ECODE_ENVIRON)
  return file_storage_dir


def InitCluster(cluster_name, mac_prefix,
                master_netdev, file_storage_dir, shared_file_storage_dir,
                candidate_pool_size, secondary_ip=None, vg_name=None,
                beparams=None, nicparams=None, ndparams=None, hvparams=None,
                enabled_hypervisors=None, modify_etc_hosts=True,
                modify_ssh_setup=True, maintain_node_health=False,
                drbd_helper=None, uid_pool=None, default_iallocator=None,
                primary_ip_version=None, prealloc_wipe_disks=False):
  """Initialise the cluster.

  @type candidate_pool_size: int
  @param candidate_pool_size: master candidate pool size

  """
  if config.ConfigWriter.IsCluster():
    raise errors.OpPrereqError("Cluster is already initialised",
                               errors.ECODE_STATE)

  if not enabled_hypervisors:
    raise errors.OpPrereqError("Enabled hypervisors list must contain at"
                               " least one member", errors.ECODE_INVAL)
  invalid_hvs = set(enabled_hypervisors) - constants.HYPER_TYPES
  if invalid_hvs:
    raise errors.OpPrereqError("Enabled hypervisors contains invalid"
                               " entries: %s" % invalid_hvs,
                               errors.ECODE_INVAL)

  ipcls = None
  if primary_ip_version == constants.IP4_VERSION:
    ipcls = netutils.IP4Address
  elif primary_ip_version == constants.IP6_VERSION:
    ipcls = netutils.IP6Address
  else:
    raise errors.OpPrereqError("Invalid primary ip version: %d." %
                               primary_ip_version, errors.ECODE_INVAL)

  hostname = netutils.GetHostname(family=ipcls.family)
  if not ipcls.IsValid(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) is not a valid IPv%d"
                               " address." % (hostname.ip, primary_ip_version),
                               errors.ECODE_INVAL)

  if ipcls.IsLoopback(hostname.ip):
    raise errors.OpPrereqError("This host's IP (%s) resolves to a loopback"
                               " address. Please fix DNS or %s." %
                               (hostname.ip, constants.ETC_HOSTS),
                               errors.ECODE_ENVIRON)

  if not ipcls.Own(hostname.ip):
    raise errors.OpPrereqError("Inconsistency: this host's name resolves"
                               " to %s,\nbut this ip address does not"
                               " belong to this host" %
                               hostname.ip, errors.ECODE_ENVIRON)

  clustername = netutils.GetHostname(name=cluster_name, family=ipcls.family)

  if netutils.TcpPing(clustername.ip, constants.DEFAULT_NODED_PORT, timeout=5):
    raise errors.OpPrereqError("Cluster IP already active",
                               errors.ECODE_NOTUNIQUE)

  if not secondary_ip:
    if primary_ip_version == constants.IP6_VERSION:
      raise errors.OpPrereqError("When using an IPv6 primary address, a valid"
                                 " IPv4 address must be given as secondary",
                                 errors.ECODE_INVAL)
    secondary_ip = hostname.ip

  if not netutils.IP4Address.IsValid(secondary_ip):
    raise errors.OpPrereqError("Secondary IP address (%s) has to be a valid"
                               " IPv4 address." % secondary_ip,
                               errors.ECODE_INVAL)

  if not netutils.IP4Address.Own(secondary_ip):
    raise errors.OpPrereqError("You gave %s as secondary IP,"
                               " but it does not belong to this host." %
                               secondary_ip, errors.ECODE_ENVIRON)

  if vg_name is not None:
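    # Check if volume group is valid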
    vgstatus = utils.CheckVolumeGroupSize(utils.ListVolumeGroups(), vg_name,
                                          constants.MIN_VG_SIZE)
    if vgstatus:
      raise errors.OpPrereqError("Error: %s\nspecify --no-lvm-storage if"
                                 " you are not using lvm" % vgstatus,
                                 errors.ECODE_INVAL)

  if drbd_helper is not None:
    try:
      curr_helper = bdev.BaseDRBD.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      raise errors.OpPrereqError("Error while checking drbd helper"
                                 " (specify --no-drbd-storage if you are not"
                                 " using drbd): %s" % str(err),
                                 errors.ECODE_ENVIRON)
    if drbd_helper != curr_helper:
      raise errors.OpPrereqError("Error: requiring %s as drbd helper but %s"
                                 " is the current helper" % (drbd_helper,
                                                             curr_helper),
                                 errors.ECODE_INVAL)

  if constants.ENABLE_FILE_STORAGE:
    file_storage_dir = _InitFileStorage(file_storage_dir)
  else:
    file_storage_dir = ""

  if constants.ENABLE_SHARED_FILE_STORAGE:
    shared_file_storage_dir = _InitFileStorage(shared_file_storage_dir)
  else:
    shared_file_storage_dir = ""

  if not re.match("^[0-9a-z]{2}:[0-9a-z]{2}:[0-9a-z]{2}$", mac_prefix):
    raise errors.OpPrereqError("Invalid mac prefix given '%s'" % mac_prefix,
                               errors.ECODE_INVAL)

  result = utils.RunCmd(["ip", "link", "show", "dev", master_netdev])
  if result.failed:
    raise errors.OpPrereqError("Invalid master netdev given (%s): '%s'" %
                               (master_netdev,
                                result.output.strip()), errors.ECODE_INVAL)

  dirs = [(constants.RUN_GANETI_DIR, constants.RUN_DIRS_MODE)]
  utils.EnsureDirs(dirs)

  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(nicparams, constants.NICS_PARAMETER_TYPES)
  objects.NIC.CheckParameterSyntax(nicparams)

  if ndparams is not None:
    utils.ForceDictType(ndparams, constants.NDS_PARAMETER_TYPES)
  else:
    ndparams = dict(constants.NDC_DEFAULTS)

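  # hvparams is a mapping of hypervisor -> hvparams dict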
  for hv_name, hv_params in hvparams.iteritems():
    utils.ForceDictType(hv_params, constants.HVS_PARAMETER_TYPES)
    hv_class = hypervisor.GetHypervisor(hv_name)
    hv_class.CheckParameterSyntax(hv_params)

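  # set up ssh config and /etc/hosts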
  sshline = utils.ReadFile(constants.SSH_HOST_RSA_PUB)
  sshkey = sshline.split(" ")[1]

  if modify_etc_hosts:
    utils.AddHostToEtcHosts(hostname.name, hostname.ip)

  if modify_ssh_setup:
    _InitSSHSetup()

  if default_iallocator is not None:
    alloc_script = utils.FindFile(default_iallocator,
                                  constants.IALLOCATOR_SEARCH_PATH,
                                  os.path.isfile)
    if alloc_script is None:
      raise errors.OpPrereqError("Invalid default iallocator script '%s'"
                                 " specified" % default_iallocator,
                                 errors.ECODE_INVAL)
  elif constants.HTOOLS:
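    # htools was enabled at build-time, so default to the hail iallocator
    # if it is available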
    if utils.FindFile(constants.IALLOC_HAIL,
                      constants.IALLOCATOR_SEARCH_PATH,
                      os.path.isfile):
      default_iallocator = constants.IALLOC_HAIL

  now = time.time()

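  # init of cluster config file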
  cluster_config = objects.Cluster(
    serial_no=1,
    rsahostkeypub=sshkey,
    highest_used_port=(constants.FIRST_DRBD_PORT - 1),
    mac_prefix=mac_prefix,
    volume_group_name=vg_name,
    tcpudp_port_pool=set(),
    master_node=hostname.name,
    master_ip=clustername.ip,
    master_netdev=master_netdev,
    cluster_name=clustername.name,
    file_storage_dir=file_storage_dir,
    shared_file_storage_dir=shared_file_storage_dir,
    enabled_hypervisors=enabled_hypervisors,
    beparams={constants.PP_DEFAULT: beparams},
    nicparams={constants.PP_DEFAULT: nicparams},
    ndparams=ndparams,
    hvparams=hvparams,
    candidate_pool_size=candidate_pool_size,
    modify_etc_hosts=modify_etc_hosts,
    modify_ssh_setup=modify_ssh_setup,
    uid_pool=uid_pool,
    ctime=now,
    mtime=now,
    maintain_node_health=maintain_node_health,
    drbd_usermode_helper=drbd_helper,
    default_iallocator=default_iallocator,
    primary_ip_family=ipcls.family,
    prealloc_wipe_disks=prealloc_wipe_disks,
    )
  master_node_config = objects.Node(name=hostname.name,
                                    primary_ip=hostname.ip,
                                    secondary_ip=secondary_ip,
                                    serial_no=1,
                                    master_candidate=True,
                                    offline=False, drained=False,
                                    ctime=now, mtime=now,
                                    )
  InitConfig(constants.CONFIG_VERSION, cluster_config, master_node_config)
  cfg = config.ConfigWriter(offline=True)
  ssh.WriteKnownHostsFile(cfg, constants.SSH_KNOWN_HOSTS_FILE)
  cfg.Update(cfg.GetClusterInfo(), logging.error)
  backend.WriteSsconfFiles(cfg.GetSsconfValues())

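  # set up the node certificate and start the node daemon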
  _InitGanetiServerSetup(hostname.name)

  logging.debug("Starting daemons")
  result = utils.RunCmd([constants.DAEMON_UTIL, "start-all"])
  if result.failed:
    raise errors.OpExecError("Could not start daemons, command %s"
                             " had exitcode %s and error %s" %
                             (result.cmd, result.exit_code, result.output))

  _WaitForMasterDaemon()


def InitConfig(version, cluster_config, master_node_config,
               cfg_file=constants.CLUSTER_CONF_FILE):
  """Create the initial cluster configuration.

  It will contain the current node, which will also be the master
  node, and no instances.

  @type version: int
  @param version: configuration version
  @type cluster_config: L{objects.Cluster}
  @param cluster_config: cluster configuration
  @type master_node_config: L{objects.Node}
  @param master_node_config: master node configuration
  @type cfg_file: string
  @param cfg_file: configuration file path

  """
  uuid_generator = config.TemporaryReservationManager()
  cluster_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                _INITCONF_ECID)
  master_node_config.uuid = uuid_generator.Generate([], utils.NewUUID,
                                                    _INITCONF_ECID)
  nodes = {
    master_node_config.name: master_node_config,
    }
  default_nodegroup = objects.NodeGroup(
    uuid=uuid_generator.Generate([], utils.NewUUID, _INITCONF_ECID),
    name=constants.INITIAL_NODE_GROUP_NAME,
    members=[master_node_config.name],
    )
  nodegroups = {
    default_nodegroup.uuid: default_nodegroup,
    }
  now = time.time()
  config_data = objects.ConfigData(version=version,
                                   cluster=cluster_config,
                                   nodegroups=nodegroups,
                                   nodes=nodes,
                                   instances={},
                                   serial_no=1,
                                   ctime=now, mtime=now)
  utils.WriteFile(cfg_file,
                  data=serializer.Dump(config_data.ToDict()),
                  mode=0600)


def FinalizeClusterDestroy(master):
  """Execute the last steps of cluster destroy.

  This function shuts down all the daemons, completing the destroy
  begun in cmdlib.LUDestroyOpcode.

  """
  cfg = config.ConfigWriter()
  modify_ssh_setup = cfg.GetClusterInfo().modify_ssh_setup
  result = rpc.RpcRunner.call_node_stop_master(master, True)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not disable the master role: %s", msg)
  result = rpc.RpcRunner.call_node_leave_cluster(master, modify_ssh_setup)
  msg = result.fail_msg
  if msg:
    logging.warning("Could not shutdown the node daemon and cleanup"
                    " the node: %s", msg)


def SetupNodeDaemon(cluster_name, node, ssh_key_check):
  """Add a node to the cluster.

  This function must be called before the actual opcode, and will ssh
  to the remote node, copy the needed files, and start ganeti-noded,
  allowing the master to do the rest via normal rpc calls.

  @param cluster_name: the cluster name
  @param node: the name of the new node
  @param ssh_key_check: whether to do a strict key check

  """
  family = ssconf.SimpleStore().GetPrimaryIPFamily()
  sshrunner = ssh.SshRunner(cluster_name,
                            ipv6=(family == netutils.IP6Address.family))

  bind_address = constants.IP4_ADDRESS_ANY
  if family == netutils.IP6Address.family:
    bind_address = constants.IP6_ADDRESS_ANY

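  # Copy the node daemon certificate, the RAPI certificate and the confd
  # HMAC key to the new node, then restart the node daemon bound to the
  # address family selected above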
  sshrunner.CopyFileToNode(node, constants.NODED_CERT_FILE)
  sshrunner.CopyFileToNode(node, constants.RAPI_CERT_FILE)
  sshrunner.CopyFileToNode(node, constants.CONFD_HMAC_KEY)
  mycommand = ("%s stop-all; %s start %s -b %s" %
               (constants.DAEMON_UTIL, constants.DAEMON_UTIL, constants.NODED,
                utils.ShellQuote(bind_address)))

  result = sshrunner.Run(node, "root", mycommand, batch=False,
                         ask_key=ssh_key_check,
                         use_cluster_key=True,
                         strict_host_check=ssh_key_check)
  if result.failed:
    raise errors.OpExecError("Remote command on node %s, error: %s,"
                             " output: %s" %
                             (node, result.fail_reason, result.output))

  _WaitForNodeDaemon(node)


def MasterFailover(no_voting=False):
  """Failover the master node.

  This checks that we are not already the master, and will cause the
  current master to cease being master, and this node to become the
  new master.

  @type no_voting: boolean
  @param no_voting: force the operation without remote nodes agreement
      (dangerous)

  """
  sstore = ssconf.SimpleStore()

  old_master, new_master = ssconf.GetMasterAndMyself(sstore)
  node_list = sstore.GetNodeList()
  mc_list = sstore.GetMasterCandidates()

  if old_master == new_master:
    raise errors.OpPrereqError("This command must be run on the node"
                               " where you want the new master to be."
                               " %s is already the master" %
                               old_master, errors.ECODE_INVAL)

  if new_master not in mc_list:
    mc_no_master = [name for name in mc_list if name != old_master]
    raise errors.OpPrereqError("This node is not among the nodes marked"
                               " as master candidates. Only these nodes"
                               " can become masters. Current list of"
                               " master candidates is:\n"
                               "%s" % ("\n".join(mc_no_master)),
                               errors.ECODE_STATE)

  if not no_voting:
    vote_list = GatherMasterVotes(node_list)

    if vote_list:
      voted_master = vote_list[0][0]
      if voted_master is None:
        raise errors.OpPrereqError("Cluster is inconsistent, most nodes did"
                                   " not respond.", errors.ECODE_ENVIRON)
      elif voted_master != old_master:
        raise errors.OpPrereqError("I have a wrong configuration, I believe"
                                   " the master is %s but the other nodes"
                                   " voted %s. Please resync the configuration"
                                   " of this node." %
                                   (old_master, voted_master),
                                   errors.ECODE_STATE)

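  # end of checks; from here on we actually perform the failover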
  rcode = 0

  logging.info("Setting master to %s, old master: %s", new_master, old_master)

  try:
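    # Instantiate a config writer directly; accept_foreign is needed
    # because the on-disk configuration still names the old master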
    cfg = config.ConfigWriter(accept_foreign=True)

    cluster_info = cfg.GetClusterInfo()
    cluster_info.master_node = new_master
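    # this will also regenerate the ssconf files, since we updated the
    # cluster info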
    cfg.Update(cluster_info, logging.error)
  except errors.ConfigurationError, err:
    logging.error("Error while trying to set the new master: %s",
                  str(err))
    return 1

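  # The configuration now names the new master; stop the master daemon
  # on the old master so that it no longer acts on its stale view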
  logging.info("Stopping the master daemon on node %s", old_master)

  result = rpc.RpcRunner.call_node_stop_master(old_master, True)
  msg = result.fail_msg
  if msg:
    logging.error("Could not disable the master role on the old master"
                  " %s, please disable manually: %s", old_master, msg)

  logging.info("Checking master IP non-reachability...")

  master_ip = sstore.GetMasterIP()
  total_timeout = 30

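  # Here we have a phase where no master daemon should be running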
  def _check_ip():
    if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
      raise utils.RetryAgain()

  try:
    utils.Retry(_check_ip, (1, 1.5, 5), total_timeout)
  except utils.RetryTimeout:
    logging.warning("The master IP is still reachable after %s seconds,"
                    " continuing but activating the master on the current"
                    " node will probably fail", total_timeout)

  if jstore.CheckDrainFlag():
    logging.info("Undraining job queue")
    jstore.SetDrainFlag(False)

  logging.info("Starting the master daemons on the new master")

  result = rpc.RpcRunner.call_node_start_master(new_master, True, no_voting)
  msg = result.fail_msg
  if msg:
    logging.error("Could not start the master role on the new master"
                  " %s, please check: %s", new_master, msg)
    rcode = 1

  logging.info("Master failed over from %s to %s", old_master, new_master)
  return rcode


def GetMaster():
  """Returns the current master node.

  This is a separate function in bootstrap since it's needed by
  gnt-cluster, and instead of importing ssconf directly, it's better
  to abstract it here, in the module where ssconf is already used by
  other functions.

  """
  sstore = ssconf.SimpleStore()

  old_master, _ = ssconf.GetMasterAndMyself(sstore)

  return old_master


def GatherMasterVotes(node_list):
  """Check the agreement on who is the master.

  This function will return a list of (node, number of votes), ordered
  by the number of votes. Errors will be denoted by the key 'None'.

  Note that the sum of votes is the number of nodes this machine
  knows, whereas the number of entries in the list could be different
  (if some nodes vote for another master).

  We remove ourselves from the list since we know that (bugs aside)
  we use the same source of configuration information for both backend
  and bootstrap, and so we'll always vote for ourselves.

  @type node_list: list
  @param node_list: the list of nodes to query for master info; the current
      node will be removed if it is in the list
  @rtype: list
  @return: list of (node, votes)

  """
  myself = netutils.Hostname.GetSysName()
  try:
    node_list.remove(myself)
  except ValueError:
    pass
  if not node_list:
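    # no nodes left (after removing ourselves), so nobody to query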
    return []
  results = rpc.RpcRunner.call_master_info(node_list)
  if not isinstance(results, dict):
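    # this should not happen (unless there is an internal error in rpc)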
    logging.critical("Can't complete rpc call, aborting master startup")
    return [(None, len(node_list))]
  votes = {}
  for node in results:
    nres = results[node]
    data = nres.payload
    msg = nres.fail_msg
    fail = False
    if msg:
      logging.warning("Error contacting node %s: %s", node, msg)
      fail = True
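    # The payload should be a sequence whose third element is the master
    # node; accept any length of at least 3 for compatibility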
    elif not isinstance(data, (tuple, list)) or len(data) < 3:
      logging.warning("Invalid data received from node %s: %s", node, data)
      fail = True
    if fail:
      if None not in votes:
        votes[None] = 0
      votes[None] += 1
      continue
    master_node = data[2]
    if master_node not in votes:
      votes[master_node] = 0
    votes[master_node] += 1

  vote_list = votes.items()

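  # Sort by number of votes, with the biggest vote count first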
  vote_list.sort(key=lambda x: (x[1], x[0]), reverse=True)

  return vote_list