31 """Common functions used by multiple logical units."""
32
33 import copy
34 import math
35 import os
36 import urllib2
37
38 from ganeti import compat
39 from ganeti import constants
40 from ganeti import errors
41 from ganeti import hypervisor
42 from ganeti import locking
43 from ganeti import objects
44 from ganeti import opcodes
45 from ganeti import pathutils
46 import ganeti.rpc.node as rpc
47 from ganeti.serializer import Private
48 from ganeti import ssconf
49 from ganeti import utils
50
51
52
53 INSTANCE_DOWN = [constants.ADMINST_DOWN]
54 INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
55 INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]
56
57
59 """Expand an item name.
60
61 @param expand_fn: the function to use for expansion
62 @param name: requested item name
63 @param kind: text description ('Node' or 'Instance')
64 @return: the result of the expand_fn, if successful
65 @raise errors.OpPrereqError: if the item is not found
66
67 """
68 (uuid, full_name) = expand_fn(name)
69 if uuid is None or full_name is None:
70 raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
71 errors.ECODE_NOENT)
72 return (uuid, full_name)
73
74
76 """Wrapper over L{_ExpandItemName} for instance."""
77 (uuid, full_name) = _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
78 if expected_uuid is not None and uuid != expected_uuid:
79 raise errors.OpPrereqError(
80 "The instances UUID '%s' does not match the expected UUID '%s' for"
81 " instance '%s'. Maybe the instance changed since you submitted this"
82 " job." % (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
83 return (uuid, full_name)
84
85
87 """Expand a short node name into the node UUID and full name.
88
89 @type cfg: L{config.ConfigWriter}
90 @param cfg: The cluster configuration
91 @type expected_uuid: string
92 @param expected_uuid: expected UUID for the node (or None if there is no
93 expectation). If it does not match, a L{errors.OpPrereqError} is
94 raised.
95 @type name: string
96 @param name: the short node name
97
98 """
99 (uuid, full_name) = _ExpandItemName(cfg.ExpandNodeName, name, "Node")
100 if expected_uuid is not None and uuid != expected_uuid:
101 raise errors.OpPrereqError(
102 "The nodes UUID '%s' does not match the expected UUID '%s' for node"
103 " '%s'. Maybe the node changed since you submitted this job." %
104 (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
105 return (uuid, full_name)
106
107
109 """Returns a dict declaring all lock levels shared.
110
111 """
112 return dict.fromkeys(locking.LEVELS, 1)
113
114
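# Illustrative note (level names assumed from the locking module): with
# locking.LEVELS containing e.g. LEVEL_CLUSTER, LEVEL_INSTANCE and LEVEL_NODE,
# ShareAll() returns {LEVEL_CLUSTER: 1, LEVEL_INSTANCE: 1, LEVEL_NODE: 1, ...},
# i.e. a share_locks dictionary requesting every lock level in shared mode.
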
116 """Checks if the instances in a node group are still correct.
117
118 @type cfg: L{config.ConfigWriter}
119 @param cfg: The cluster configuration
120 @type group_uuid: string
121 @param group_uuid: Node group UUID
122 @type owned_instance_names: set or frozenset
123 @param owned_instance_names: List of currently owned instances
124
125 """
126 wanted_instances = frozenset(cfg.GetInstanceNames(
127 cfg.GetNodeGroupInstances(group_uuid)))
128 if owned_instance_names != wanted_instances:
129 raise errors.OpPrereqError("Instances in node group '%s' changed since"
130 " locks were acquired, wanted '%s', have '%s';"
131 " retry the operation" %
132 (group_uuid,
133 utils.CommaJoin(wanted_instances),
134 utils.CommaJoin(owned_instance_names)),
135 errors.ECODE_STATE)
136
137 return wanted_instances
138
139
141 """Returns list of checked and expanded node names.
142
143 @type lu: L{LogicalUnit}
144 @param lu: the logical unit on whose behalf we execute
145 @type short_node_names: list
146 @param short_node_names: list of node names or None for all nodes
147 @rtype: tuple of lists
148 @return: tupe with (list of node UUIDs, list of node names)
149 @raise errors.ProgrammerError: if the nodes parameter is wrong type
150
151 """
152 if short_node_names:
153 node_uuids = [ExpandNodeUuidAndName(lu.cfg, None, name)[0]
154 for name in short_node_names]
155 else:
156 node_uuids = lu.cfg.GetNodeList()
157
158 return (node_uuids, [lu.cfg.GetNodeName(uuid) for uuid in node_uuids])
159
160
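# Illustrative example (hypothetical names/UUIDs):
#   GetWantedNodes(lu, ["node1"]) -> (["6969ab..."], ["node1.example.com"])
#   GetWantedNodes(lu, None)      -> UUIDs and names of all nodes in the cluster
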
162 """Returns list of checked and expanded instance names.
163
164 @type lu: L{LogicalUnit}
165 @param lu: the logical unit on whose behalf we execute
166 @type short_inst_names: list
167 @param short_inst_names: list of instance names or None for all instances
168 @rtype: tuple of lists
169 @return: tuple of (instance UUIDs, instance names)
170 @raise errors.OpPrereqError: if the instances parameter is wrong type
171 @raise errors.OpPrereqError: if any of the passed instances is not found
172
173 """
174 if short_inst_names:
175 inst_uuids = [ExpandInstanceUuidAndName(lu.cfg, None, name)[0]
176 for name in short_inst_names]
177 else:
178 inst_uuids = lu.cfg.GetInstanceList()
179 return (inst_uuids, [lu.cfg.GetInstanceName(uuid) for uuid in inst_uuids])
180
181
def RunPostHook(lu, node_uuid):
  """Runs the post-hook for an opcode on a single node.

  """
  hm = lu.proc.BuildHooksManager(lu)
  try:
    # Run the post phase for the opcode itself and report a global success
    hm.RunPhase(constants.HOOKS_PHASE_POST, node_uuids=[node_uuid])
    hm.RunPhase(constants.HOOKS_PHASE_POST, [node_uuid], is_global=True,
                post_status=constants.POST_HOOKS_STATUS_SUCCESS)
  except Exception, err:
    # hook failures must not abort the operation, only warn
    lu.LogWarning("Errors occurred running hooks on %s: %s",
                  node_uuid, err)

198 """Distribute additional files which are part of the cluster configuration.
199
200 ConfigWriter takes care of distributing the config and ssconf files, but
201 there are more files which should be distributed to all nodes. This function
202 makes sure those are copied.
203
204 """
205
206 cluster = lu.cfg.GetClusterInfo()
207 master_info = lu.cfg.GetMasterNodeInfo()
208
209 online_node_uuids = lu.cfg.GetOnlineNodeList()
210 online_node_uuid_set = frozenset(online_node_uuids)
211 vm_node_uuids = list(online_node_uuid_set.intersection(
212 lu.cfg.GetVmCapableNodeList()))
213
214
215 for node_uuids in [online_node_uuids, vm_node_uuids]:
216 if master_info.uuid in node_uuids:
217 node_uuids.remove(master_info.uuid)
218
219
220 (files_all, _, files_mc, files_vm) = \
221 ComputeAncillaryFiles(cluster, True)
222
223
224 assert not (pathutils.CLUSTER_CONF_FILE in files_all or
225 pathutils.CLUSTER_CONF_FILE in files_vm)
226 assert not files_mc, "Master candidates not handled in this function"
227
228 filemap = [
229 (online_node_uuids, files_all),
230 (vm_node_uuids, files_vm),
231 ]
232
233
234 for (node_uuids, files) in filemap:
235 for fname in files:
236 UploadHelper(lu, node_uuids, fname)
237
238
240 """Compute files external to Ganeti which need to be consistent.
241
242 @type redist: boolean
243 @param redist: Whether to include files which need to be redistributed
244
245 """
246
247 files_all = set([
248 pathutils.SSH_KNOWN_HOSTS_FILE,
249 pathutils.CONFD_HMAC_KEY,
250 pathutils.CLUSTER_DOMAIN_SECRET_FILE,
251 pathutils.SPICE_CERT_FILE,
252 pathutils.SPICE_CACERT_FILE,
253 pathutils.RAPI_USERS_FILE,
254 ])
255
256 if redist:
257
258 files_all.add(pathutils.RAPI_CERT_FILE)
259 else:
260 files_all.update(pathutils.ALL_CERT_FILES)
261 files_all.update(ssconf.SimpleStore().GetFileList())
262
263 if cluster.modify_etc_hosts:
264 files_all.add(pathutils.ETC_HOSTS)
265
266 if cluster.use_external_mip_script:
267 files_all.add(pathutils.EXTERNAL_MASTER_SETUP_SCRIPT)
268
269
270
271
272 files_opt = set([
273 pathutils.RAPI_USERS_FILE,
274 ])
275
276
277 files_mc = set()
278
279 if not redist:
280 files_mc.add(pathutils.CLUSTER_CONF_FILE)
281
282
283 if (not redist and (cluster.IsFileStorageEnabled() or
284 cluster.IsSharedFileStorageEnabled())):
285 files_all.add(pathutils.FILE_STORAGE_PATHS_FILE)
286 files_opt.add(pathutils.FILE_STORAGE_PATHS_FILE)
287
288
289 files_vm = set(
290 filename
291 for hv_name in cluster.enabled_hypervisors
292 for filename in
293 hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[0])
294
295 files_opt |= set(
296 filename
297 for hv_name in cluster.enabled_hypervisors
298 for filename in
299 hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[1])
300
301
302 all_files_set = files_all | files_mc | files_vm
303 assert (len(all_files_set) ==
304 sum(map(len, [files_all, files_mc, files_vm]))), \
305 "Found file listed in more than one file list"
306
307
308 assert all_files_set.issuperset(files_opt), \
309 "Optional file not in a different required list"
310
311
312 assert not (redist and
313 pathutils.FILE_STORAGE_PATHS_FILE in all_files_set)
314
315 return (files_all, files_opt, files_mc, files_vm)
316
317
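# Illustrative usage, mirroring RedistributeAncillaryFiles above: the caller
# unpacks the tuple as
#   (files_all, _, files_mc, files_vm) = ComputeAncillaryFiles(cluster, True)
# and then pushes files_all to all online nodes and files_vm only to
# VM-capable ones.
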
319 """Helper for uploading a file and showing warnings.
320
321 """
322 if os.path.exists(fname):
323 result = lu.rpc.call_upload_file(node_uuids, fname)
324 for to_node_uuids, to_result in result.items():
325 msg = to_result.fail_msg
326 if msg:
327 msg = ("Copy of file %s to node %s failed: %s" %
328 (fname, lu.cfg.GetNodeName(to_node_uuids), msg))
329 lu.LogWarning(msg)
330
331
333 """Combines the hv state from an opcode with the one of the object
334
335 @param op_input: The input dict from the opcode
336 @param obj_input: The input dict from the objects
337 @return: The verified and updated dict
338
339 """
340 if op_input:
341 invalid_hvs = set(op_input) - constants.HYPER_TYPES
342 if invalid_hvs:
343 raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
344 " %s" % utils.CommaJoin(invalid_hvs),
345 errors.ECODE_INVAL)
346 if obj_input is None:
347 obj_input = {}
348 type_check = constants.HVSTS_PARAMETER_TYPES
349 return _UpdateAndVerifySubDict(obj_input, op_input, type_check)
350
351 return None
352
353
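# Illustrative example (hypothetical values): an opcode dict such as
#   {"xen-pvm": {"mem_total": 4096}}
# is merged into the object's existing hv_state; unknown hypervisor names are
# rejected, and value types are checked against HVSTS_PARAMETER_TYPES.
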
355 """Combines the disk state from an opcode with the one of the object
356
357 @param op_input: The input dict from the opcode
358 @param obj_input: The input dict from the objects
359 @return: The verified and updated dict
360 """
361 if op_input:
362 invalid_dst = set(op_input) - constants.DS_VALID_TYPES
363 if invalid_dst:
364 raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
365 utils.CommaJoin(invalid_dst),
366 errors.ECODE_INVAL)
367 type_check = constants.DSS_PARAMETER_TYPES
368 if obj_input is None:
369 obj_input = {}
370 return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
371 type_check))
372 for key, value in op_input.items())
373
374 return None
375
376
def CheckOSParams(lu, required, node_uuids, osname, osparams, force_variant):
  """OS parameters validation.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type required: boolean
  @param required: whether the validation should fail if the OS is not
      found
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @type osname: string
  @param osname: the name of the OS we should use
  @type osparams: dict
  @param osparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  node_uuids = _FilterVmNodes(lu, node_uuids)

  # Last chance to unwrap private elements
  for key in osparams:
    if isinstance(osparams[key], Private):
      osparams[key] = osparams[key].Get()

  if osname:
    result = lu.rpc.call_os_validate(node_uuids, required, osname,
                                     [constants.OS_VALIDATE_PARAMETERS],
                                     osparams, force_variant)
    for node_uuid, nres in result.items():
      # nres.Raise aborts on RPC-level failures; a missing OS (with
      # required=False) only shows up as an empty payload
      nres.Raise("OS Parameters validation failed on node %s" %
                 lu.cfg.GetNodeName(node_uuid))
      if not nres.payload:
        lu.LogInfo("OS %s not found on node %s, validation skipped",
                   osname, lu.cfg.GetNodeName(node_uuid))

416 """Checks if a given image description is either a valid file path or a URL.
417
418 @type image: string
419 @param image: An absolute path or URL, the assumed location of a disk image.
420 @type error_message: string
421 @param error_message: The error message to show if the image is not valid.
422
423 @raise errors.OpPrereqError: If the validation fails.
424
425 """
426 if image is not None and not (utils.IsUrl(image) or os.path.isabs(image)):
427 raise errors.OpPrereqError(error_message)
428
429
431 """Checks if the OS image in the OS parameters of an opcode is
432 valid.
433
434 This function can also be used in LUs as they carry an opcode.
435
436 @type op: L{opcodes.OpCode}
437 @param op: opcode containing the OS params
438
439 @rtype: string or NoneType
440 @return:
441 None if the OS parameters in the opcode do not contain the OS
442 image, otherwise the OS image value contained in the OS parameters
443 @raise errors.OpPrereqError: if OS image is not a URL or an absolute path
444
445 """
446 os_image = objects.GetOSImage(op.osparams)
447 CheckImageValidity(os_image, "OS image must be an absolute path or a URL")
448 return os_image
449
450
452 """Hypervisor parameter validation.
453
454 This function abstracts the hypervisor parameter validation to be
455 used in both instance create and instance modify.
456
457 @type lu: L{LogicalUnit}
458 @param lu: the logical unit for which we check
459 @type node_uuids: list
460 @param node_uuids: the list of nodes on which we should check
461 @type hvname: string
462 @param hvname: the name of the hypervisor we should use
463 @type hvparams: dict
464 @param hvparams: the parameters which we need to check
465 @raise errors.OpPrereqError: if the parameters are not valid
466
467 """
468 node_uuids = _FilterVmNodes(lu, node_uuids)
469
470 cluster = lu.cfg.GetClusterInfo()
471 hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)
472
473 hvinfo = lu.rpc.call_hypervisor_validate_params(node_uuids, hvname, hvfull)
474 for node_uuid in node_uuids:
475 info = hvinfo[node_uuid]
476 if info.offline:
477 continue
478 info.Raise("Hypervisor parameter validation failed on node %s" %
479 lu.cfg.GetNodeName(node_uuid))
480
481
def AddMasterCandidateSshKey(
    lu, master_node, node, potential_master_candidates, feedback_fn):
  ssh_result = lu.rpc.call_node_ssh_key_add(
    [master_node], node.uuid, node.name,
    potential_master_candidates,
    True,
    True,
    False,
    lu.op.debug,
    lu.op.verbose)
  ssh_result[master_node].Raise(
    "Could not update the SSH setup of node '%s' after promotion"
    " (UUID: %s)." % (node.name, node.uuid))
  WarnAboutFailedSshUpdates(ssh_result, master_node, feedback_fn)

def AdjustCandidatePool(
    lu, exceptions, master_node, potential_master_candidates, feedback_fn,
    modify_ssh_setup):
  """Adjust the candidate pool after node operations.

  @type master_node: string
  @param master_node: name of the master node
  @type potential_master_candidates: list of string
  @param potential_master_candidates: list of node names of potential master
      candidates
  @type feedback_fn: function
  @param feedback_fn: function emitting user-visible output
  @type modify_ssh_setup: boolean
  @param modify_ssh_setup: whether or not the ssh setup can be modified.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for node in mod_list:
      AddNodeCertToCandidateCerts(lu, lu.cfg, node.uuid)
      if modify_ssh_setup:
        AddMasterCandidateSshKey(
          lu, master_node, node, potential_master_candidates, feedback_fn)

  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))

531 """Check node PVs.
532
533 """
534 pvlist_dict = nresult.get(constants.NV_PVLIST, None)
535 if pvlist_dict is None:
536 return (["Can't get PV list from node"], None)
537 pvlist = map(objects.LvmPvInfo.FromDict, pvlist_dict)
538 errlist = []
539
540
541
542 for pv in pvlist:
543 if ":" in pv.name:
544 errlist.append("Invalid character ':' in PV '%s' of VG '%s'" %
545 (pv.name, pv.vg_name))
546 es_pvinfo = None
547 if exclusive_storage:
548 (errmsgs, es_pvinfo) = utils.LvmExclusiveCheckNodePvs(pvlist)
549 errlist.extend(errmsgs)
550 shared_pvs = nresult.get(constants.NV_EXCLUSIVEPVS, None)
551 if shared_pvs:
552 for (pvname, lvlist) in shared_pvs:
553
554 errlist.append("PV %s is shared among unrelated LVs (%s)" %
555 (pvname, utils.CommaJoin(lvlist)))
556 return (errlist, es_pvinfo)
557
558
560 """Computes if value is in the desired range.
561
562 @param name: name of the parameter for which we perform the check
563 @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
564 not just 'disk')
565 @param ispecs: dictionary containing min and max values
566 @param value: actual value that we want to use
567 @return: None or an error string
568
569 """
570 if value in [None, constants.VALUE_AUTO]:
571 return None
572 max_v = ispecs[constants.ISPECS_MAX].get(name, value)
573 min_v = ispecs[constants.ISPECS_MIN].get(name, value)
574 if value > max_v or min_v > value:
575 if qualifier:
576 fqn = "%s/%s" % (name, qualifier)
577 else:
578 fqn = name
579 return ("%s value %s is not in range [%s, %s]" %
580 (fqn, value, min_v, max_v))
581 return None
582
583
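# Illustrative example (hypothetical numbers): with
#   ispecs = {constants.ISPECS_MIN: {constants.ISPEC_MEM_SIZE: 512},
#             constants.ISPECS_MAX: {constants.ISPEC_MEM_SIZE: 4096}}
# _ComputeMinMaxSpec(constants.ISPEC_MEM_SIZE, None, ispecs, 8192) returns an
# error string, while a value of 1024 (or None/VALUE_AUTO) yields None.
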
587 """Verifies ipolicy against provided specs.
588
589 @type ipolicy: dict
590 @param ipolicy: The ipolicy
591 @type mem_size: int
592 @param mem_size: The memory size
593 @type cpu_count: int
594 @param cpu_count: Used cpu cores
595 @type disk_count: int
596 @param disk_count: Number of disks used
597 @type nic_count: int
598 @param nic_count: Number of nics used
599 @type disk_sizes: list of ints
600 @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
601 @type spindle_use: int
602 @param spindle_use: The number of spindles this instance uses
603 @type disk_types: list of strings
604 @param disk_types: The disk template of the instance
605 @param _compute_fn: The compute function (unittest only)
606 @return: A list of violations, or an empty list of no violations are found
607
608 """
609 assert disk_count == len(disk_sizes)
610 assert isinstance(disk_types, list)
611 assert disk_count == len(disk_types)
612
613 test_settings = [
614 (constants.ISPEC_MEM_SIZE, "", mem_size),
615 (constants.ISPEC_CPU_COUNT, "", cpu_count),
616 (constants.ISPEC_NIC_COUNT, "", nic_count),
617 (constants.ISPEC_SPINDLE_USE, "", spindle_use),
618 ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
619 for idx, d in enumerate(disk_sizes)]
620
621 allowed_dts = set(ipolicy[constants.IPOLICY_DTS])
622 ret = []
623 if disk_count != 0:
624
625 test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
626 elif constants.DT_DISKLESS not in allowed_dts:
627 ret.append("Disk template %s is not allowed (allowed templates %s)" %
628 (constants.DT_DISKLESS, utils.CommaJoin(allowed_dts)))
629
630 forbidden_dts = set(disk_types) - allowed_dts
631 if forbidden_dts:
632 ret.append("Disk template %s is not allowed (allowed templates: %s)" %
633 (utils.CommaJoin(forbidden_dts), utils.CommaJoin(allowed_dts)))
634
635 min_errs = None
636 for minmax in ipolicy[constants.ISPECS_MINMAX]:
637 errs = filter(None,
638 (_compute_fn(name, qualifier, minmax, value)
639 for (name, qualifier, value) in test_settings))
640 if min_errs is None or len(errs) < len(min_errs):
641 min_errs = errs
642 assert min_errs is not None
643 return ret + min_errs
644
645
648 """Verifies ipolicy against provided disk sizes.
649
650 No other specs except the disk sizes, the number of disks and the disk
651 template are checked.
652
653 @type ipolicy: dict
654 @param ipolicy: The ipolicy
655 @type disk_sizes: list of ints
656 @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
657 @type disks: list of L{Disk}
658 @param disks: The Disk objects of the instance
659 @param _compute_fn: The compute function (unittest only)
660 @return: A list of violations, or an empty list of no violations are found
661
662 """
663 if len(disk_sizes) != len(disks):
664 return [constants.ISPEC_DISK_COUNT]
665 dev_types = [d.dev_type for d in disks]
666 return ComputeIPolicySpecViolation(ipolicy,
667
668 None, None, len(disk_sizes),
669 None, disk_sizes,
670 None,
671 dev_types,
672 _compute_fn=_compute_fn)
673
674
677 """Compute if instance meets the specs of ipolicy.
678
679 @type ipolicy: dict
680 @param ipolicy: The ipolicy to verify against
681 @type instance: L{objects.Instance}
682 @param instance: The instance to verify
683 @type cfg: L{config.ConfigWriter}
684 @param cfg: Cluster configuration
685 @param _compute_fn: The function to verify ipolicy (unittest only)
686 @see: L{ComputeIPolicySpecViolation}
687
688 """
689 ret = []
690 be_full = cfg.GetClusterInfo().FillBE(instance)
691 mem_size = be_full[constants.BE_MAXMEM]
692 cpu_count = be_full[constants.BE_VCPUS]
693 inst_nodes = cfg.GetInstanceNodes(instance.uuid)
694 es_flags = rpc.GetExclusiveStorageForNodes(cfg, inst_nodes)
695 disks = cfg.GetInstanceDisks(instance.uuid)
696 if any(es_flags.values()):
697
698 try:
699 spindle_use = sum([disk.spindles for disk in disks])
700 except TypeError:
701 ret.append("Number of spindles not configured for disks of instance %s"
702 " while exclusive storage is enabled, try running gnt-cluster"
703 " repair-disk-sizes" % instance.name)
704
705 spindle_use = None
706 else:
707 spindle_use = be_full[constants.BE_SPINDLE_USE]
708 disk_count = len(disks)
709 disk_sizes = [disk.size for disk in disks]
710 nic_count = len(instance.nics)
711 disk_types = [d.dev_type for d in disks]
712
713 return ret + _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
714 disk_sizes, spindle_use, disk_types)
715
716
718 """Computes a set of instances who violates given ipolicy.
719
720 @param ipolicy: The ipolicy to verify
721 @type instances: L{objects.Instance}
722 @param instances: List of instances to verify
723 @type cfg: L{config.ConfigWriter}
724 @param cfg: Cluster configuration
725 @return: A frozenset of instance names violating the ipolicy
726
727 """
728 return frozenset([inst.name for inst in instances
729 if ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
730
731
733 """Computes a set of any instances that would violate the new ipolicy.
734
735 @param old_ipolicy: The current (still in-place) ipolicy
736 @param new_ipolicy: The new (to become) ipolicy
737 @param instances: List of instances to verify
738 @type cfg: L{config.ConfigWriter}
739 @param cfg: Cluster configuration
740 @return: A list of instances which violates the new ipolicy but
741 did not before
742
743 """
744 return (_ComputeViolatingInstances(new_ipolicy, instances, cfg) -
745 _ComputeViolatingInstances(old_ipolicy, instances, cfg))
746
747
def GetUpdatedParams(old_params, update_dict,
                     use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
        (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy

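# Illustrative example (hypothetical values):
#   GetUpdatedParams({"a": 1, "b": 2}, {"a": constants.VALUE_DEFAULT, "c": 3})
# returns {"b": 2, "c": 3}: "a" is reset to its default by removal, "c" is
# added, and the input dictionaries are left untouched (deep copy).
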
782 """Return the new version of an instance policy.
783
784 @param group_policy: whether this policy applies to a group and thus
785 we should support removal of policy entries
786
787 """
788 ipolicy = copy.deepcopy(old_ipolicy)
789 for key, value in new_ipolicy.items():
790 if key not in constants.IPOLICY_ALL_KEYS:
791 raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
792 errors.ECODE_INVAL)
793 if (not value or value == [constants.VALUE_DEFAULT] or
794 value == constants.VALUE_DEFAULT):
795 if group_policy:
796 if key in ipolicy:
797 del ipolicy[key]
798 else:
799 raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
800 " on the cluster'" % key,
801 errors.ECODE_INVAL)
802 else:
803 if key in constants.IPOLICY_PARAMETERS:
804
805 try:
806 ipolicy[key] = float(value)
807 except (TypeError, ValueError), err:
808 raise errors.OpPrereqError("Invalid value for attribute"
809 " '%s': '%s', error: %s" %
810 (key, value, err), errors.ECODE_INVAL)
811 elif key == constants.ISPECS_MINMAX:
812 for minmax in value:
813 for k in minmax.keys():
814 utils.ForceDictType(minmax[k], constants.ISPECS_PARAMETER_TYPES)
815 ipolicy[key] = value
816 elif key == constants.ISPECS_STD:
817 if group_policy:
818 msg = "%s cannot appear in group instance specs" % key
819 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
820 ipolicy[key] = GetUpdatedParams(old_ipolicy.get(key, {}), value,
821 use_none=False, use_default=False)
822 utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
823 else:
824
825
826 ipolicy[key] = list(value)
827 try:
828 objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
829 except errors.ConfigurationError, err:
830 raise errors.OpPrereqError("Invalid instance policy: %s" % err,
831 errors.ECODE_INVAL)
832 return ipolicy
833
834
836 """Little helper wrapper to the rpc annotation method.
837
838 @param instance: The instance object
839 @type devs: List of L{objects.Disk}
840 @param devs: The root devices (not any of its children!)
841 @param cfg: The config object
842 @returns The annotated disk copies
843 @see L{ganeti.rpc.node.AnnotateDiskParams}
844
845 """
846 return rpc.AnnotateDiskParams(devs, cfg.GetInstanceDiskParams(instance))
847
848
850 """Tells if node supports OOB.
851
852 @type cfg: L{config.ConfigWriter}
853 @param cfg: The cluster configuration
854 @type node: L{objects.Node}
855 @param node: The node
856 @return: The OOB script if supported or an empty string otherwise
857
858 """
859 return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
860
861
863 """Updates and verifies a dict with sub dicts of the same type.
864
865 @param base: The dict with the old data
866 @param updates: The dict with the new data
867 @param type_check: Dict suitable to ForceDictType to verify correct types
868 @returns: A new dict with updated and verified values
869
870 """
871 def fn(old, value):
872 new = GetUpdatedParams(old, value)
873 utils.ForceDictType(new, type_check)
874 return new
875
876 ret = copy.deepcopy(base)
877 ret.update(dict((key, fn(base.get(key, {}), value))
878 for key, value in updates.items()))
879 return ret
880
881
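# Illustrative example (hypothetical values): merging
#   base = {"plain": {"a": 1}} with updates = {"plain": {"b": 2}, "drbd": {}}
# yields {"plain": {"a": 1, "b": 2}, "drbd": {}}, each merged sub-dict being
# type-checked with utils.ForceDictType against type_check.
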
883 """Filters out non-vm_capable nodes from a list.
884
885 @type lu: L{LogicalUnit}
886 @param lu: the logical unit for which we check
887 @type node_uuids: list
888 @param node_uuids: the list of nodes on which we should check
889 @rtype: list
890 @return: the list of vm-capable nodes
891
892 """
893 vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
894 return [uuid for uuid in node_uuids if uuid not in vm_nodes]
895
896
898 """Decides on which iallocator to use.
899
900 @type cfg: L{config.ConfigWriter}
901 @param cfg: Cluster configuration object
902 @type ialloc: string or None
903 @param ialloc: Iallocator specified in opcode
904 @rtype: string
905 @return: Iallocator name
906
907 """
908 if not ialloc:
909
910 ialloc = cfg.GetDefaultIAllocator()
911
912 if not ialloc:
913 raise errors.OpPrereqError("No iallocator was specified, neither in the"
914 " opcode nor as a cluster-wide default",
915 errors.ECODE_INVAL)
916
917 return ialloc
918
919
922 """Checks if node groups for locked instances are still correct.
923
924 @type cfg: L{config.ConfigWriter}
925 @param cfg: Cluster configuration
926 @type instances: dict; string as key, L{objects.Instance} as value
927 @param instances: Dictionary, instance UUID as key, instance object as value
928 @type owned_groups: iterable of string
929 @param owned_groups: List of owned groups
930 @type owned_node_uuids: iterable of string
931 @param owned_node_uuids: List of owned nodes
932 @type cur_group_uuid: string or None
933 @param cur_group_uuid: Optional group UUID to check against instance's groups
934
935 """
936 for (uuid, inst) in instances.items():
937 inst_nodes = cfg.GetInstanceNodes(inst.uuid)
938 assert owned_node_uuids.issuperset(inst_nodes), \
939 "Instance %s's nodes changed while we kept the lock" % inst.name
940
941 inst_groups = CheckInstanceNodeGroups(cfg, uuid, owned_groups)
942
943 assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
944 "Instance %s has no node in group %s" % (inst.name, cur_group_uuid)
945
946
948 """Checks if the owned node groups are still correct for an instance.
949
950 @type cfg: L{config.ConfigWriter}
951 @param cfg: The cluster configuration
952 @type inst_uuid: string
953 @param inst_uuid: Instance UUID
954 @type owned_groups: set or frozenset
955 @param owned_groups: List of currently owned node groups
956 @type primary_only: boolean
957 @param primary_only: Whether to check node groups for only the primary node
958
959 """
960 inst_groups = cfg.GetInstanceNodeGroups(inst_uuid, primary_only)
961
962 if not owned_groups.issuperset(inst_groups):
963 raise errors.OpPrereqError("Instance %s's node groups changed since"
964 " locks were acquired, current groups are"
965 " are '%s', owning groups '%s'; retry the"
966 " operation" %
967 (cfg.GetInstanceName(inst_uuid),
968 utils.CommaJoin(inst_groups),
969 utils.CommaJoin(owned_groups)),
970 errors.ECODE_STATE)
971
972 return inst_groups
973
974
976 """Unpacks the result of change-group and node-evacuate iallocator requests.
977
978 Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
979 L{constants.IALLOCATOR_MODE_CHG_GROUP}.
980
981 @type lu: L{LogicalUnit}
982 @param lu: Logical unit instance
983 @type alloc_result: tuple/list
984 @param alloc_result: Result from iallocator
985 @type early_release: bool
986 @param early_release: Whether to release locks early if possible
987 @type use_nodes: bool
988 @param use_nodes: Whether to display node names instead of groups
989
990 """
991 (moved, failed, jobs) = alloc_result
992
993 if failed:
994 failreason = utils.CommaJoin("%s (%s)" % (name, reason)
995 for (name, reason) in failed)
996 lu.LogWarning("Unable to evacuate instances %s", failreason)
997 raise errors.OpExecError("Unable to evacuate instances %s" % failreason)
998
999 if moved:
1000 lu.LogInfo("Instances to be moved: %s",
1001 utils.CommaJoin(
1002 "%s (to %s)" %
1003 (name, _NodeEvacDest(use_nodes, group, node_names))
1004 for (name, group, node_names) in moved))
1005
1006 return [map(compat.partial(_SetOpEarlyRelease, early_release),
1007 map(opcodes.OpCode.LoadOpCode, ops))
1008 for ops in jobs]
1009
1010
1012 """Returns group or nodes depending on caller's choice.
1013
1014 """
1015 if use_nodes:
1016 return utils.CommaJoin(node_names)
1017 else:
1018 return group
1019
1020
1022 """Sets C{early_release} flag on opcodes if available.
1023
1024 """
1025 try:
1026 op.early_release = early_release
1027 except AttributeError:
1028 assert not isinstance(op, opcodes.OpInstanceReplaceDisks)
1029
1030 return op
1031
1032
1034 """Creates a map from (node, volume) to instance name.
1035
1036 @type cfg: L{config.ConfigWriter}
1037 @param cfg: The cluster configuration
1038 @type instances: list of L{objects.Instance}
1039 @rtype: dict; tuple of (node uuid, volume name) as key, L{objects.Instance}
1040 object as value
1041
1042 """
1043 return dict(
1044 ((node_uuid, vol), inst)
1045 for inst in instances
1046 for (node_uuid, vols) in cfg.GetInstanceLVsByNode(inst.uuid).items()
1047 for vol in vols)
1048
1049
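# Illustrative example (hypothetical values): for an instance with LV
# "xenvg/disk0" on node A and "xenvg/disk1" on node B, the result is
#   {(node_a_uuid, "xenvg/disk0"): inst, (node_b_uuid, "xenvg/disk1"): inst}
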
1051 """Make sure that none of the given paramters is global.
1052
1053 If a global parameter is found, an L{errors.OpPrereqError} exception is
1054 raised. This is used to avoid setting global parameters for individual nodes.
1055
1056 @type params: dictionary
1057 @param params: Parameters to check
1058 @type glob_pars: dictionary
1059 @param glob_pars: Forbidden parameters
1060 @type kind: string
1061 @param kind: Kind of parameters (e.g. "node")
1062 @type bad_levels: string
1063 @param bad_levels: Level(s) at which the parameters are forbidden (e.g.
1064 "instance")
1065 @type good_levels: strings
1066 @param good_levels: Level(s) at which the parameters are allowed (e.g.
1067 "cluster or group")
1068
1069 """
1070 used_globals = glob_pars.intersection(params)
1071 if used_globals:
1072 msg = ("The following %s parameters are global and cannot"
1073 " be customized at %s level, please modify them at"
1074 " %s level: %s" %
1075 (kind, bad_levels, good_levels, utils.CommaJoin(used_globals)))
1076 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1077
1078
1080 """Whether exclusive_storage is in effect for the given node.
1081
1082 @type cfg: L{config.ConfigWriter}
1083 @param cfg: The cluster configuration
1084 @type node: L{objects.Node}
1085 @param node: The node
1086 @rtype: bool
1087 @return: The effective value of exclusive_storage
1088
1089 """
1090 return cfg.GetNdParams(node)[constants.ND_EXCLUSIVE_STORAGE]
1091
1092
1094 """Given an instance object, checks if the instance is running.
1095
1096 This function asks the backend whether the instance is running and
1097 user shutdown instances are considered not to be running.
1098
1099 @type lu: L{LogicalUnit}
1100 @param lu: LU on behalf of which we make the check
1101
1102 @type instance: L{objects.Instance}
1103 @param instance: instance to check whether it is running
1104
1105 @rtype: bool
1106 @return: 'True' if the instance is running, 'False' otherwise
1107
1108 """
1109 hvparams = lu.cfg.GetClusterInfo().FillHV(instance)
1110 result = lu.rpc.call_instance_info(instance.primary_node, instance.name,
1111 instance.hypervisor, hvparams)
1112
1113
1114 result.Raise("Can't retrieve instance information for instance '%s'" %
1115 instance.name, prereq=prereq, ecode=errors.ECODE_ENVIRON)
1116
1117 return result.payload and \
1118 "state" in result.payload and \
1119 (result.payload["state"] != hypervisor.hv_base.HvInstanceState.SHUTDOWN)
1120
1121
1123 """Ensure that an instance is in one of the required states.
1124
1125 @param lu: the LU on behalf of which we make the check
1126 @param instance: the instance to check
1127 @param msg: if passed, should be a message to replace the default one
1128 @raise errors.OpPrereqError: if the instance is not in the required state
1129
1130 """
1131 if msg is None:
1132 msg = ("can't use instance from outside %s states" %
1133 utils.CommaJoin(req_states))
1134 if instance.admin_state not in req_states:
1135 raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
1136 (instance.name, instance.admin_state, msg),
1137 errors.ECODE_STATE)
1138
1139 if constants.ADMINST_UP not in req_states:
1140 pnode_uuid = instance.primary_node
1141
1142 if not lu.cfg.GetNodeInfo(pnode_uuid).offline:
1143 if IsInstanceRunning(lu, instance):
1144 raise errors.OpPrereqError("Instance %s is running, %s" %
1145 (instance.name, msg), errors.ECODE_STATE)
1146 else:
1147 lu.LogWarning("Primary node offline, ignoring check that instance"
1148 " is down")
1149
1150
1152 """Check the sanity of iallocator and node arguments and use the
1153 cluster-wide iallocator if appropriate.
1154
1155 Check that at most one of (iallocator, node) is specified. If none is
1156 specified, or the iallocator is L{constants.DEFAULT_IALLOCATOR_SHORTCUT},
1157 then the LU's opcode's iallocator slot is filled with the cluster-wide
1158 default iallocator.
1159
1160 @type iallocator_slot: string
1161 @param iallocator_slot: the name of the opcode iallocator slot
1162 @type node_slot: string
1163 @param node_slot: the name of the opcode target node slot
1164
1165 """
1166 node = getattr(lu.op, node_slot, None)
1167 ialloc = getattr(lu.op, iallocator_slot, None)
1168 if node == []:
1169 node = None
1170
1171 if node is not None and ialloc is not None:
1172 raise errors.OpPrereqError("Do not specify both, iallocator and node",
1173 errors.ECODE_INVAL)
1174 elif ((node is None and ialloc is None) or
1175 ialloc == constants.DEFAULT_IALLOCATOR_SHORTCUT):
1176 default_iallocator = lu.cfg.GetDefaultIAllocator()
1177 if default_iallocator:
1178 setattr(lu.op, iallocator_slot, default_iallocator)
1179 else:
1180 raise errors.OpPrereqError("No iallocator or node given and no"
1181 " cluster-wide default iallocator found;"
1182 " please specify either an iallocator or a"
1183 " node, or set a cluster-wide default"
1184 " iallocator", errors.ECODE_INVAL)
1185
1186
1205 """Ensure that a given node is online.
1206
1207 @param lu: the LU on behalf of which we make the check
1208 @param node_uuid: the node to check
1209 @param msg: if passed, should be a message to replace the default one
1210 @raise errors.OpPrereqError: if the node is offline
1211
1212 """
1213 if msg is None:
1214 msg = "Can't use offline node"
1215 if lu.cfg.GetNodeInfo(node_uuid).offline:
1216 raise errors.OpPrereqError("%s: %s" % (msg, lu.cfg.GetNodeName(node_uuid)),
1217 errors.ECODE_STATE)
1218
1219
1221 """Helper function to check if a disk template is enabled.
1222
1223 @type cluster: C{objects.Cluster}
1224 @param cluster: the cluster's configuration
1225 @type disk_template: str
1226 @param disk_template: the disk template to be checked
1227
1228 """
1229 assert disk_template is not None
1230 if disk_template not in constants.DISK_TEMPLATES:
1231 raise errors.OpPrereqError("'%s' is not a valid disk template."
1232 " Valid disk templates are: %s" %
1233 (disk_template,
1234 ",".join(constants.DISK_TEMPLATES)))
1235 if not disk_template in cluster.enabled_disk_templates:
1236 raise errors.OpPrereqError("Disk template '%s' is not enabled in cluster."
1237 " Enabled disk templates are: %s" %
1238 (disk_template,
1239 ",".join(cluster.enabled_disk_templates)))
1240
1241
1243 """Helper function to check if a storage type is enabled.
1244
1245 @type cluster: C{objects.Cluster}
1246 @param cluster: the cluster's configuration
1247 @type storage_type: str
1248 @param storage_type: the storage type to be checked
1249
1250 """
1251 assert storage_type is not None
1252 assert storage_type in constants.STORAGE_TYPES
1253
1254
1255 if storage_type == constants.ST_LVM_PV:
1256 CheckStorageTypeEnabled(cluster, constants.ST_LVM_VG)
1257 else:
1258 possible_disk_templates = \
1259 utils.storage.GetDiskTemplatesOfStorageTypes(storage_type)
1260 for disk_template in possible_disk_templates:
1261 if disk_template in cluster.enabled_disk_templates:
1262 return
1263 raise errors.OpPrereqError("No disk template of storage type '%s' is"
1264 " enabled in this cluster. Enabled disk"
1265 " templates are: %s" % (storage_type,
1266 ",".join(cluster.enabled_disk_templates)))
1267
1268
1270 """Checks ipolicy disk templates against enabled disk tempaltes.
1271
1272 @type ipolicy: dict
1273 @param ipolicy: the new ipolicy
1274 @type enabled_disk_templates: list of string
1275 @param enabled_disk_templates: list of enabled disk templates on the
1276 cluster
1277 @raises errors.OpPrereqError: if there is at least one allowed disk
1278 template that is not also enabled.
1279
1280 """
1281 assert constants.IPOLICY_DTS in ipolicy
1282 allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
1283 not_enabled = set(allowed_disk_templates) - set(enabled_disk_templates)
1284 if not_enabled:
1285 raise errors.OpPrereqError("The following disk templates are allowed"
1286 " by the ipolicy, but not enabled on the"
1287 " cluster: %s" % utils.CommaJoin(not_enabled),
1288 errors.ECODE_INVAL)
1289
1290
1311 """Checks if the access param is consistent with the cluster configuration.
1312
1313 @note: requires a configuration lock to run.
1314 @param parameters: the parameters to validate
1315 @param cfg: the cfg object of the cluster
1316 @param group: if set, only check for consistency within this group.
1317 @raise errors.OpPrereqError: if the LU attempts to change the access parameter
1318 to an invalid value, such as "pink bunny".
1319 @raise errors.OpPrereqError: if the LU attempts to change the access parameter
1320 to an inconsistent value, such as asking for RBD
1321 userspace access to the chroot hypervisor.
1322
1323 """
1324 CheckDiskAccessModeValidity(parameters)
1325
1326 for disk_template in parameters:
1327 access = parameters[disk_template].get(constants.LDP_ACCESS,
1328 constants.DISK_KERNELSPACE)
1329
1330 if disk_template not in constants.DTS_HAVE_ACCESS:
1331 continue
1332
1333
1334
1335 inst_uuids = cfg.GetNodeGroupInstances(group) if group else \
1336 cfg.GetInstanceList()
1337
1338 for entry in inst_uuids:
1339 inst = cfg.GetInstanceInfo(entry)
1340 disks = cfg.GetInstanceDisks(entry)
1341 for disk in disks:
1342
1343 if disk.dev_type != disk_template:
1344 continue
1345
1346 hv = inst.hypervisor
1347
1348 if not IsValidDiskAccessModeCombination(hv, disk.dev_type, access):
1349 raise errors.OpPrereqError("Instance {i}: cannot use '{a}' access"
1350 " setting with {h} hypervisor and {d} disk"
1351 " type.".format(i=inst.name,
1352 a=access,
1353 h=hv,
1354 d=disk.dev_type))
1355
1356
1358 """Checks if an hypervisor can read a disk template with given mode.
1359
1360 @param hv: the hypervisor that will access the data
1361 @param disk_template: the disk template the data is stored as
1362 @param mode: how the hypervisor should access the data
1363 @return: True if the hypervisor can read a given read disk_template
1364 in the specified mode.
1365
1366 """
1367 if mode == constants.DISK_KERNELSPACE:
1368 return True
1369
1370 if (hv == constants.HT_KVM and
1371 disk_template in constants.DTS_HAVE_ACCESS and
1372 mode == constants.DISK_USERSPACE):
1373 return True
1374
1375
1376 return False
1377
1378
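# Illustrative example: kernelspace access is always accepted, e.g.
#   IsValidDiskAccessModeCombination(constants.HT_XEN_PVM, constants.DT_RBD,
#                                    constants.DISK_KERNELSPACE) -> True
# while userspace access is only accepted for KVM together with a disk
# template from constants.DTS_HAVE_ACCESS.
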
1403 """Removes the node's certificate from the candidate certificates list.
1404
1405 @type cfg: C{config.ConfigWriter}
1406 @param cfg: the cluster's configuration
1407 @type node_uuid: string
1408 @param node_uuid: the node's UUID
1409
1410 """
1411 cfg.RemoveNodeFromCandidateCerts(node_uuid)
1412
1413
1466 """Create an OpCode that connects a group to the instance
1467 communication network.
1468
1469 This OpCode contains the configuration necessary for the instance
1470 communication network.
1471
1472 @type group_uuid: string
1473 @param group_uuid: UUID of the group to connect
1474
1475 @type network: string
1476 @param network: name or UUID of the network to connect to, i.e., the
1477 instance communication network
1478
1479 @rtype: L{ganeti.opcodes.OpCode}
1480 @return: OpCode that connects the group to the instance
1481 communication network
1482
1483 """
1484 return opcodes.OpNetworkConnect(
1485 group_name=group_uuid,
1486 network_name=network,
1487 network_mode=constants.INSTANCE_COMMUNICATION_NETWORK_MODE,
1488 network_link=constants.INSTANCE_COMMUNICATION_NETWORK_LINK,
1489 conflicts_check=True)
1490
1491
1493 """Determines the size of the specified image.
1494
1495 @type image: string
1496 @param image: absolute filepath or URL of the image
1497
1498 @type node_uuid: string
1499 @param node_uuid: if L{image} is a filepath, this is the UUID of the
1500 node where the image is located
1501
1502 @rtype: int
1503 @return: size of the image in MB, rounded up
1504 @raise OpExecError: if the image does not exist
1505
1506 """
1507
1508 class _HeadRequest(urllib2.Request):
1509 def get_method(self):
1510 return "HEAD"
1511
1512 if utils.IsUrl(image):
1513 try:
1514 response = urllib2.urlopen(_HeadRequest(image))
1515 except urllib2.URLError:
1516 raise errors.OpExecError("Could not retrieve image from given url '%s'" %
1517 image)
1518
1519 content_length_str = response.info().getheader('content-length')
1520
1521 if not content_length_str:
1522 raise errors.OpExecError("Could not determine image size from given url"
1523 " '%s'" % image)
1524
1525 byte_size = int(content_length_str)
1526 else:
1527
1528 result = lu.rpc.call_get_file_info(node_uuid, image)
1529 result.Raise("Could not determine size of file '%s'" % image)
1530
1531 success, attributes = result.payload
1532 if not success:
1533 raise errors.OpExecError("Could not open file '%s'" % image)
1534 byte_size = attributes[constants.STAT_SIZE]
1535
1536
1537 return math.ceil(byte_size / 1024. / 1024.)
1538
1539
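# Illustrative example: an image of 1073741825 bytes (1 GiB plus one byte)
# yields math.ceil(1073741825 / 1024. / 1024.) == 1025.0, i.e. sizes are
# always rounded up to the next whole MB.
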
1541 """Ensure KVM daemon is running on nodes with KVM instances.
1542
1543 If user shutdown is enabled in the cluster:
1544 - The KVM daemon will be started on VM capable nodes containing
1545 KVM instances.
1546 - The KVM daemon will be stopped on non VM capable nodes.
1547
1548 If user shutdown is disabled in the cluster:
1549 - The KVM daemon will be stopped on all nodes
1550
1551 Issues a warning for each failed RPC call.
1552
1553 @type lu: L{LogicalUnit}
1554 @param lu: logical unit on whose behalf we execute
1555
1556 @type feedback_fn: callable
1557 @param feedback_fn: feedback function
1558
1559 @type nodes: list of string
1560 @param nodes: if supplied, it overrides the node uuids to start/stop;
1561 this is used mainly for optimization
1562
1563 @type silent_stop: bool
1564 @param silent_stop: if we should suppress warnings in case KVM daemon is
1565 already stopped
1566 """
1567 cluster = lu.cfg.GetClusterInfo()
1568
1569
1570 if nodes is not None:
1571 node_uuids = set(nodes)
1572 else:
1573 node_uuids = lu.cfg.GetNodeList()
1574
1575
1576 if constants.HT_KVM in cluster.enabled_hypervisors and \
1577 cluster.enabled_user_shutdown:
1578 start_nodes = []
1579 stop_nodes = []
1580
1581 for node_uuid in node_uuids:
1582 if lu.cfg.GetNodeInfo(node_uuid).vm_capable:
1583 start_nodes.append(node_uuid)
1584 else:
1585 stop_nodes.append(node_uuid)
1586 else:
1587 start_nodes = []
1588 stop_nodes = node_uuids
1589
1590
1591 if start_nodes:
1592 results = lu.rpc.call_node_ensure_daemon(start_nodes, constants.KVMD, True)
1593 for node_uuid in start_nodes:
1594 results[node_uuid].Warn("Failed to start KVM daemon in node '%s'" %
1595 node_uuid, feedback_fn)
1596
1597
1598 if stop_nodes:
1599 results = lu.rpc.call_node_ensure_daemon(stop_nodes, constants.KVMD, False)
1600 if not silent_stop:
1601 for node_uuid in stop_nodes:
1602 results[node_uuid].Warn("Failed to stop KVM daemon in node '%s'" %
1603 node_uuid, feedback_fn)
1604
1605
def WarnAboutFailedSshUpdates(result, master_uuid, feedback_fn):
  node_errors = result[master_uuid].payload
  if node_errors:
    feedback_fn("Some nodes' SSH key files could not be updated:")
    for node_name, error_msg in node_errors:
      feedback_fn("%s: %s" % (node_name, error_msg))
1612