"""Common functions used by multiple logical units."""

import copy
import math
import os
import urllib2

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
import ganeti.rpc.node as rpc
from ganeti.serializer import Private
from ganeti import ssconf
from ganeti import utils


INSTANCE_DOWN = [constants.ADMINST_DOWN]
INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]


def _ExpandItemName(expand_fn, name, kind):
  """Expand an item name.

  @param expand_fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the result of the expand_fn, if successful
  @raise errors.OpPrereqError: if the item is not found

  """
  (uuid, full_name) = expand_fn(name)
  if uuid is None or full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return (uuid, full_name)


def ExpandInstanceUuidAndName(cfg, expected_uuid, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  (uuid, full_name) = _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
  if expected_uuid is not None and uuid != expected_uuid:
    raise errors.OpPrereqError(
      "The instance's UUID '%s' does not match the expected UUID '%s' for"
      " instance '%s'. Maybe the instance changed since you submitted this"
      " job." % (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
  return (uuid, full_name)


def ExpandNodeUuidAndName(cfg, expected_uuid, name):
  """Expand a short node name into the node UUID and full name.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type expected_uuid: string
  @param expected_uuid: expected UUID for the node (or None if there is no
      expectation). If it does not match, a L{errors.OpPrereqError} is
      raised.
  @type name: string
  @param name: the short node name

  """
  (uuid, full_name) = _ExpandItemName(cfg.ExpandNodeName, name, "Node")
  if expected_uuid is not None and uuid != expected_uuid:
    raise errors.OpPrereqError(
      "The node's UUID '%s' does not match the expected UUID '%s' for node"
      " '%s'. Maybe the node changed since you submitted this job." %
      (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
  return (uuid, full_name)


def ShareAll():
  """Returns a dict declaring all lock levels shared.

  """
  return dict.fromkeys(locking.LEVELS, 1)


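# Usage sketch (illustrative comment, not part of the original module): the
# returned dict maps every locking level to 1, i.e. "acquire shared". A
# logical unit would typically assign it wholesale, e.g.:
#
#   self.share_locks = ShareAll()
#
# so that all locks it declares are taken in shared mode.

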
def CheckNodeGroupInstances(cfg, group_uuid, owned_instance_names):
  """Checks if the instances in a node group are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type group_uuid: string
  @param group_uuid: Node group UUID
  @type owned_instance_names: set or frozenset
  @param owned_instance_names: List of currently owned instances

  """
  wanted_instances = frozenset(cfg.GetInstanceNames(
                                 cfg.GetNodeGroupInstances(group_uuid)))
  if owned_instance_names != wanted_instances:
    raise errors.OpPrereqError("Instances in node group '%s' changed since"
                               " locks were acquired, wanted '%s', have '%s';"
                               " retry the operation" %
                               (group_uuid,
                                utils.CommaJoin(wanted_instances),
                                utils.CommaJoin(owned_instance_names)),
                               errors.ECODE_STATE)

  return wanted_instances


def GetWantedNodes(lu, short_node_names):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type short_node_names: list
  @param short_node_names: list of node names or None for all nodes
  @rtype: tuple of lists
  @return: tuple with (list of node UUIDs, list of node names)
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if short_node_names:
    node_uuids = [ExpandNodeUuidAndName(lu.cfg, None, name)[0]
                  for name in short_node_names]
  else:
    node_uuids = lu.cfg.GetNodeList()

  return (node_uuids, [lu.cfg.GetNodeName(uuid) for uuid in node_uuids])


def GetWantedInstances(lu, short_inst_names):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type short_inst_names: list
  @param short_inst_names: list of instance names or None for all instances
  @rtype: tuple of lists
  @return: tuple of (instance UUIDs, instance names)
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if short_inst_names:
    inst_uuids = [ExpandInstanceUuidAndName(lu.cfg, None, name)[0]
                  for name in short_inst_names]
  else:
    inst_uuids = lu.cfg.GetInstanceList()
  return (inst_uuids, [lu.cfg.GetInstanceName(uuid) for uuid in inst_uuids])


def RunPostHook(lu, node_name):
  """Runs the post-hook for an opcode on a single node.

  """
  hm = lu.proc.BuildHooksManager(lu)
  try:
    hm.RunPhase(constants.HOOKS_PHASE_POST, node_names=[node_name])
  except Exception, err:
    lu.LogWarning("Errors occurred running hooks on %s: %s",
                  node_name, err)


def RedistributeAncillaryFiles(lu):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  """
  # Gather the target nodes
  cluster = lu.cfg.GetClusterInfo()
  master_info = lu.cfg.GetMasterNodeInfo()

  online_node_uuids = lu.cfg.GetOnlineNodeList()
  online_node_uuid_set = frozenset(online_node_uuids)
  vm_node_uuids = list(online_node_uuid_set.intersection(
                         lu.cfg.GetVmCapableNodeList()))

  # Never distribute to the master node itself
  for node_uuids in [online_node_uuids, vm_node_uuids]:
    if master_info.uuid in node_uuids:
      node_uuids.remove(master_info.uuid)

  # Gather file lists
  (files_all, _, files_mc, files_vm) = \
    ComputeAncillaryFiles(cluster, True)

  # Never re-distribute the configuration file from here
  assert not (pathutils.CLUSTER_CONF_FILE in files_all or
              pathutils.CLUSTER_CONF_FILE in files_vm)
  assert not files_mc, "Master candidates not handled in this function"

  filemap = [
    (online_node_uuids, files_all),
    (vm_node_uuids, files_vm),
    ]

  # Upload the files
  for (node_uuids, files) in filemap:
    for fname in files:
      UploadHelper(lu, node_uuids, fname)


def ComputeAncillaryFiles(cluster, redist):
  """Compute files external to Ganeti which need to be consistent.

  @type redist: boolean
  @param redist: Whether to include files which need to be redistributed

  """
  # Files which need to be present on all nodes
  files_all = set([
    pathutils.SSH_KNOWN_HOSTS_FILE,
    pathutils.CONFD_HMAC_KEY,
    pathutils.CLUSTER_DOMAIN_SECRET_FILE,
    pathutils.SPICE_CERT_FILE,
    pathutils.SPICE_CACERT_FILE,
    pathutils.RAPI_USERS_FILE,
    ])

  if redist:
    # we need to ship at least the RAPI certificate
    files_all.add(pathutils.RAPI_CERT_FILE)
  else:
    files_all.update(pathutils.ALL_CERT_FILES)
    files_all.update(ssconf.SimpleStore().GetFileList())

  if cluster.modify_etc_hosts:
    files_all.add(pathutils.ETC_HOSTS)

  if cluster.use_external_mip_script:
    files_all.add(pathutils.EXTERNAL_MASTER_SETUP_SCRIPT)

  # Files which are optional: they may or may not exist, but if they do they
  # must also appear in one of the other (required) categories
  files_opt = set([
    pathutils.RAPI_USERS_FILE,
    ])

  # Files which should only be on master candidates
  files_mc = set()

  if not redist:
    files_mc.add(pathutils.CLUSTER_CONF_FILE)

  # File storage paths
  if (not redist and (cluster.IsFileStorageEnabled() or
                        cluster.IsSharedFileStorageEnabled())):
    files_all.add(pathutils.FILE_STORAGE_PATHS_FILE)
    files_opt.add(pathutils.FILE_STORAGE_PATHS_FILE)

  # Files which should only be on VM-capable nodes
  files_vm = set(
    filename
    for hv_name in cluster.enabled_hypervisors
    for filename in
      hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[0])

  files_opt |= set(
    filename
    for hv_name in cluster.enabled_hypervisors
    for filename in
      hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[1])

  # Filenames in each category must be unique
  all_files_set = files_all | files_mc | files_vm
  assert (len(all_files_set) ==
          sum(map(len, [files_all, files_mc, files_vm]))), \
    "Found file listed in more than one file list"

  # Optional files must also be present in another (required) category
  assert all_files_set.issuperset(files_opt), \
    "Optional file not in a different required list"

  # This file should never be re-distributed via RPC
  assert not (redist and
              pathutils.FILE_STORAGE_PATHS_FILE in all_files_set)

  return (files_all, files_opt, files_mc, files_vm)


def UploadHelper(lu, node_uuids, fname):
  """Helper for uploading a file and showing warnings.

  """
  if os.path.exists(fname):
    result = lu.rpc.call_upload_file(node_uuids, fname)
    for to_node_uuids, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (fname, lu.cfg.GetNodeName(to_node_uuids), msg))
        lu.LogWarning(msg)


def MergeAndVerifyHvState(op_input, obj_input):
  """Combines the hv state from an opcode with the one of the object.

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict

  """
  if op_input:
    invalid_hvs = set(op_input) - constants.HYPER_TYPES
    if invalid_hvs:
      raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
                                 " %s" % utils.CommaJoin(invalid_hvs),
                                 errors.ECODE_INVAL)
    if obj_input is None:
      obj_input = {}
    type_check = constants.HVSTS_PARAMETER_TYPES
    return _UpdateAndVerifySubDict(obj_input, op_input, type_check)

  return None


def MergeAndVerifyDiskState(op_input, obj_input):
  """Combines the disk state from an opcode with the one of the object.

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict

  """
  if op_input:
    invalid_dst = set(op_input) - constants.DS_VALID_TYPES
    if invalid_dst:
      raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
                                 utils.CommaJoin(invalid_dst),
                                 errors.ECODE_INVAL)
    type_check = constants.DSS_PARAMETER_TYPES
    if obj_input is None:
      obj_input = {}
    return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
                                              type_check))
                for key, value in op_input.items())

  return None


def CheckOSParams(lu, required, node_uuids, osname, osparams, force_variant):
  """OS parameters validation.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type required: boolean
  @param required: whether the validation should fail if the OS is not
      found
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @type osname: string
  @param osname: the name of the OS we should use
  @type osparams: dict
  @param osparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  node_uuids = _FilterVmNodes(lu, node_uuids)

  # Unwrap private parameters before passing them to the RPC layer
  for key in osparams:
    if isinstance(osparams[key], Private):
      osparams[key] = osparams[key].Get()

  if osname:
    result = lu.rpc.call_os_validate(node_uuids, required, osname,
                                     [constants.OS_VALIDATE_PARAMETERS],
                                     osparams, force_variant)
    for node_uuid, nres in result.items():
      # we don't check for offline cases since this should be run only
      # against the master node and/or an instance's nodes
      nres.Raise("OS Parameters validation failed on node %s" %
                 lu.cfg.GetNodeName(node_uuid))
      if not nres.payload:
        lu.LogInfo("OS %s not found on node %s, validation skipped",
                   osname, lu.cfg.GetNodeName(node_uuid))


def CheckImageValidity(image, error_message):
  """Checks if a given image description is either a valid file path or a URL.

  @type image: string
  @param image: An absolute path or URL, the assumed location of a disk image.
  @type error_message: string
  @param error_message: The error message to show if the image is not valid.

  @raise errors.OpPrereqError: If the validation fails.

  """
  if image is not None and not (utils.IsUrl(image) or os.path.isabs(image)):
    raise errors.OpPrereqError(error_message)


def CheckOSImage(op):
  """Checks if the OS image in the OS parameters of an opcode is
  valid.

  This function can also be used in LUs as they carry an opcode.

  @type op: L{opcodes.OpCode}
  @param op: opcode containing the OS params

  @rtype: string or NoneType
  @return:
    None if the OS parameters in the opcode do not contain the OS
    image, otherwise the OS image value contained in the OS parameters
  @raise errors.OpPrereqError: if OS image is not a URL or an absolute path

  """
  os_image = objects.GetOSImage(op.osparams)
  CheckImageValidity(os_image, "OS image must be an absolute path or a URL")
  return os_image


def CheckHVParams(lu, node_uuids, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  node_uuids = _FilterVmNodes(lu, node_uuids)

  cluster = lu.cfg.GetClusterInfo()
  hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)

  hvinfo = lu.rpc.call_hypervisor_validate_params(node_uuids, hvname, hvfull)
  for node_uuid in node_uuids:
    info = hvinfo[node_uuid]
    if info.offline:
      continue
    info.Raise("Hypervisor parameter validation failed on node %s" %
               lu.cfg.GetNodeName(node_uuid))


def AdjustCandidatePool(
    lu, exceptions, master_node, potential_master_candidates, feedback_fn,
    modify_ssh_setup):
  """Adjust the candidate pool after node operations.

  @type master_node: string
  @param master_node: name of the master node
  @type potential_master_candidates: list of string
  @param potential_master_candidates: list of node names of potential master
      candidates
  @type feedback_fn: function
  @param feedback_fn: function emitting user-visible output
  @type modify_ssh_setup: boolean
  @param modify_ssh_setup: whether or not the ssh setup can be modified.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for node in mod_list:
      AddNodeCertToCandidateCerts(lu, lu.cfg, node.uuid)
      if modify_ssh_setup:
        AddMasterCandidateSshKey(
          lu, master_node, node, potential_master_candidates, feedback_fn)

  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def CheckNodePVs(nresult, exclusive_storage):
  """Check node PVs.

  """
  pvlist_dict = nresult.get(constants.NV_PVLIST, None)
  if pvlist_dict is None:
    return (["Can't get PV list from node"], None)
  pvlist = map(objects.LvmPvInfo.FromDict, pvlist_dict)
  errlist = []

  # check that ':' is not present in PV names, since it's a special
  # character for lvcreate (denotes the range of PEs to use on this PV)
  for pv in pvlist:
    if ":" in pv.name:
      errlist.append("Invalid character ':' in PV '%s' of VG '%s'" %
                     (pv.name, pv.vg_name))
  es_pvinfo = None
  if exclusive_storage:
    (errmsgs, es_pvinfo) = utils.LvmExclusiveCheckNodePvs(pvlist)
    errlist.extend(errmsgs)
    shared_pvs = nresult.get(constants.NV_EXCLUSIVEPVS, None)
    if shared_pvs:
      for (pvname, lvlist) in shared_pvs:
        # TODO: Check that LVs are really unrelated? (parent LV, shared PV)
        errlist.append("PV %s is shared among unrelated LVs (%s)" %
                       (pvname, utils.CommaJoin(lvlist)))
  return (errlist, es_pvinfo)


def _ComputeMinMaxSpec(name, qualifier, ispecs, value):
  """Computes if value is in the desired range.

  @param name: name of the parameter for which we perform the check
  @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
      not just 'disk')
  @param ispecs: dictionary containing min and max values
  @param value: actual value that we want to use
  @return: None or an error string

  """
  if value in [None, constants.VALUE_AUTO]:
    return None
  max_v = ispecs[constants.ISPECS_MAX].get(name, value)
  min_v = ispecs[constants.ISPECS_MIN].get(name, value)
  if value > max_v or min_v > value:
    if qualifier:
      fqn = "%s/%s" % (name, qualifier)
    else:
      fqn = name
    return ("%s value %s is not in range [%s, %s]" %
            (fqn, value, min_v, max_v))
  return None


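# Illustrative sketch (comment only, not part of the original module): with a
# min/max pair such as
#
#   specs = {constants.ISPECS_MIN: {constants.ISPEC_MEM_SIZE: 128},
#            constants.ISPECS_MAX: {constants.ISPEC_MEM_SIZE: 1024}}
#
# _ComputeMinMaxSpec(constants.ISPEC_MEM_SIZE, "", specs, 512) returns None
# (value in range), while a value of 2048 returns an error string mentioning
# the range [128, 1024]. Parameters missing from the min/max dicts default to
# the value itself and therefore never trigger a violation.

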
def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
                                nic_count, disk_sizes, spindle_use,
                                disk_types, _compute_fn=_ComputeMinMaxSpec):
  """Verifies ipolicy against provided specs.

  @type ipolicy: dict
  @param ipolicy: The ipolicy
  @type mem_size: int
  @param mem_size: The memory size
  @type cpu_count: int
  @param cpu_count: Used cpu cores
  @type disk_count: int
  @param disk_count: Number of disks used
  @type nic_count: int
  @param nic_count: Number of nics used
  @type disk_sizes: list of ints
  @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
  @type spindle_use: int
  @param spindle_use: The number of spindles this instance uses
  @type disk_types: list of strings
  @param disk_types: The disk types of the instance
  @param _compute_fn: The compute function (unittest only)
  @return: A list of violations, or an empty list if no violations are found

  """
  assert disk_count == len(disk_sizes)
  assert isinstance(disk_types, list)
  assert disk_count == len(disk_types)

  test_settings = [
    (constants.ISPEC_MEM_SIZE, "", mem_size),
    (constants.ISPEC_CPU_COUNT, "", cpu_count),
    (constants.ISPEC_NIC_COUNT, "", nic_count),
    (constants.ISPEC_SPINDLE_USE, "", spindle_use),
    ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
         for idx, d in enumerate(disk_sizes)]

  allowed_dts = set(ipolicy[constants.IPOLICY_DTS])
  ret = []
  if disk_count != 0:
    # This check doesn't make sense for diskless instances
    test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
  elif constants.DT_DISKLESS not in allowed_dts:
    ret.append("Disk template %s is not allowed (allowed templates %s)" %
               (constants.DT_DISKLESS, utils.CommaJoin(allowed_dts)))

  forbidden_dts = set(disk_types) - allowed_dts
  if forbidden_dts:
    ret.append("Disk template %s is not allowed (allowed templates: %s)" %
               (utils.CommaJoin(forbidden_dts), utils.CommaJoin(allowed_dts)))

  min_errs = None
  for minmax in ipolicy[constants.ISPECS_MINMAX]:
    errs = filter(None,
                  (_compute_fn(name, qualifier, minmax, value)
                   for (name, qualifier, value) in test_settings))
    if min_errs is None or len(errs) < len(min_errs):
      min_errs = errs
  assert min_errs is not None
  return ret + min_errs


def ComputeIPolicyDiskSizesViolation(ipolicy, disk_sizes, disks,
                                     _compute_fn=ComputeIPolicySpecViolation):
  """Verifies ipolicy against provided disk sizes.

  No other specs except the disk sizes, the number of disks and the disk
  template are checked.

  @type ipolicy: dict
  @param ipolicy: The ipolicy
  @type disk_sizes: list of ints
  @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
  @type disks: list of L{Disk}
  @param disks: The Disk objects of the instance
  @param _compute_fn: The compute function (unittest only)
  @return: A list of violations, or an empty list if no violations are found

  """
  if len(disk_sizes) != len(disks):
    return [constants.ISPEC_DISK_COUNT]
  dev_types = [d.dev_type for d in disks]
  return ComputeIPolicySpecViolation(ipolicy,
                                     # mem_size, cpu_count, disk_count
                                     None, None, len(disk_sizes),
                                     None, disk_sizes,  # nic_count, disk_sizes
                                     None,  # spindle_use
                                     dev_types,
                                     _compute_fn=_compute_fn)


def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
                                    _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance meets the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance: L{objects.Instance}
  @param instance: The instance to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  ret = []
  be_full = cfg.GetClusterInfo().FillBE(instance)
  mem_size = be_full[constants.BE_MAXMEM]
  cpu_count = be_full[constants.BE_VCPUS]
  inst_nodes = cfg.GetInstanceNodes(instance.uuid)
  es_flags = rpc.GetExclusiveStorageForNodes(cfg, inst_nodes)
  disks = cfg.GetInstanceDisks(instance.uuid)
  if any(es_flags.values()):
    # With exclusive storage use the actual spindles
    try:
      spindle_use = sum([disk.spindles for disk in disks])
    except TypeError:
      ret.append("Number of spindles not configured for disks of instance %s"
                 " while exclusive storage is enabled, try running gnt-cluster"
                 " repair-disk-sizes" % instance.name)
      # _ComputeMinMaxSpec ignores 'None's
      spindle_use = None
  else:
    spindle_use = be_full[constants.BE_SPINDLE_USE]
  disk_count = len(disks)
  disk_sizes = [disk.size for disk in disks]
  nic_count = len(instance.nics)
  disk_types = [d.dev_type for d in disks]

  return ret + _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                           disk_sizes, spindle_use, disk_types)


def _ComputeViolatingInstances(ipolicy, instances, cfg):
  """Computes a set of instances that violate the given ipolicy.

  @param ipolicy: The ipolicy to verify
  @type instances: L{objects.Instance}
  @param instances: List of instances to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @return: A frozenset of instance names violating the ipolicy

  """
  return frozenset([inst.name for inst in instances
                    if ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])


def ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
  """Computes a set of any instances that would violate the new ipolicy.

  @param old_ipolicy: The current (still in-place) ipolicy
  @param new_ipolicy: The new (to become) ipolicy
  @param instances: List of instances to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @return: A list of instances which violate the new ipolicy but
      did not before

  """
  return (_ComputeViolatingInstances(new_ipolicy, instances, cfg) -
          _ComputeViolatingInstances(old_ipolicy, instances, cfg))


def GetUpdatedParams(old_params, update_dict,
                     use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
        (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy


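# Usage sketch (illustrative comment, not part of the original module):
#
#   GetUpdatedParams({"a": 1, "b": 2},
#                    {"b": constants.VALUE_DEFAULT, "c": 3})
#   # -> {"a": 1, "c": 3}
#
# "b" is dropped because VALUE_DEFAULT means "reset to the default", "c" is
# added and "a" is carried over unchanged; the input dictionaries are not
# modified.

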
def GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
  """Return the new version of an instance policy.

  @param group_policy: whether this policy applies to a group and thus
      we should support removal of policy entries

  """
  ipolicy = copy.deepcopy(old_ipolicy)
  for key, value in new_ipolicy.items():
    if key not in constants.IPOLICY_ALL_KEYS:
      raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
                                 errors.ECODE_INVAL)
    if (not value or value == [constants.VALUE_DEFAULT] or
        value == constants.VALUE_DEFAULT):
      if group_policy:
        if key in ipolicy:
          del ipolicy[key]
      else:
        raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
                                   " on the cluster" % key,
                                   errors.ECODE_INVAL)
    else:
      if key in constants.IPOLICY_PARAMETERS:
        # FIXME: we assume all such values are float
        try:
          ipolicy[key] = float(value)
        except (TypeError, ValueError), err:
          raise errors.OpPrereqError("Invalid value for attribute"
                                     " '%s': '%s', error: %s" %
                                     (key, value, err), errors.ECODE_INVAL)
      elif key == constants.ISPECS_MINMAX:
        for minmax in value:
          for k in minmax.keys():
            utils.ForceDictType(minmax[k], constants.ISPECS_PARAMETER_TYPES)
        ipolicy[key] = value
      elif key == constants.ISPECS_STD:
        if group_policy:
          msg = "%s cannot appear in group instance specs" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        ipolicy[key] = GetUpdatedParams(old_ipolicy.get(key, {}), value,
                                        use_none=False, use_default=False)
        utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
      else:
        # FIXME: we assume all other keys are lists; this should be
        # redone in a nicer way
        ipolicy[key] = list(value)
  try:
    objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
  except errors.ConfigurationError, err:
    raise errors.OpPrereqError("Invalid instance policy: %s" % err,
                               errors.ECODE_INVAL)
  return ipolicy


def AnnotateDiskParams(instance, devs, cfg):
  """Little helper wrapper to the rpc annotation method.

  @param instance: The instance object
  @type devs: List of L{objects.Disk}
  @param devs: The root devices (not any of its children!)
  @param cfg: The config object
  @returns The annotated disk copies
  @see L{ganeti.rpc.node.AnnotateDiskParams}

  """
  return rpc.AnnotateDiskParams(devs, cfg.GetInstanceDiskParams(instance))


def SupportsOob(cfg, node):
  """Tells if node supports OOB.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @return: The OOB script if supported or an empty string otherwise

  """
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]


def _UpdateAndVerifySubDict(base, updates, type_check):
  """Updates and verifies a dict with sub dicts of the same type.

  @param base: The dict with the old data
  @param updates: The dict with the new data
  @param type_check: Dict suitable to ForceDictType to verify correct types
  @returns: A new dict with updated and verified values

  """
  def fn(old, value):
    new = GetUpdatedParams(old, value)
    utils.ForceDictType(new, type_check)
    return new

  ret = copy.deepcopy(base)
  ret.update(dict((key, fn(base.get(key, {}), value))
                  for key, value in updates.items()))
  return ret


def _FilterVmNodes(lu, node_uuids):
  """Filters out non-vm_capable nodes from a list.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @rtype: list
  @return: the list of vm-capable nodes

  """
  vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
  return [uuid for uuid in node_uuids if uuid not in vm_nodes]


def GetDefaultIAllocator(cfg, ialloc):
  """Decides on which iallocator to use.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration object
  @type ialloc: string or None
  @param ialloc: Iallocator specified in opcode
  @rtype: string
  @return: Iallocator name

  """
  if not ialloc:
    # Use default iallocator
    ialloc = cfg.GetDefaultIAllocator()

  if not ialloc:
    raise errors.OpPrereqError("No iallocator was specified, neither in the"
                               " opcode nor as a cluster-wide default",
                               errors.ECODE_INVAL)

  return ialloc


def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_node_uuids,
                             cur_group_uuid):
  """Checks if node groups for locked instances are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @type instances: dict; string as key, L{objects.Instance} as value
  @param instances: Dictionary, instance UUID as key, instance object as value
  @type owned_groups: iterable of string
  @param owned_groups: List of owned groups
  @type owned_node_uuids: iterable of string
  @param owned_node_uuids: List of owned nodes
  @type cur_group_uuid: string or None
  @param cur_group_uuid: Optional group UUID to check against instance's groups

  """
  for (uuid, inst) in instances.items():
    inst_nodes = cfg.GetInstanceNodes(inst.uuid)
    assert owned_node_uuids.issuperset(inst_nodes), \
      "Instance %s's nodes changed while we kept the lock" % inst.name

    inst_groups = CheckInstanceNodeGroups(cfg, uuid, owned_groups)

    assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
      "Instance %s has no node in group %s" % (inst.name, cur_group_uuid)


def CheckInstanceNodeGroups(cfg, inst_uuid, owned_groups, primary_only=False):
  """Checks if the owned node groups are still correct for an instance.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type inst_uuid: string
  @param inst_uuid: Instance UUID
  @type owned_groups: set or frozenset
  @param owned_groups: List of currently owned node groups
  @type primary_only: boolean
  @param primary_only: Whether to check node groups for only the primary node

  """
  inst_groups = cfg.GetInstanceNodeGroups(inst_uuid, primary_only)

  if not owned_groups.issuperset(inst_groups):
    raise errors.OpPrereqError("Instance %s's node groups changed since"
                               " locks were acquired, current groups are"
                               " '%s', owning groups '%s'; retry the"
                               " operation" %
                               (cfg.GetInstanceName(inst_uuid),
                                utils.CommaJoin(inst_groups),
                                utils.CommaJoin(owned_groups)),
                               errors.ECODE_STATE)

  return inst_groups


def LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
  """Unpacks the result of change-group and node-evacuate iallocator requests.

  Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
  L{constants.IALLOCATOR_MODE_CHG_GROUP}.

  @type lu: L{LogicalUnit}
  @param lu: Logical unit instance
  @type alloc_result: tuple/list
  @param alloc_result: Result from iallocator
  @type early_release: bool
  @param early_release: Whether to release locks early if possible
  @type use_nodes: bool
  @param use_nodes: Whether to display node names instead of groups

  """
  (moved, failed, jobs) = alloc_result

  if failed:
    failreason = utils.CommaJoin("%s (%s)" % (name, reason)
                                 for (name, reason) in failed)
    lu.LogWarning("Unable to evacuate instances %s", failreason)
    raise errors.OpExecError("Unable to evacuate instances %s" % failreason)

  if moved:
    lu.LogInfo("Instances to be moved: %s",
               utils.CommaJoin(
                 "%s (to %s)" %
                 (name, _NodeEvacDest(use_nodes, group, node_names))
                 for (name, group, node_names) in moved))

  return [map(compat.partial(_SetOpEarlyRelease, early_release),
              map(opcodes.OpCode.LoadOpCode, ops))
          for ops in jobs]


def _NodeEvacDest(use_nodes, group, node_names):
  """Returns group or nodes depending on caller's choice.

  """
  if use_nodes:
    return utils.CommaJoin(node_names)
  else:
    return group


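# Usage sketch (illustrative comment, not part of the original module):
#
#   _NodeEvacDest(True, "group1", ["node1", "node2"])   # -> "node1, node2"
#   _NodeEvacDest(False, "group1", ["node1", "node2"])  # -> "group1"

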
def _SetOpEarlyRelease(early_release, op):
  """Sets C{early_release} flag on opcodes if available.

  """
  try:
    op.early_release = early_release
  except AttributeError:
    assert not isinstance(op, opcodes.OpInstanceReplaceDisks)

  return op


def MapInstanceLvsToNodes(cfg, instances):
  """Creates a map from (node, volume) to instance name.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type instances: list of L{objects.Instance}
  @rtype: dict; tuple of (node uuid, volume name) as key, L{objects.Instance}
      object as value

  """
  return dict(
    ((node_uuid, vol), inst)
    for inst in instances
    for (node_uuid, vols) in cfg.GetInstanceLVsByNode(inst.uuid).items()
    for vol in vols)


def CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
  """Make sure that none of the given parameters is global.

  If a global parameter is found, an L{errors.OpPrereqError} exception is
  raised. This is used to avoid setting global parameters for individual nodes.

  @type params: dictionary
  @param params: Parameters to check
  @type glob_pars: dictionary
  @param glob_pars: Forbidden parameters
  @type kind: string
  @param kind: Kind of parameters (e.g. "node")
  @type bad_levels: string
  @param bad_levels: Level(s) at which the parameters are forbidden (e.g.
      "instance")
  @type good_levels: strings
  @param good_levels: Level(s) at which the parameters are allowed (e.g.
      "cluster or group")

  """
  used_globals = glob_pars.intersection(params)
  if used_globals:
    msg = ("The following %s parameters are global and cannot"
           " be customized at %s level, please modify them at"
           " %s level: %s" %
           (kind, bad_levels, good_levels, utils.CommaJoin(used_globals)))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def IsExclusiveStorageEnabledNode(cfg, node):
  """Whether exclusive_storage is in effect for the given node.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @rtype: bool
  @return: The effective value of exclusive_storage

  """
  return cfg.GetNdParams(node)[constants.ND_EXCLUSIVE_STORAGE]


def IsInstanceRunning(lu, instance, prereq=True):
  """Given an instance object, checks if the instance is running.

  This function asks the backend whether the instance is running;
  instances shut down by the user are considered not to be running.

  @type lu: L{LogicalUnit}
  @param lu: LU on behalf of which we make the check

  @type instance: L{objects.Instance}
  @param instance: instance to check whether it is running

  @rtype: bool
  @return: 'True' if the instance is running, 'False' otherwise

  """
  hvparams = lu.cfg.GetClusterInfo().FillHV(instance)
  result = lu.rpc.call_instance_info(instance.primary_node, instance.name,
                                     instance.hypervisor, hvparams)

  result.Raise("Can't retrieve instance information for instance '%s'" %
               instance.name, prereq=prereq, ecode=errors.ECODE_ENVIRON)

  return result.payload and \
         "state" in result.payload and \
         (result.payload["state"] !=
          hypervisor.hv_base.HvInstanceState.SHUTDOWN)


def CheckInstanceState(lu, instance, req_states, msg=None):
  """Ensure that an instance is in one of the required states.

  @param lu: the LU on behalf of which we make the check
  @param instance: the instance to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the instance is not in the required state

  """
  if msg is None:
    msg = ("can't use instance from outside %s states" %
           utils.CommaJoin(req_states))
  if instance.admin_state not in req_states:
    raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
                               (instance.name, instance.admin_state, msg),
                               errors.ECODE_STATE)

  if constants.ADMINST_UP not in req_states:
    pnode_uuid = instance.primary_node

    # The actual hypervisor state can only be queried while the primary
    # node is reachable
    if not lu.cfg.GetNodeInfo(pnode_uuid).offline:
      if IsInstanceRunning(lu, instance):
        raise errors.OpPrereqError("Instance %s is running, %s" %
                                   (instance.name, msg), errors.ECODE_STATE)
    else:
      lu.LogWarning("Primary node offline, ignoring check that instance"
                    " is down")


def CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, or the iallocator is L{constants.DEFAULT_IALLOCATOR_SHORTCUT},
  then the LU's opcode's iallocator slot is filled with the cluster-wide
  default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  ialloc = getattr(lu.op, iallocator_slot, None)
  if node == []:
    node = None

  if node is not None and ialloc is not None:
    raise errors.OpPrereqError("Do not specify both, iallocator and node",
                               errors.ECODE_INVAL)
  elif ((node is None and ialloc is None) or
        ialloc == constants.DEFAULT_IALLOCATOR_SHORTCUT):
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found;"
                                 " please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator", errors.ECODE_INVAL)


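# Decision summary (illustrative comment, not part of the original module):
#
#   node set, iallocator set               -> OpPrereqError (mutually
#                                              exclusive)
#   node set only                          -> keep the node, no change
#   neither set, or iallocator equals
#   DEFAULT_IALLOCATOR_SHORTCUT            -> fill the iallocator slot with the
#                                              cluster default, or raise
#                                              OpPrereqError if none is set
#   iallocator set only (not the shortcut) -> keep the given iallocator

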
def CheckNodeOnline(lu, node_uuid, msg=None):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the node is offline

  """
  if msg is None:
    msg = "Can't use offline node"
  if lu.cfg.GetNodeInfo(node_uuid).offline:
    raise errors.OpPrereqError("%s: %s" % (msg, lu.cfg.GetNodeName(node_uuid)),
                               errors.ECODE_STATE)


def CheckDiskTemplateEnabled(cluster, disk_template):
  """Helper function to check if a disk template is enabled.

  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration
  @type disk_template: str
  @param disk_template: the disk template to be checked

  """
  assert disk_template is not None
  if disk_template not in constants.DISK_TEMPLATES:
    raise errors.OpPrereqError("'%s' is not a valid disk template."
                               " Valid disk templates are: %s" %
                               (disk_template,
                                ",".join(constants.DISK_TEMPLATES)))
  if disk_template not in cluster.enabled_disk_templates:
    raise errors.OpPrereqError("Disk template '%s' is not enabled in cluster."
                               " Enabled disk templates are: %s" %
                               (disk_template,
                                ",".join(cluster.enabled_disk_templates)))


def CheckStorageTypeEnabled(cluster, storage_type):
  """Helper function to check if a storage type is enabled.

  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration
  @type storage_type: str
  @param storage_type: the storage type to be checked

  """
  assert storage_type is not None
  assert storage_type in constants.STORAGE_TYPES
  # special case for lvm-pv, because it cannot be enabled
  # via disk templates
  if storage_type == constants.ST_LVM_PV:
    CheckStorageTypeEnabled(cluster, constants.ST_LVM_VG)
  else:
    possible_disk_templates = \
      utils.storage.GetDiskTemplatesOfStorageTypes(storage_type)
    for disk_template in possible_disk_templates:
      if disk_template in cluster.enabled_disk_templates:
        return
    raise errors.OpPrereqError("No disk template of storage type '%s' is"
                               " enabled in this cluster. Enabled disk"
                               " templates are: %s" % (storage_type,
                               ",".join(cluster.enabled_disk_templates)))


def CheckIpolicyVsDiskTemplates(ipolicy, enabled_disk_templates):
  """Checks ipolicy disk templates against enabled disk templates.

  @type ipolicy: dict
  @param ipolicy: the new ipolicy
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of enabled disk templates on the
      cluster
  @raises errors.OpPrereqError: if there is at least one allowed disk
      template that is not also enabled.

  """
  assert constants.IPOLICY_DTS in ipolicy
  allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
  not_enabled = set(allowed_disk_templates) - set(enabled_disk_templates)
  if not_enabled:
    raise errors.OpPrereqError("The following disk templates are allowed"
                               " by the ipolicy, but not enabled on the"
                               " cluster: %s" % utils.CommaJoin(not_enabled),
                               errors.ECODE_INVAL)


def CheckDiskAccessModeConsistency(parameters, cfg, group=None):
  """Checks if the access param is consistent with the cluster configuration.

  @note: requires a configuration lock to run.
  @param parameters: the parameters to validate
  @param cfg: the cfg object of the cluster
  @param group: if set, only check for consistency within this group.
  @raise errors.OpPrereqError: if the LU attempts to change the access parameter
      to an invalid value, such as "pink bunny".
  @raise errors.OpPrereqError: if the LU attempts to change the access parameter
      to an inconsistent value, such as asking for RBD
      userspace access to the chroot hypervisor.

  """
  CheckDiskAccessModeValidity(parameters)

  for disk_template in parameters:
    access = parameters[disk_template].get(constants.LDP_ACCESS,
                                           constants.DISK_KERNELSPACE)

    if disk_template not in constants.DTS_HAVE_ACCESS:
      continue

    # Check that the combination of instance hypervisor, disk template and
    # access mode is sane for every instance using this disk template
    inst_uuids = cfg.GetNodeGroupInstances(group) if group else \
                 cfg.GetInstanceList()

    for entry in inst_uuids:
      inst = cfg.GetInstanceInfo(entry)
      disks = cfg.GetInstanceDisks(entry)
      for disk in disks:

        if disk.dev_type != disk_template:
          continue

        hv = inst.hypervisor

        if not IsValidDiskAccessModeCombination(hv, disk.dev_type, access):
          raise errors.OpPrereqError("Instance {i}: cannot use '{a}' access"
                                     " setting with {h} hypervisor and {d} disk"
                                     " type.".format(i=inst.name,
                                                     a=access,
                                                     h=hv,
                                                     d=disk.dev_type))


def IsValidDiskAccessModeCombination(hv, disk_template, mode):
  """Checks if a hypervisor can read a disk template with given mode.

  @param hv: the hypervisor that will access the data
  @param disk_template: the disk template the data is stored as
  @param mode: how the hypervisor should access the data
  @return: True if the hypervisor can read a given disk_template
      in the specified mode.

  """
  if mode == constants.DISK_KERNELSPACE:
    return True

  if (hv == constants.HT_KVM and
      disk_template in constants.DTS_HAVE_ACCESS and
      mode == constants.DISK_USERSPACE):
    return True

  # Everything else is invalid
  return False


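# Illustrative sketch (comment only; the concrete template constants are an
# assumption, not taken from this module): kernelspace access is always
# accepted, while userspace access is only accepted for KVM on disk templates
# that carry an access parameter, e.g.:
#
#   IsValidDiskAccessModeCombination(constants.HT_KVM, constants.DT_RBD,
#                                    constants.DISK_USERSPACE)   # -> True
#   IsValidDiskAccessModeCombination(constants.HT_XEN_PVM, constants.DT_RBD,
#                                    constants.DISK_USERSPACE)   # -> False

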
def RemoveNodeCertFromCandidateCerts(cfg, node_uuid):
  """Removes the node's certificate from the candidate certificates list.

  @type cfg: C{config.ConfigWriter}
  @param cfg: the cluster's configuration
  @type node_uuid: string
  @param node_uuid: the node's UUID

  """
  cfg.RemoveNodeFromCandidateCerts(node_uuid)


def ConnectInstanceCommunicationNetworkOp(group_uuid, network):
  """Create an OpCode that connects a group to the instance
  communication network.

  This OpCode contains the configuration necessary for the instance
  communication network.

  @type group_uuid: string
  @param group_uuid: UUID of the group to connect

  @type network: string
  @param network: name or UUID of the network to connect to, i.e., the
      instance communication network

  @rtype: L{ganeti.opcodes.OpCode}
  @return: OpCode that connects the group to the instance
      communication network

  """
  return opcodes.OpNetworkConnect(
    group_name=group_uuid,
    network_name=network,
    network_mode=constants.INSTANCE_COMMUNICATION_NETWORK_MODE,
    network_link=constants.INSTANCE_COMMUNICATION_NETWORK_LINK,
    conflicts_check=True)


def DetermineImageSize(lu, image, node_uuid):
  """Determines the size of the specified image.

  @type image: string
  @param image: absolute filepath or URL of the image

  @type node_uuid: string
  @param node_uuid: if L{image} is a filepath, this is the UUID of the
      node where the image is located

  @rtype: int
  @return: size of the image in MB, rounded up
  @raise OpExecError: if the image does not exist

  """
  # Check if we are dealing with a URL or a file path
  class _HeadRequest(urllib2.Request):
    def get_method(self):
      return "HEAD"

  if utils.IsUrl(image):
    try:
      response = urllib2.urlopen(_HeadRequest(image))
    except urllib2.URLError:
      raise errors.OpExecError("Could not retrieve image from given url '%s'" %
                               image)

    content_length_str = response.info().getheader('content-length')

    if not content_length_str:
      raise errors.OpExecError("Could not determine image size from given url"
                               " '%s'" % image)

    byte_size = int(content_length_str)
  else:
    # We end up here if a file path is used
    result = lu.rpc.call_get_file_info(node_uuid, image)
    result.Raise("Could not determine size of file '%s'" % image)

    success, attributes = result.payload
    if not success:
      raise errors.OpExecError("Could not open file '%s'" % image)
    byte_size = attributes[constants.STAT_SIZE]

  # Round up to the nearest MB
  return math.ceil(byte_size / 1024. / 1024.)


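# Worked example (illustrative comment, not part of the original module): the
# byte count is always rounded up to a whole number of mebibytes, e.g.
#
#   byte_size = 1           -> 1.0 MB (any non-empty image counts as 1 MB)
#   byte_size = 1610612736  -> 1536.0 MB (exactly 1.5 GiB)
#
# Note that math.ceil returns a float here, even though the value is a whole
# number of MB.

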
def EnsureKvmdOnNodes(lu, feedback_fn, nodes=None):
  """Ensure KVM daemon is running on nodes with KVM instances.

  If user shutdown is enabled in the cluster:
    - The KVM daemon will be started on VM capable nodes containing
      KVM instances.
    - The KVM daemon will be stopped on non VM capable nodes.

  If user shutdown is disabled in the cluster:
    - The KVM daemon will be stopped on all nodes

  Issues a warning for each failed RPC call.

  @type lu: L{LogicalUnit}
  @param lu: logical unit on whose behalf we execute

  @type feedback_fn: callable
  @param feedback_fn: feedback function

  @type nodes: list of string
  @param nodes: if supplied, it overrides the node uuids to start/stop;
      this is used mainly for optimization

  """
  cluster = lu.cfg.GetClusterInfo()

  # Either use the passed nodes or consider all cluster nodes
  if nodes is not None:
    node_uuids = set(nodes)
  else:
    node_uuids = lu.cfg.GetNodeList()

  # Determine on which nodes the KVM daemon should be started/stopped
  if constants.HT_KVM in cluster.enabled_hypervisors and \
      cluster.enabled_user_shutdown:
    start_nodes = []
    stop_nodes = []

    for node_uuid in node_uuids:
      if lu.cfg.GetNodeInfo(node_uuid).vm_capable:
        start_nodes.append(node_uuid)
      else:
        stop_nodes.append(node_uuid)
  else:
    start_nodes = []
    stop_nodes = node_uuids

  # Start the KVM daemon where needed
  if start_nodes:
    results = lu.rpc.call_node_ensure_daemon(start_nodes, constants.KVMD, True)
    for node_uuid in start_nodes:
      results[node_uuid].Warn("Failed to start KVM daemon in node '%s'" %
                              node_uuid, feedback_fn)

  # Stop the KVM daemon where needed
  if stop_nodes:
    results = lu.rpc.call_node_ensure_daemon(stop_nodes, constants.KVMD, False)
    for node_uuid in stop_nodes:
      results[node_uuid].Warn("Failed to stop KVM daemon in node '%s'" %
                              node_uuid, feedback_fn)


1596
1598 node_errors = result[master_uuid].payload
1599 if node_errors:
1600 feedback_fn("Some nodes' SSH key files could not be updated:")
1601 for node_name, error_msg in node_errors:
1602 feedback_fn("%s: %s" % (node_name, error_msg))
1603