"""Common functions used by multiple logical units."""

import copy
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import ssconf
from ganeti import utils


INSTANCE_DOWN = [constants.ADMINST_DOWN]
INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]


CAN_CHANGE_INSTANCE_OFFLINE = (frozenset(INSTANCE_DOWN) | frozenset([
  constants.ADMINST_OFFLINE,
  ]))


def _ExpandItemName(expand_fn, name, kind):
  """Expand an item name.

  @param expand_fn: the function to use for expansion
  @param name: requested item name
  @param kind: text description ('Node' or 'Instance')
  @return: the result of the expand_fn, if successful
  @raise errors.OpPrereqError: if the item is not found

  """
  (uuid, full_name) = expand_fn(name)
  if uuid is None or full_name is None:
    raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
                               errors.ECODE_NOENT)
  return (uuid, full_name)


def ExpandInstanceUuidAndName(cfg, expected_uuid, name):
  """Wrapper over L{_ExpandItemName} for instance."""
  (uuid, full_name) = _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
  if expected_uuid is not None and uuid != expected_uuid:
    raise errors.OpPrereqError(
      "The instance's UUID '%s' does not match the expected UUID '%s' for"
      " instance '%s'. Maybe the instance changed since you submitted this"
      " job." % (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
  return (uuid, full_name)


def ExpandNodeUuidAndName(cfg, expected_uuid, name):
  """Expand a short node name into the node UUID and full name.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type expected_uuid: string
  @param expected_uuid: expected UUID for the node (or None if there is no
        expectation). If it does not match, a L{errors.OpPrereqError} is
        raised.
  @type name: string
  @param name: the short node name

  """
  (uuid, full_name) = _ExpandItemName(cfg.ExpandNodeName, name, "Node")
  if expected_uuid is not None and uuid != expected_uuid:
    raise errors.OpPrereqError(
      "The node's UUID '%s' does not match the expected UUID '%s' for node"
      " '%s'. Maybe the node changed since you submitted this job." %
      (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
  return (uuid, full_name)
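
# A minimal usage sketch for the expansion helpers above (hypothetical short
# name "node1"; assumes a live configuration object such as an LU's
# self.cfg):
#   (node_uuid, node_name) = ExpandNodeUuidAndName(self.cfg, None, "node1")
# Passing a non-None expected_uuid additionally guards against the node
# having been removed and re-added under the same name since the job was
# submitted.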


def ShareAll():
  """Returns a dict declaring all lock levels shared.

  """
  return dict.fromkeys(locking.LEVELS, 1)
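
# Typical use in an LU's ExpandNames (sketch): request shared locks at every
# locking level:
#   self.share_locks = ShareAll()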


def CheckNodeGroupInstances(cfg, group_uuid, owned_instance_names):
  """Checks if the instances in a node group are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type group_uuid: string
  @param group_uuid: Node group UUID
  @type owned_instance_names: set or frozenset
  @param owned_instance_names: List of currently owned instances

  """
  wanted_instances = frozenset(cfg.GetInstanceNames(
                                 cfg.GetNodeGroupInstances(group_uuid)))
  if owned_instance_names != wanted_instances:
    raise errors.OpPrereqError("Instances in node group '%s' changed since"
                               " locks were acquired, wanted '%s', have '%s';"
                               " retry the operation" %
                               (group_uuid,
                                utils.CommaJoin(wanted_instances),
                                utils.CommaJoin(owned_instance_names)),
                               errors.ECODE_STATE)

  return wanted_instances


def GetWantedNodes(lu, short_node_names):
  """Returns list of checked and expanded node names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type short_node_names: list
  @param short_node_names: list of node names or None for all nodes
  @rtype: tuple of lists
  @return: tuple with (list of node UUIDs, list of node names)
  @raise errors.ProgrammerError: if the nodes parameter is wrong type

  """
  if short_node_names:
    node_uuids = [ExpandNodeUuidAndName(lu.cfg, None, name)[0]
                  for name in short_node_names]
  else:
    node_uuids = lu.cfg.GetNodeList()

  return (node_uuids, [lu.cfg.GetNodeName(uuid) for uuid in node_uuids])
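
# Usage sketch (hypothetical opcode field): expand an optional node list,
# where an empty or missing list selects all nodes; GetWantedInstances below
# behaves the same way for instances:
#   (node_uuids, node_names) = GetWantedNodes(self, self.op.nodes)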


def GetWantedInstances(lu, short_inst_names):
  """Returns list of checked and expanded instance names.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type short_inst_names: list
  @param short_inst_names: list of instance names or None for all instances
  @rtype: tuple of lists
  @return: tuple of (instance UUIDs, instance names)
  @raise errors.OpPrereqError: if the instances parameter is wrong type
  @raise errors.OpPrereqError: if any of the passed instances is not found

  """
  if short_inst_names:
    inst_uuids = [ExpandInstanceUuidAndName(lu.cfg, None, name)[0]
                  for name in short_inst_names]
  else:
    inst_uuids = lu.cfg.GetInstanceList()
  return (inst_uuids, [lu.cfg.GetInstanceName(uuid) for uuid in inst_uuids])


def RunPostHook(lu, node_name):
  """Runs the post-hook for an opcode on a single node.

  """
  hm = lu.proc.BuildHooksManager(lu)
  try:
    hm.RunPhase(constants.HOOKS_PHASE_POST, node_names=[node_name])
  except Exception, err:
    lu.LogWarning("Errors occurred running hooks on %s: %s",
                  node_name, err)


def RedistributeAncillaryFiles(lu):
  """Distribute additional files which are part of the cluster configuration.

  ConfigWriter takes care of distributing the config and ssconf files, but
  there are more files which should be distributed to all nodes. This function
  makes sure those are copied.

  """
  # Gather the target nodes
  cluster = lu.cfg.GetClusterInfo()
  master_info = lu.cfg.GetMasterNodeInfo()

  online_node_uuids = lu.cfg.GetOnlineNodeList()
  online_node_uuid_set = frozenset(online_node_uuids)
  vm_node_uuids = list(online_node_uuid_set.intersection(
                         lu.cfg.GetVmCapableNodeList()))

  # Never distribute to the master node itself
  for node_uuids in [online_node_uuids, vm_node_uuids]:
    if master_info.uuid in node_uuids:
      node_uuids.remove(master_info.uuid)

  # Gather the file lists
  (files_all, _, files_mc, files_vm) = \
    ComputeAncillaryFiles(cluster, True)

  # Never re-distribute the configuration file from here
  assert not (pathutils.CLUSTER_CONF_FILE in files_all or
              pathutils.CLUSTER_CONF_FILE in files_vm)
  assert not files_mc, "Master candidates not handled in this function"

  filemap = [
    (online_node_uuids, files_all),
    (vm_node_uuids, files_vm),
    ]

  # Upload the files
  for (node_uuids, files) in filemap:
    for fname in files:
      UploadHelper(lu, node_uuids, fname)


def ComputeAncillaryFiles(cluster, redist):
  """Compute files external to Ganeti which need to be consistent.

  @type redist: boolean
  @param redist: Whether to include files which need to be redistributed

  """
  # Files for all nodes
  files_all = set([
    pathutils.SSH_KNOWN_HOSTS_FILE,
    pathutils.CONFD_HMAC_KEY,
    pathutils.CLUSTER_DOMAIN_SECRET_FILE,
    pathutils.SPICE_CERT_FILE,
    pathutils.SPICE_CACERT_FILE,
    pathutils.RAPI_USERS_FILE,
    ])

  if redist:
    # we need to ship at least the RAPI certificate
    files_all.add(pathutils.RAPI_CERT_FILE)
  else:
    files_all.update(pathutils.ALL_CERT_FILES)
    files_all.update(ssconf.SimpleStore().GetFileList())

  if cluster.modify_etc_hosts:
    files_all.add(pathutils.ETC_HOSTS)

  if cluster.use_external_mip_script:
    files_all.add(pathutils.EXTERNAL_MASTER_SETUP_SCRIPT)

  # Optional files: they must also be listed in one of the required
  # categories, but the file itself does not have to exist on all nodes
  files_opt = set([
    pathutils.RAPI_USERS_FILE,
    ])

  # Files which should only be on master candidates
  files_mc = set()

  if not redist:
    files_mc.add(pathutils.CLUSTER_CONF_FILE)

  # File storage paths
  if (not redist and (cluster.IsFileStorageEnabled() or
                      cluster.IsSharedFileStorageEnabled())):
    files_all.add(pathutils.FILE_STORAGE_PATHS_FILE)
    files_opt.add(pathutils.FILE_STORAGE_PATHS_FILE)

  # Files which should only be on VM-capable nodes
  files_vm = set(
    filename
    for hv_name in cluster.enabled_hypervisors
    for filename in
    hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[0])

  files_opt |= set(
    filename
    for hv_name in cluster.enabled_hypervisors
    for filename in
    hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[1])

  # Filenames in each category must be unique
  all_files_set = files_all | files_mc | files_vm
  assert (len(all_files_set) ==
          sum(map(len, [files_all, files_mc, files_vm]))), \
    "Found file listed in more than one file list"

  # Optional files must be present in one of the other categories, too
  assert all_files_set.issuperset(files_opt), \
    "Optional file not in a different required list"

  # This file should never be re-distributed via RPC
  assert not (redist and
              pathutils.FILE_STORAGE_PATHS_FILE in all_files_set)

  return (files_all, files_opt, files_mc, files_vm)


def UploadHelper(lu, node_uuids, fname):
  """Helper for uploading a file and showing warnings.

  """
  if os.path.exists(fname):
    result = lu.rpc.call_upload_file(node_uuids, fname)
    for to_node_uuid, to_result in result.items():
      msg = to_result.fail_msg
      if msg:
        msg = ("Copy of file %s to node %s failed: %s" %
               (fname, lu.cfg.GetNodeName(to_node_uuid), msg))
        lu.LogWarning(msg)


def MergeAndVerifyHvState(op_input, obj_input):
  """Combines the hv state from an opcode with the one of the object.

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict

  """
  if op_input:
    invalid_hvs = set(op_input) - constants.HYPER_TYPES
    if invalid_hvs:
      raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
                                 " %s" % utils.CommaJoin(invalid_hvs),
                                 errors.ECODE_INVAL)
    if obj_input is None:
      obj_input = {}
    type_check = constants.HVSTS_PARAMETER_TYPES
    return _UpdateAndVerifySubDict(obj_input, op_input, type_check)

  return None


def MergeAndVerifyDiskState(op_input, obj_input):
  """Combines the disk state from an opcode with the one of the object.

  @param op_input: The input dict from the opcode
  @param obj_input: The input dict from the objects
  @return: The verified and updated dict

  """
  if op_input:
    invalid_dst = set(op_input) - constants.DS_VALID_TYPES
    if invalid_dst:
      raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
                                 utils.CommaJoin(invalid_dst),
                                 errors.ECODE_INVAL)
    type_check = constants.DSS_PARAMETER_TYPES
    if obj_input is None:
      obj_input = {}
    return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
                                              type_check))
                for key, value in op_input.items())

  return None
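
# Shape sketch for the two merge helpers above (hypothetical input): the
# opcode dict is keyed by hypervisor name (hv state) or storage type (disk
# state); each per-key sub-dict is merged over the object's current one via
# _UpdateAndVerifySubDict and type-checked, and both helpers return None
# when the opcode supplied no input at all.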


def CheckOSParams(lu, required, node_uuids, osname, osparams):
  """OS parameters validation.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type required: boolean
  @param required: whether the validation should fail if the OS is not
      found
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @type osname: string
  @param osname: the name of the OS we should use
  @type osparams: dict
  @param osparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  node_uuids = _FilterVmNodes(lu, node_uuids)
  result = lu.rpc.call_os_validate(node_uuids, required, osname,
                                   [constants.OS_VALIDATE_PARAMETERS],
                                   osparams)
  for node_uuid, nres in result.items():
    # we don't check for offline cases since this should be run only
    # against the master node and/or an instance's nodes
    nres.Raise("OS Parameters validation failed on node %s" %
               lu.cfg.GetNodeName(node_uuid))
    if not nres.payload:
      lu.LogInfo("OS %s not found on node %s, validation skipped",
                 osname, lu.cfg.GetNodeName(node_uuid))


def CheckHVParams(lu, node_uuids, hvname, hvparams):
  """Hypervisor parameter validation.

  This function abstracts the hypervisor parameter validation to be
  used in both instance create and instance modify.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @type hvname: string
  @param hvname: the name of the hypervisor we should use
  @type hvparams: dict
  @param hvparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  node_uuids = _FilterVmNodes(lu, node_uuids)

  cluster = lu.cfg.GetClusterInfo()
  hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)

  hvinfo = lu.rpc.call_hypervisor_validate_params(node_uuids, hvname, hvfull)
  for node_uuid in node_uuids:
    info = hvinfo[node_uuid]
    if info.offline:
      continue
    info.Raise("Hypervisor parameter validation failed on node %s" %
               lu.cfg.GetNodeName(node_uuid))


def AdjustCandidatePool(lu, exceptions):
  """Adjust the candidate pool after node operations.

  """
  mod_list = lu.cfg.MaintainCandidatePool(exceptions)
  if mod_list:
    lu.LogInfo("Promoted nodes to master candidate role: %s",
               utils.CommaJoin(node.name for node in mod_list))
    for node in mod_list:
      lu.context.ReaddNode(node)
  mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
  if mc_now > mc_max:
    lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
               (mc_now, mc_max))


def CheckNodePVs(nresult, exclusive_storage):
  """Check node PVs.

  """
  pvlist_dict = nresult.get(constants.NV_PVLIST, None)
  if pvlist_dict is None:
    return (["Can't get PV list from node"], None)
  pvlist = map(objects.LvmPvInfo.FromDict, pvlist_dict)
  errlist = []

  # check that ':' is not present in PV names, since it's a valid LVM
  # character but Ganeti doesn't support it
  for pv in pvlist:
    if ":" in pv.name:
      errlist.append("Invalid character ':' in PV '%s' of VG '%s'" %
                     (pv.name, pv.vg_name))
  es_pvinfo = None
  if exclusive_storage:
    (errmsgs, es_pvinfo) = utils.LvmExclusiveCheckNodePvs(pvlist)
    errlist.extend(errmsgs)
    shared_pvs = nresult.get(constants.NV_EXCLUSIVEPVS, None)
    if shared_pvs:
      for (pvname, lvlist) in shared_pvs:
        # with exclusive storage, a PV must not host LVs of several instances
        errlist.append("PV %s is shared among unrelated LVs (%s)" %
                       (pvname, utils.CommaJoin(lvlist)))
  return (errlist, es_pvinfo)


def _ComputeMinMaxSpec(name, qualifier, ispecs, value):
  """Computes if value is in the desired range.

  @param name: name of the parameter for which we perform the check
  @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
      not just 'disk')
  @param ispecs: dictionary containing min and max values
  @param value: actual value that we want to use
  @return: None or an error string

  """
  if value in [None, constants.VALUE_AUTO]:
    return None
  max_v = ispecs[constants.ISPECS_MAX].get(name, value)
  min_v = ispecs[constants.ISPECS_MIN].get(name, value)
  if value > max_v or min_v > value:
    if qualifier:
      fqn = "%s/%s" % (name, qualifier)
    else:
      fqn = name
    return ("%s value %s is not in range [%s, %s]" %
            (fqn, value, min_v, max_v))
  return None
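
# Example (hypothetical ispecs): with
#   ispecs = {constants.ISPECS_MIN: {constants.ISPEC_DISK_SIZE: 1024},
#             constants.ISPECS_MAX: {constants.ISPEC_DISK_SIZE: 4096}}
# _ComputeMinMaxSpec(constants.ISPEC_DISK_SIZE, "0", ispecs, 512) returns
# "disk-size/0 value 512 is not in range [1024, 4096]", while 2048, None and
# constants.VALUE_AUTO all yield None (no violation).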


def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
                                nic_count, disk_sizes, spindle_use,
                                disk_template,
                                _compute_fn=_ComputeMinMaxSpec):
  """Verifies ipolicy against provided specs.

  @type ipolicy: dict
  @param ipolicy: The ipolicy
  @type mem_size: int
  @param mem_size: The memory size
  @type cpu_count: int
  @param cpu_count: Used cpu cores
  @type disk_count: int
  @param disk_count: Number of disks used
  @type nic_count: int
  @param nic_count: Number of nics used
  @type disk_sizes: list of ints
  @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
  @type spindle_use: int
  @param spindle_use: The number of spindles this instance uses
  @type disk_template: string
  @param disk_template: The disk template of the instance
  @param _compute_fn: The compute function (unittest only)
  @return: A list of violations, or an empty list if no violations are found

  """
  assert disk_count == len(disk_sizes)

  test_settings = [
    (constants.ISPEC_MEM_SIZE, "", mem_size),
    (constants.ISPEC_CPU_COUNT, "", cpu_count),
    (constants.ISPEC_NIC_COUNT, "", nic_count),
    (constants.ISPEC_SPINDLE_USE, "", spindle_use),
    ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
         for idx, d in enumerate(disk_sizes)]
  if disk_template != constants.DT_DISKLESS:
    # the disk count check makes no sense for diskless instances
    test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
  ret = []
  allowed_dts = ipolicy[constants.IPOLICY_DTS]
  if disk_template not in allowed_dts:
    ret.append("Disk template %s is not allowed (allowed templates: %s)" %
               (disk_template, utils.CommaJoin(allowed_dts)))

  # the specs only have to fit one of the min/max pairs; report the
  # violations of the pair that comes closest (fewest violations)
  min_errs = None
  for minmax in ipolicy[constants.ISPECS_MINMAX]:
    errs = filter(None,
                  (_compute_fn(name, qualifier, minmax, value)
                   for (name, qualifier, value) in test_settings))
    if min_errs is None or len(errs) < len(min_errs):
      min_errs = errs
  assert min_errs is not None
  return ret + min_errs


def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
                                    _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance meets the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance: L{objects.Instance}
  @param instance: The instance to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  ret = []
  be_full = cfg.GetClusterInfo().FillBE(instance)
  mem_size = be_full[constants.BE_MAXMEM]
  cpu_count = be_full[constants.BE_VCPUS]
  es_flags = rpc.GetExclusiveStorageForNodes(cfg, instance.all_nodes)
  if any(es_flags.values()):
    # with exclusive storage use the actual spindles instead of the
    # spindle_use backend parameter
    try:
      spindle_use = sum([disk.spindles for disk in instance.disks])
    except TypeError:
      ret.append("Number of spindles not configured for disks of instance %s"
                 " while exclusive storage is enabled, try running gnt-cluster"
                 " repair-disk-sizes" % instance.name)
      # _ComputeMinMaxSpec ignores None values
      spindle_use = None
  else:
    spindle_use = be_full[constants.BE_SPINDLE_USE]
  disk_count = len(instance.disks)
  disk_sizes = [disk.size for disk in instance.disks]
  nic_count = len(instance.nics)
  disk_template = instance.disk_template

  return ret + _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                           disk_sizes, spindle_use, disk_template)


def _ComputeViolatingInstances(ipolicy, instances, cfg):
  """Computes a set of instances that violate the given ipolicy.

  @param ipolicy: The ipolicy to verify
  @type instances: L{objects.Instance}
  @param instances: List of instances to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @return: A frozenset of instance names violating the ipolicy

  """
  return frozenset([inst.name for inst in instances
                    if ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])


def ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
  """Computes a set of any instances that would violate the new ipolicy.

  @param old_ipolicy: The current (still in-place) ipolicy
  @param new_ipolicy: The new (to become) ipolicy
  @param instances: List of instances to verify
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @return: A list of instances which violate the new ipolicy but
      did not before

  """
  return (_ComputeViolatingInstances(new_ipolicy, instances, cfg) -
          _ComputeViolatingInstances(old_ipolicy, instances, cfg))
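
# The set difference above means instances that already violated the old
# policy are not reported again; only violations newly introduced by the
# policy change (e.g. a tightened ISPECS_MINMAX) surface.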


def GetUpdatedParams(old_params, update_dict,
                     use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
        (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy
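
# A minimal usage sketch (hypothetical values):
#   GetUpdatedParams({"a": 1, "b": 2},
#                    {"b": constants.VALUE_DEFAULT, "c": 3})
# returns {"a": 1, "c": 3}: "b" is reset to its default by being removed,
# "c" is added, and the input dicts are left untouched (deep copy).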


def GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
  """Return the new version of an instance policy.

  @param group_policy: whether this policy applies to a group and thus
    we should support removal of policy entries

  """
  ipolicy = copy.deepcopy(old_ipolicy)
  for key, value in new_ipolicy.items():
    if key not in constants.IPOLICY_ALL_KEYS:
      raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
                                 errors.ECODE_INVAL)
    if (not value or value == [constants.VALUE_DEFAULT] or
        value == constants.VALUE_DEFAULT):
      if group_policy:
        if key in ipolicy:
          del ipolicy[key]
      else:
        raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
                                   " on the cluster" % key,
                                   errors.ECODE_INVAL)
    else:
      if key in constants.IPOLICY_PARAMETERS:
        # scalar policy parameters are stored as floats
        try:
          ipolicy[key] = float(value)
        except (TypeError, ValueError), err:
          raise errors.OpPrereqError("Invalid value for attribute"
                                     " '%s': '%s', error: %s" %
                                     (key, value, err), errors.ECODE_INVAL)
      elif key == constants.ISPECS_MINMAX:
        for minmax in value:
          for k in minmax.keys():
            utils.ForceDictType(minmax[k], constants.ISPECS_PARAMETER_TYPES)
        ipolicy[key] = value
      elif key == constants.ISPECS_STD:
        if group_policy:
          msg = "%s cannot appear in group instance specs" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        ipolicy[key] = GetUpdatedParams(old_ipolicy.get(key, {}), value,
                                        use_none=False, use_default=False)
        utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
      else:
        # the remaining keys (e.g. the allowed disk templates) hold lists
        ipolicy[key] = list(value)
  try:
    objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
  except errors.ConfigurationError, err:
    raise errors.OpPrereqError("Invalid instance policy: %s" % err,
                               errors.ECODE_INVAL)
  return ipolicy


def AnnotateDiskParams(instance, devs, cfg):
  """Little helper wrapper to the rpc annotation method.

  @param instance: The instance object
  @type devs: List of L{objects.Disk}
  @param devs: The root devices (not any of its children!)
  @param cfg: The config object
  @return: The annotated disk copies
  @see: L{rpc.AnnotateDiskParams}

  """
  return rpc.AnnotateDiskParams(devs, cfg.GetInstanceDiskParams(instance))


def SupportsOob(cfg, node):
  """Tells if node supports OOB.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @return: The OOB script if supported or an empty string otherwise

  """
  return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]


def _UpdateAndVerifySubDict(base, updates, type_check):
  """Updates and verifies a dict with sub dicts of the same type.

  @param base: The dict with the old data
  @param updates: The dict with the new data
  @param type_check: Dict suitable to ForceDictType to verify correct types
  @return: A new dict with updated and verified values

  """
  def fn(old, value):
    new = GetUpdatedParams(old, value)
    utils.ForceDictType(new, type_check)
    return new

  ret = copy.deepcopy(base)
  ret.update(dict((key, fn(base.get(key, {}), value))
                  for key, value in updates.items()))
  return ret
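
# Example (hypothetical sub-dicts): each key's sub-dict in the updates is
# merged over the corresponding sub-dict in the base and then type-checked:
#   _UpdateAndVerifySubDict({"n1": {"cpu": 1}}, {"n1": {"mem": 2}}, tc)
# returns {"n1": {"cpu": 1, "mem": 2}}, assuming both parameters pass the
# ForceDictType check against tc.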


def _FilterVmNodes(lu, node_uuids):
  """Filters out non-vm_capable nodes from a list.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @rtype: list
  @return: the list of vm-capable nodes

  """
  non_vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
  return [uuid for uuid in node_uuids if uuid not in non_vm_nodes]


def GetDefaultIAllocator(cfg, ialloc):
  """Decides on which iallocator to use.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration object
  @type ialloc: string or None
  @param ialloc: Iallocator specified in opcode
  @rtype: string
  @return: Iallocator name

  """
  if not ialloc:
    # Use default iallocator
    ialloc = cfg.GetDefaultIAllocator()

  if not ialloc:
    raise errors.OpPrereqError("No iallocator was specified, neither in the"
                               " opcode nor as a cluster-wide default",
                               errors.ECODE_INVAL)

  return ialloc


def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_node_uuids,
                             cur_group_uuid):
  """Checks if node groups for locked instances are still correct.

  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @type instances: dict; string as key, L{objects.Instance} as value
  @param instances: Dictionary, instance UUID as key, instance object as value
  @type owned_groups: iterable of string
  @param owned_groups: List of owned groups
  @type owned_node_uuids: iterable of string
  @param owned_node_uuids: List of owned nodes
  @type cur_group_uuid: string or None
  @param cur_group_uuid: Optional group UUID to check against instance's groups

  """
  for (uuid, inst) in instances.items():
    assert owned_node_uuids.issuperset(inst.all_nodes), \
      "Instance %s's nodes changed while we kept the lock" % inst.name

    inst_groups = CheckInstanceNodeGroups(cfg, uuid, owned_groups)

    assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
      "Instance %s has no node in group %s" % (inst.name, cur_group_uuid)


def CheckInstanceNodeGroups(cfg, inst_uuid, owned_groups, primary_only=False):
  """Checks if the owned node groups are still correct for an instance.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type inst_uuid: string
  @param inst_uuid: Instance UUID
  @type owned_groups: set or frozenset
  @param owned_groups: List of currently owned node groups
  @type primary_only: boolean
  @param primary_only: Whether to check node groups for only the primary node

  """
  inst_groups = cfg.GetInstanceNodeGroups(inst_uuid, primary_only)

  if not owned_groups.issuperset(inst_groups):
    raise errors.OpPrereqError("Instance %s's node groups changed since"
                               " locks were acquired, current groups are"
                               " '%s', owning groups '%s'; retry the"
                               " operation" %
                               (cfg.GetInstanceName(inst_uuid),
                                utils.CommaJoin(inst_groups),
                                utils.CommaJoin(owned_groups)),
                               errors.ECODE_STATE)

  return inst_groups


def LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
  """Unpacks the result of change-group and node-evacuate iallocator requests.

  Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
  L{constants.IALLOCATOR_MODE_CHG_GROUP}.

  @type lu: L{LogicalUnit}
  @param lu: Logical unit instance
  @type alloc_result: tuple/list
  @param alloc_result: Result from iallocator
  @type early_release: bool
  @param early_release: Whether to release locks early if possible
  @type use_nodes: bool
  @param use_nodes: Whether to display node names instead of groups

  """
  (moved, failed, jobs) = alloc_result

  if failed:
    failreason = utils.CommaJoin("%s (%s)" % (name, reason)
                                 for (name, reason) in failed)
    lu.LogWarning("Unable to evacuate instances %s", failreason)
    raise errors.OpExecError("Unable to evacuate instances %s" % failreason)

  if moved:
    lu.LogInfo("Instances to be moved: %s",
               utils.CommaJoin(
                 "%s (to %s)" %
                 (name, _NodeEvacDest(use_nodes, group, node_names))
                 for (name, group, node_names) in moved))

  return [map(compat.partial(_SetOpEarlyRelease, early_release),
              map(opcodes.OpCode.LoadOpCode, ops))
          for ops in jobs]


def _NodeEvacDest(use_nodes, group, node_names):
  """Returns group or nodes depending on caller's choice.

  """
  if use_nodes:
    return utils.CommaJoin(node_names)
  else:
    return group


def _SetOpEarlyRelease(early_release, op):
  """Sets C{early_release} flag on opcodes if available.

  """
  try:
    op.early_release = early_release
  except AttributeError:
    assert not isinstance(op, opcodes.OpInstanceReplaceDisks)

  return op


def MapInstanceLvsToNodes(instances):
  """Creates a map from (node, volume) to instance name.

  @type instances: list of L{objects.Instance}
  @rtype: dict; tuple of (node uuid, volume name) as key, L{objects.Instance}
      object as value

  """
  return dict(((node_uuid, vol), inst)
              for inst in instances
              for (node_uuid, vols) in inst.MapLVsByNode().items()
              for vol in vols)
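
# Example shape of the result (hypothetical node UUIDs and LV names):
#   {("node-uuid-1", "xenvg/disk0"): inst1,
#    ("node-uuid-1", "xenvg/disk1"): inst1,
#    ("node-uuid-2", "xenvg/disk0"): inst2}
# i.e. every (node, volume) pair maps back to the owning instance object.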


def CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
  """Make sure that none of the given parameters is global.

  If a global parameter is found, an L{errors.OpPrereqError} exception is
  raised. This is used to avoid setting global parameters for individual nodes.

  @type params: dictionary
  @param params: Parameters to check
  @type glob_pars: dictionary
  @param glob_pars: Forbidden parameters
  @type kind: string
  @param kind: Kind of parameters (e.g. "node")
  @type bad_levels: string
  @param bad_levels: Level(s) at which the parameters are forbidden (e.g.
      "instance")
  @type good_levels: string
  @param good_levels: Level(s) at which the parameters are allowed (e.g.
      "cluster or group")

  """
  used_globals = glob_pars.intersection(params)
  if used_globals:
    msg = ("The following %s parameters are global and cannot"
           " be customized at %s level, please modify them at"
           " %s level: %s" %
           (kind, bad_levels, good_levels, utils.CommaJoin(used_globals)))
    raise errors.OpPrereqError(msg, errors.ECODE_INVAL)


def IsExclusiveStorageEnabledNode(cfg, node):
  """Whether exclusive_storage is in effect for the given node.

  @type cfg: L{config.ConfigWriter}
  @param cfg: The cluster configuration
  @type node: L{objects.Node}
  @param node: The node
  @rtype: bool
  @return: The effective value of exclusive_storage

  """
  return cfg.GetNdParams(node)[constants.ND_EXCLUSIVE_STORAGE]


def CheckInstanceState(lu, instance, req_states, msg=None):
  """Ensure that an instance is in one of the required states.

  @param lu: the LU on behalf of which we make the check
  @param instance: the instance to check
  @param req_states: the states the instance is expected to be in
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the instance is not in the required state

  """
  if msg is None:
    msg = ("can't use instance from outside %s states" %
           utils.CommaJoin(req_states))
  if instance.admin_state not in req_states:
    raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
                               (instance.name, instance.admin_state, msg),
                               errors.ECODE_STATE)

  if constants.ADMINST_UP not in req_states:
    pnode_uuid = instance.primary_node
    if not lu.cfg.GetNodeInfo(pnode_uuid).offline:
      all_hvparams = lu.cfg.GetClusterInfo().hvparams
      ins_l = lu.rpc.call_instance_list(
                [pnode_uuid], [instance.hypervisor], all_hvparams)[pnode_uuid]
      ins_l.Raise("Can't contact node %s for instance information" %
                  lu.cfg.GetNodeName(pnode_uuid),
                  prereq=True, ecode=errors.ECODE_ENVIRON)
      if instance.name in ins_l.payload:
        raise errors.OpPrereqError("Instance %s is running, %s" %
                                   (instance.name, msg), errors.ECODE_STATE)
    else:
      lu.LogWarning("Primary node offline, ignoring check that instance"
                    " is down")


def CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
  """Check the sanity of iallocator and node arguments and use the
  cluster-wide iallocator if appropriate.

  Check that at most one of (iallocator, node) is specified. If none is
  specified, or the iallocator is L{constants.DEFAULT_IALLOCATOR_SHORTCUT},
  then the LU's opcode's iallocator slot is filled with the cluster-wide
  default iallocator.

  @type iallocator_slot: string
  @param iallocator_slot: the name of the opcode iallocator slot
  @type node_slot: string
  @param node_slot: the name of the opcode target node slot

  """
  node = getattr(lu.op, node_slot, None)
  ialloc = getattr(lu.op, iallocator_slot, None)
  if node == []:
    node = None

  if node is not None and ialloc is not None:
    raise errors.OpPrereqError("Do not specify both, iallocator and node",
                               errors.ECODE_INVAL)
  elif ((node is None and ialloc is None) or
        ialloc == constants.DEFAULT_IALLOCATOR_SHORTCUT):
    default_iallocator = lu.cfg.GetDefaultIAllocator()
    if default_iallocator:
      setattr(lu.op, iallocator_slot, default_iallocator)
    else:
      raise errors.OpPrereqError("No iallocator or node given and no"
                                 " cluster-wide default iallocator found;"
                                 " please specify either an iallocator or a"
                                 " node, or set a cluster-wide default"
                                 " iallocator", errors.ECODE_INVAL)
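
# Typical use from an LU's CheckPrereq (sketch; the slot names depend on the
# opcode): CheckIAllocatorOrNode(self, "iallocator", "pnode") leaves at most
# one of self.op.iallocator / self.op.pnode set, filling in the cluster-wide
# default iallocator when neither was given.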


def CheckNodeOnline(lu, node_uuid, msg=None):
  """Ensure that a given node is online.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @param msg: if passed, should be a message to replace the default one
  @raise errors.OpPrereqError: if the node is offline

  """
  if msg is None:
    msg = "Can't use offline node"
  if lu.cfg.GetNodeInfo(node_uuid).offline:
    raise errors.OpPrereqError("%s: %s" % (msg, lu.cfg.GetNodeName(node_uuid)),
                               errors.ECODE_STATE)


def CheckDiskTemplateEnabled(cluster, disk_template):
  """Helper function to check if a disk template is enabled.

  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration
  @type disk_template: str
  @param disk_template: the disk template to be checked

  """
  assert disk_template is not None
  if disk_template not in constants.DISK_TEMPLATES:
    raise errors.OpPrereqError("'%s' is not a valid disk template."
                               " Valid disk templates are: %s" %
                               (disk_template,
                                ",".join(constants.DISK_TEMPLATES)))
  if disk_template not in cluster.enabled_disk_templates:
    raise errors.OpPrereqError("Disk template '%s' is not enabled in cluster."
                               " Enabled disk templates are: %s" %
                               (disk_template,
                                ",".join(cluster.enabled_disk_templates)))


def CheckStorageTypeEnabled(cluster, storage_type):
  """Helper function to check if a storage type is enabled.

  @type cluster: C{objects.Cluster}
  @param cluster: the cluster's configuration
  @type storage_type: str
  @param storage_type: the storage type to be checked

  """
  assert storage_type is not None
  assert storage_type in constants.STORAGE_TYPES
  # special case for lvm-pv, because it cannot be enabled via disk templates
  # directly; it is implied by the lvm-vg storage type
  if storage_type == constants.ST_LVM_PV:
    CheckStorageTypeEnabled(cluster, constants.ST_LVM_VG)
  else:
    possible_disk_templates = \
      utils.storage.GetDiskTemplatesOfStorageType(storage_type)
    for disk_template in possible_disk_templates:
      if disk_template in cluster.enabled_disk_templates:
        return
    raise errors.OpPrereqError("No disk template of storage type '%s' is"
                               " enabled in this cluster. Enabled disk"
                               " templates are: %s"
                               % (storage_type,
                                  ",".join(cluster.enabled_disk_templates)))


def CheckIpolicyVsDiskTemplates(ipolicy, enabled_disk_templates):
  """Checks ipolicy disk templates against enabled disk templates.

  @type ipolicy: dict
  @param ipolicy: the new ipolicy
  @type enabled_disk_templates: list of string
  @param enabled_disk_templates: list of enabled disk templates on the
      cluster
  @raise errors.OpPrereqError: if there is at least one allowed disk
      template that is not also enabled.

  """
  assert constants.IPOLICY_DTS in ipolicy
  allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
  not_enabled = set(allowed_disk_templates) - set(enabled_disk_templates)
  if not_enabled:
    raise errors.OpPrereqError("The following disk templates are allowed"
                               " by the ipolicy, but not enabled on the"
                               " cluster: %s" % utils.CommaJoin(not_enabled))


def CheckDiskAccessModeConsistency(parameters, cfg, group=None):
  """Checks if the access param is consistent with the cluster configuration.

  @note: requires a configuration lock to run.
  @param parameters: the parameters to validate
  @param cfg: the cfg object of the cluster
  @param group: if set, only check for consistency within this group.
  @raise errors.OpPrereqError: if the LU attempts to change the access
      parameter to an invalid value, such as "pink bunny".
  @raise errors.OpPrereqError: if the LU attempts to change the access
      parameter to an inconsistent value, such as asking for RBD userspace
      access to the chroot hypervisor.

  """
  CheckDiskAccessModeValidity(parameters)

  if constants.DT_RBD in parameters:
    access = parameters[constants.DT_RBD].get(constants.RBD_ACCESS,
                                              constants.DISK_KERNELSPACE)

    # Check consistency with the instances that exist so far; if only a
    # group is given, restrict the check to that group's instances
    inst_uuids = cfg.GetNodeGroupInstances(group) if group else \
      cfg.GetInstanceList()

    for entry in inst_uuids:
      inst = cfg.GetInstanceInfo(entry)
      hv = inst.hypervisor
      dt = inst.disk_template

      # the access parameter only matters for RBD instances
      if dt != constants.DT_RBD:
        continue

      if not IsValidDiskAccessModeCombination(hv, dt, access):
        raise errors.OpPrereqError("Instance {i}: cannot use '{a}' access"
                                   " setting with {h} hypervisor and {d} disk"
                                   " type.".format(i=inst.name,
                                                   a=access,
                                                   h=hv,
                                                   d=dt))


def IsValidDiskAccessModeCombination(hv, disk_template, mode):
  """Checks if a hypervisor can read a disk template with given mode.

  @param hv: the hypervisor that will access the data
  @param disk_template: the disk template the data is stored as
  @param mode: how the hypervisor should access the data
  @return: True if the hypervisor can read a given disk_template
      in the specified mode.

  """
  if mode == constants.DISK_KERNELSPACE:
    return True

  if (hv == constants.HT_KVM and
      disk_template == constants.DT_RBD and
      mode == constants.DISK_USERSPACE):
    return True

  # all other combinations are currently rejected
  return False
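
# Truth-table sketch for IsValidDiskAccessModeCombination: kernelspace access
# is accepted for any (hv, disk_template) pair, while userspace access is
# currently only accepted for KVM with RBD, e.g.
#   IsValidDiskAccessModeCombination(constants.HT_KVM, constants.DT_RBD,
#                                    constants.DISK_USERSPACE)
# returns True.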