31 """Common functions used by multiple logical units."""
32
33 import copy
34 import math
35 import os
36 import urllib2
37
38 from ganeti import compat
39 from ganeti import constants
40 from ganeti import errors
41 from ganeti import hypervisor
42 from ganeti import locking
43 from ganeti import objects
44 from ganeti import opcodes
45 from ganeti import pathutils
46 import ganeti.rpc.node as rpc
47 from ganeti.serializer import Private
48 from ganeti import ssconf
49 from ganeti import utils
50
51
52
53 INSTANCE_DOWN = [constants.ADMINST_DOWN]
54 INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
55 INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]
56
57
59 """Expand an item name.
60
61 @param expand_fn: the function to use for expansion
62 @param name: requested item name
63 @param kind: text description ('Node' or 'Instance')
64 @return: the result of the expand_fn, if successful
65 @raise errors.OpPrereqError: if the item is not found
66
67 """
68 (uuid, full_name) = expand_fn(name)
69 if uuid is None or full_name is None:
70 raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
71 errors.ECODE_NOENT)
72 return (uuid, full_name)
73
74
76 """Wrapper over L{_ExpandItemName} for instance."""
77 (uuid, full_name) = _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
78 if expected_uuid is not None and uuid != expected_uuid:
79 raise errors.OpPrereqError(
80 "The instances UUID '%s' does not match the expected UUID '%s' for"
81 " instance '%s'. Maybe the instance changed since you submitted this"
82 " job." % (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
83 return (uuid, full_name)
84
85
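# Usage sketch for ExpandInstanceUuidAndName above (hypothetical values):
#   (uuid, name) = ExpandInstanceUuidAndName(lu.cfg, None,
#                                            "instance1.example.com")
# Passing None as expected_uuid skips the UUID consistency check; an unknown
# name raises errors.OpPrereqError via _ExpandItemName.

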
87 """Expand a short node name into the node UUID and full name.
88
89 @type cfg: L{config.ConfigWriter}
90 @param cfg: The cluster configuration
91 @type expected_uuid: string
92 @param expected_uuid: expected UUID for the node (or None if there is no
93 expectation). If it does not match, a L{errors.OpPrereqError} is
94 raised.
95 @type name: string
96 @param name: the short node name
97
98 """
99 (uuid, full_name) = _ExpandItemName(cfg.ExpandNodeName, name, "Node")
100 if expected_uuid is not None and uuid != expected_uuid:
101 raise errors.OpPrereqError(
102 "The nodes UUID '%s' does not match the expected UUID '%s' for node"
103 " '%s'. Maybe the node changed since you submitted this job." %
104 (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
105 return (uuid, full_name)
106
107
109 """Returns a dict declaring all lock levels shared.
110
111 """
112 return dict.fromkeys(locking.LEVELS, 1)
113
114
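# Illustrative note for ShareAll above: the returned dict maps every level in
# locking.LEVELS to 1 (i.e. "shared"); a LU would typically assign it to its
# share_locks attribute when it only needs shared locks (assumed usage, not
# defined in this module).

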
116 """Checks if the instances in a node group are still correct.
117
118 @type cfg: L{config.ConfigWriter}
119 @param cfg: The cluster configuration
120 @type group_uuid: string
121 @param group_uuid: Node group UUID
122 @type owned_instance_names: set or frozenset
123 @param owned_instance_names: List of currently owned instances
124
125 """
126 wanted_instances = frozenset(cfg.GetInstanceNames(
127 cfg.GetNodeGroupInstances(group_uuid)))
128 if owned_instance_names != wanted_instances:
129 raise errors.OpPrereqError("Instances in node group '%s' changed since"
130 " locks were acquired, wanted '%s', have '%s';"
131 " retry the operation" %
132 (group_uuid,
133 utils.CommaJoin(wanted_instances),
134 utils.CommaJoin(owned_instance_names)),
135 errors.ECODE_STATE)
136
137 return wanted_instances
138
139
141 """Returns list of checked and expanded node names.
142
143 @type lu: L{LogicalUnit}
144 @param lu: the logical unit on whose behalf we execute
145 @type short_node_names: list
146 @param short_node_names: list of node names or None for all nodes
147 @rtype: tuple of lists
148 @return: tupe with (list of node UUIDs, list of node names)
149 @raise errors.ProgrammerError: if the nodes parameter is wrong type
150
151 """
152 if short_node_names:
153 node_uuids = [ExpandNodeUuidAndName(lu.cfg, None, name)[0]
154 for name in short_node_names]
155 else:
156 node_uuids = lu.cfg.GetNodeList()
157
158 return (node_uuids, [lu.cfg.GetNodeName(uuid) for uuid in node_uuids])
159
160
162 """Returns list of checked and expanded instance names.
163
164 @type lu: L{LogicalUnit}
165 @param lu: the logical unit on whose behalf we execute
166 @type short_inst_names: list
167 @param short_inst_names: list of instance names or None for all instances
168 @rtype: tuple of lists
169 @return: tuple of (instance UUIDs, instance names)
170 @raise errors.OpPrereqError: if the instances parameter is wrong type
171 @raise errors.OpPrereqError: if any of the passed instances is not found
172
173 """
174 if short_inst_names:
175 inst_uuids = [ExpandInstanceUuidAndName(lu.cfg, None, name)[0]
176 for name in short_inst_names]
177 else:
178 inst_uuids = lu.cfg.GetInstanceList()
179 return (inst_uuids, [lu.cfg.GetInstanceName(uuid) for uuid in inst_uuids])
180
181
def RunPostHook(lu, node_name):
  """Runs the post-hook for an opcode on a single node.

  """
  hm = lu.proc.BuildHooksManager(lu)
  try:
    hm.RunPhase(constants.HOOKS_PHASE_POST, node_names=[node_name])
  except Exception, err:
    lu.LogWarning("Errors occurred running hooks on %s: %s",
                  node_name, err)


195 """Distribute additional files which are part of the cluster configuration.
196
197 ConfigWriter takes care of distributing the config and ssconf files, but
198 there are more files which should be distributed to all nodes. This function
199 makes sure those are copied.
200
201 """
202
203 cluster = lu.cfg.GetClusterInfo()
204 master_info = lu.cfg.GetMasterNodeInfo()
205
206 online_node_uuids = lu.cfg.GetOnlineNodeList()
207 online_node_uuid_set = frozenset(online_node_uuids)
208 vm_node_uuids = list(online_node_uuid_set.intersection(
209 lu.cfg.GetVmCapableNodeList()))
210
211
212 for node_uuids in [online_node_uuids, vm_node_uuids]:
213 if master_info.uuid in node_uuids:
214 node_uuids.remove(master_info.uuid)
215
216
217 (files_all, _, files_mc, files_vm) = \
218 ComputeAncillaryFiles(cluster, True)
219
220
221 assert not (pathutils.CLUSTER_CONF_FILE in files_all or
222 pathutils.CLUSTER_CONF_FILE in files_vm)
223 assert not files_mc, "Master candidates not handled in this function"
224
225 filemap = [
226 (online_node_uuids, files_all),
227 (vm_node_uuids, files_vm),
228 ]
229
230
231 for (node_uuids, files) in filemap:
232 for fname in files:
233 UploadHelper(lu, node_uuids, fname)
234
235
237 """Compute files external to Ganeti which need to be consistent.
238
239 @type redist: boolean
240 @param redist: Whether to include files which need to be redistributed
241
242 """
243
244 files_all = set([
245 pathutils.SSH_KNOWN_HOSTS_FILE,
246 pathutils.CONFD_HMAC_KEY,
247 pathutils.CLUSTER_DOMAIN_SECRET_FILE,
248 pathutils.SPICE_CERT_FILE,
249 pathutils.SPICE_CACERT_FILE,
250 pathutils.RAPI_USERS_FILE,
251 ])
252
253 if redist:
254
255 files_all.add(pathutils.RAPI_CERT_FILE)
256 else:
257 files_all.update(pathutils.ALL_CERT_FILES)
258 files_all.update(ssconf.SimpleStore().GetFileList())
259
260 if cluster.modify_etc_hosts:
261 files_all.add(pathutils.ETC_HOSTS)
262
263 if cluster.use_external_mip_script:
264 files_all.add(pathutils.EXTERNAL_MASTER_SETUP_SCRIPT)
265
266
267
268
269 files_opt = set([
270 pathutils.RAPI_USERS_FILE,
271 ])
272
273
274 files_mc = set()
275
276 if not redist:
277 files_mc.add(pathutils.CLUSTER_CONF_FILE)
278
279
280 if (not redist and (cluster.IsFileStorageEnabled() or
281 cluster.IsSharedFileStorageEnabled())):
282 files_all.add(pathutils.FILE_STORAGE_PATHS_FILE)
283 files_opt.add(pathutils.FILE_STORAGE_PATHS_FILE)
284
285
286 files_vm = set(
287 filename
288 for hv_name in cluster.enabled_hypervisors
289 for filename in
290 hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[0])
291
292 files_opt |= set(
293 filename
294 for hv_name in cluster.enabled_hypervisors
295 for filename in
296 hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[1])
297
298
299 all_files_set = files_all | files_mc | files_vm
300 assert (len(all_files_set) ==
301 sum(map(len, [files_all, files_mc, files_vm]))), \
302 "Found file listed in more than one file list"
303
304
305 assert all_files_set.issuperset(files_opt), \
306 "Optional file not in a different required list"
307
308
309 assert not (redist and
310 pathutils.FILE_STORAGE_PATHS_FILE in all_files_set)
311
312 return (files_all, files_opt, files_mc, files_vm)
313
314
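# Note on the four sets returned by ComputeAncillaryFiles above: files_all is
# distributed to all online nodes, files_vm only to VM-capable nodes, files_mc
# only to master candidates, and files_opt marks files whose absence on a node
# is not an error (see RedistributeAncillaryFiles above for the redist case).

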
316 """Helper for uploading a file and showing warnings.
317
318 """
319 if os.path.exists(fname):
320 result = lu.rpc.call_upload_file(node_uuids, fname)
321 for to_node_uuids, to_result in result.items():
322 msg = to_result.fail_msg
323 if msg:
324 msg = ("Copy of file %s to node %s failed: %s" %
325 (fname, lu.cfg.GetNodeName(to_node_uuids), msg))
326 lu.LogWarning(msg)
327
328
330 """Combines the hv state from an opcode with the one of the object
331
332 @param op_input: The input dict from the opcode
333 @param obj_input: The input dict from the objects
334 @return: The verified and updated dict
335
336 """
337 if op_input:
338 invalid_hvs = set(op_input) - constants.HYPER_TYPES
339 if invalid_hvs:
340 raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
341 " %s" % utils.CommaJoin(invalid_hvs),
342 errors.ECODE_INVAL)
343 if obj_input is None:
344 obj_input = {}
345 type_check = constants.HVSTS_PARAMETER_TYPES
346 return _UpdateAndVerifySubDict(obj_input, op_input, type_check)
347
348 return None
349
350
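# Usage sketch for MergeAndVerifyHvState above (hypothetical input): called
# with the opcode's hv_state dict and the object's current hv_state, it
# returns the merged and type-checked dict; if the opcode carries no hv state
# (None or empty), it returns None and the object's state is left untouched.

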
352 """Combines the disk state from an opcode with the one of the object
353
354 @param op_input: The input dict from the opcode
355 @param obj_input: The input dict from the objects
356 @return: The verified and updated dict
357 """
358 if op_input:
359 invalid_dst = set(op_input) - constants.DS_VALID_TYPES
360 if invalid_dst:
361 raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
362 utils.CommaJoin(invalid_dst),
363 errors.ECODE_INVAL)
364 type_check = constants.DSS_PARAMETER_TYPES
365 if obj_input is None:
366 obj_input = {}
367 return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
368 type_check))
369 for key, value in op_input.items())
370
371 return None
372
373
def CheckOSParams(lu, required, node_uuids, osname, osparams, force_variant):
  """OS parameters validation.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit for which we check
  @type required: boolean
  @param required: whether the validation should fail if the OS is not
      found
  @type node_uuids: list
  @param node_uuids: the list of nodes on which we should check
  @type osname: string
  @param osname: the name of the OS we should use
  @type osparams: dict
  @param osparams: the parameters which we need to check
  @raise errors.OpPrereqError: if the parameters are not valid

  """
  node_uuids = _FilterVmNodes(lu, node_uuids)

  # Last chance to unwrap private elements before the RPC call
  for key in osparams:
    if isinstance(osparams[key], Private):
      osparams[key] = osparams[key].Get()

  if osname:
    result = lu.rpc.call_os_validate(node_uuids, required, osname,
                                     [constants.OS_VALIDATE_PARAMETERS],
                                     osparams, force_variant)
    for node_uuid, nres in result.items():
      # we don't check for offline cases since this should be run only
      # against the master node and/or an instance's nodes
      nres.Raise("OS Parameters validation failed on node %s" %
                 lu.cfg.GetNodeName(node_uuid))
      if not nres.payload:
        lu.LogInfo("OS %s not found on node %s, validation skipped",
                   osname, lu.cfg.GetNodeName(node_uuid))


413 """Checks if a given image description is either a valid file path or a URL.
414
415 @type image: string
416 @param image: An absolute path or URL, the assumed location of a disk image.
417 @type error_message: string
418 @param error_message: The error message to show if the image is not valid.
419
420 @raise errors.OpPrereqError: If the validation fails.
421
422 """
423 if image is not None and not (utils.IsUrl(image) or os.path.isabs(image)):
424 raise errors.OpPrereqError(error_message)
425
426
428 """Checks if the OS image in the OS parameters of an opcode is
429 valid.
430
431 This function can also be used in LUs as they carry an opcode.
432
433 @type op: L{opcodes.OpCode}
434 @param op: opcode containing the OS params
435
436 @rtype: string or NoneType
437 @return:
438 None if the OS parameters in the opcode do not contain the OS
439 image, otherwise the OS image value contained in the OS parameters
440 @raise errors.OpPrereqError: if OS image is not a URL or an absolute path
441
442 """
443 os_image = objects.GetOSImage(op.osparams)
444 CheckImageValidity(os_image, "OS image must be an absolute path or a URL")
445 return os_image
446
447
449 """Hypervisor parameter validation.
450
451 This function abstracts the hypervisor parameter validation to be
452 used in both instance create and instance modify.
453
454 @type lu: L{LogicalUnit}
455 @param lu: the logical unit for which we check
456 @type node_uuids: list
457 @param node_uuids: the list of nodes on which we should check
458 @type hvname: string
459 @param hvname: the name of the hypervisor we should use
460 @type hvparams: dict
461 @param hvparams: the parameters which we need to check
462 @raise errors.OpPrereqError: if the parameters are not valid
463
464 """
465 node_uuids = _FilterVmNodes(lu, node_uuids)
466
467 cluster = lu.cfg.GetClusterInfo()
468 hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)
469
470 hvinfo = lu.rpc.call_hypervisor_validate_params(node_uuids, hvname, hvfull)
471 for node_uuid in node_uuids:
472 info = hvinfo[node_uuid]
473 if info.offline:
474 continue
475 info.Raise("Hypervisor parameter validation failed on node %s" %
476 lu.cfg.GetNodeName(node_uuid))
477
478
497 """Check node PVs.
498
499 """
500 pvlist_dict = nresult.get(constants.NV_PVLIST, None)
501 if pvlist_dict is None:
502 return (["Can't get PV list from node"], None)
503 pvlist = map(objects.LvmPvInfo.FromDict, pvlist_dict)
504 errlist = []
505
506
507
508 for pv in pvlist:
509 if ":" in pv.name:
510 errlist.append("Invalid character ':' in PV '%s' of VG '%s'" %
511 (pv.name, pv.vg_name))
512 es_pvinfo = None
513 if exclusive_storage:
514 (errmsgs, es_pvinfo) = utils.LvmExclusiveCheckNodePvs(pvlist)
515 errlist.extend(errmsgs)
516 shared_pvs = nresult.get(constants.NV_EXCLUSIVEPVS, None)
517 if shared_pvs:
518 for (pvname, lvlist) in shared_pvs:
519
520 errlist.append("PV %s is shared among unrelated LVs (%s)" %
521 (pvname, utils.CommaJoin(lvlist)))
522 return (errlist, es_pvinfo)
523
524
526 """Computes if value is in the desired range.
527
528 @param name: name of the parameter for which we perform the check
529 @param qualifier: a qualifier used in the error message (e.g. 'disk/1',
530 not just 'disk')
531 @param ispecs: dictionary containing min and max values
532 @param value: actual value that we want to use
533 @return: None or an error string
534
535 """
536 if value in [None, constants.VALUE_AUTO]:
537 return None
538 max_v = ispecs[constants.ISPECS_MAX].get(name, value)
539 min_v = ispecs[constants.ISPECS_MIN].get(name, value)
540 if value > max_v or min_v > value:
541 if qualifier:
542 fqn = "%s/%s" % (name, qualifier)
543 else:
544 fqn = name
545 return ("%s value %s is not in range [%s, %s]" %
546 (fqn, value, min_v, max_v))
547 return None
548
549
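# Example for _ComputeMinMaxSpec above (hypothetical ispecs): with
#   ispecs = {constants.ISPECS_MIN: {"disk-size": 128},
#             constants.ISPECS_MAX: {"disk-size": 1024}}
# a call _ComputeMinMaxSpec("disk-size", "0", ispecs, 2048) returns
# "disk-size/0 value 2048 is not in range [128, 1024]", while a value of 512
# (or None, or constants.VALUE_AUTO) yields None.

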
554 """Verifies ipolicy against provided specs.
555
556 @type ipolicy: dict
557 @param ipolicy: The ipolicy
558 @type mem_size: int
559 @param mem_size: The memory size
560 @type cpu_count: int
561 @param cpu_count: Used cpu cores
562 @type disk_count: int
563 @param disk_count: Number of disks used
564 @type nic_count: int
565 @param nic_count: Number of nics used
566 @type disk_sizes: list of ints
567 @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
568 @type spindle_use: int
569 @param spindle_use: The number of spindles this instance uses
570 @type disk_template: string
571 @param disk_template: The disk template of the instance
572 @param _compute_fn: The compute function (unittest only)
573 @return: A list of violations, or an empty list of no violations are found
574
575 """
576 assert disk_count == len(disk_sizes)
577
578 test_settings = [
579 (constants.ISPEC_MEM_SIZE, "", mem_size),
580 (constants.ISPEC_CPU_COUNT, "", cpu_count),
581 (constants.ISPEC_NIC_COUNT, "", nic_count),
582 (constants.ISPEC_SPINDLE_USE, "", spindle_use),
583 ] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
584 for idx, d in enumerate(disk_sizes)]
585 if disk_template != constants.DT_DISKLESS:
586
587 test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
588 ret = []
589 allowed_dts = ipolicy[constants.IPOLICY_DTS]
590 if disk_template not in allowed_dts:
591 ret.append("Disk template %s is not allowed (allowed templates: %s)" %
592 (disk_template, utils.CommaJoin(allowed_dts)))
593
594 min_errs = None
595 for minmax in ipolicy[constants.ISPECS_MINMAX]:
596 errs = filter(None,
597 (_compute_fn(name, qualifier, minmax, value)
598 for (name, qualifier, value) in test_settings))
599 if min_errs is None or len(errs) < len(min_errs):
600 min_errs = errs
601 assert min_errs is not None
602 return ret + min_errs
603
604
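# Usage sketch for ComputeIPolicySpecViolation above (hypothetical values):
#   violations = ComputeIPolicySpecViolation(ipolicy, 1024, 2, 2, 1,
#                                            [10240, 20480], 2, "drbd")
# An empty result means the instance fits at least one of the policy's
# min/max groups and its disk template is allowed.

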
608 """Verifies ipolicy against provided disk sizes.
609
610 No other specs except the disk sizes, the number of disks and the disk
611 template are checked.
612
613 @type ipolicy: dict
614 @param ipolicy: The ipolicy
615 @type disk_sizes: list of ints
616 @param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
617 @type disk_template: string
618 @param disk_template: The disk template of the instance
619 @param _compute_fn: The compute function (unittest only)
620 @return: A list of violations, or an empty list of no violations are found
621
622 """
623 return ComputeIPolicySpecViolation(ipolicy,
624
625 None, None, len(disk_sizes),
626 None, disk_sizes,
627 None,
628 disk_template,
629 _compute_fn=_compute_fn)
630
631
634 """Compute if instance meets the specs of ipolicy.
635
636 @type ipolicy: dict
637 @param ipolicy: The ipolicy to verify against
638 @type instance: L{objects.Instance}
639 @param instance: The instance to verify
640 @type cfg: L{config.ConfigWriter}
641 @param cfg: Cluster configuration
642 @param _compute_fn: The function to verify ipolicy (unittest only)
643 @see: L{ComputeIPolicySpecViolation}
644
645 """
646 ret = []
647 be_full = cfg.GetClusterInfo().FillBE(instance)
648 mem_size = be_full[constants.BE_MAXMEM]
649 cpu_count = be_full[constants.BE_VCPUS]
650 inst_nodes = cfg.GetInstanceNodes(instance.uuid)
651 es_flags = rpc.GetExclusiveStorageForNodes(cfg, inst_nodes)
652 disks = cfg.GetInstanceDisks(instance.uuid)
653 if any(es_flags.values()):
654
655 try:
656 spindle_use = sum([disk.spindles for disk in disks])
657 except TypeError:
658 ret.append("Number of spindles not configured for disks of instance %s"
659 " while exclusive storage is enabled, try running gnt-cluster"
660 " repair-disk-sizes" % instance.name)
661
662 spindle_use = None
663 else:
664 spindle_use = be_full[constants.BE_SPINDLE_USE]
665 disk_count = len(disks)
666 disk_sizes = [disk.size for disk in disks]
667 nic_count = len(instance.nics)
668 disk_template = instance.disk_template
669
670 return ret + _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
671 disk_sizes, spindle_use, disk_template)
672
673
675 """Computes a set of instances who violates given ipolicy.
676
677 @param ipolicy: The ipolicy to verify
678 @type instances: L{objects.Instance}
679 @param instances: List of instances to verify
680 @type cfg: L{config.ConfigWriter}
681 @param cfg: Cluster configuration
682 @return: A frozenset of instance names violating the ipolicy
683
684 """
685 return frozenset([inst.name for inst in instances
686 if ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
687
688
690 """Computes a set of any instances that would violate the new ipolicy.
691
692 @param old_ipolicy: The current (still in-place) ipolicy
693 @param new_ipolicy: The new (to become) ipolicy
694 @param instances: List of instances to verify
695 @type cfg: L{config.ConfigWriter}
696 @param cfg: Cluster configuration
697 @return: A list of instances which violates the new ipolicy but
698 did not before
699
700 """
701 return (_ComputeViolatingInstances(new_ipolicy, instances, cfg) -
702 _ComputeViolatingInstances(old_ipolicy, instances, cfg))
703
704
def GetUpdatedParams(old_params, update_dict,
                     use_default=True, use_none=False):
  """Return the new version of a parameter dictionary.

  @type old_params: dict
  @param old_params: old parameters
  @type update_dict: dict
  @param update_dict: dict containing new parameter values, or
      constants.VALUE_DEFAULT to reset the parameter to its default
      value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
  @rtype: dict
  @return: the new parameter dictionary

  """
  params_copy = copy.deepcopy(old_params)
  for key, val in update_dict.iteritems():
    if ((use_default and val == constants.VALUE_DEFAULT) or
        (use_none and val is None)):
      try:
        del params_copy[key]
      except KeyError:
        pass
    else:
      params_copy[key] = val
  return params_copy


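# Usage sketch for GetUpdatedParams above (hypothetical values):
#   GetUpdatedParams({"a": 1, "b": 2},
#                    {"a": constants.VALUE_DEFAULT, "c": 3})
# returns {"b": 2, "c": 3}: "a" is removed so it reverts to its default,
# "c" is added, and old_params itself is left untouched (deep copy).

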
739 """Return the new version of an instance policy.
740
741 @param group_policy: whether this policy applies to a group and thus
742 we should support removal of policy entries
743
744 """
745 ipolicy = copy.deepcopy(old_ipolicy)
746 for key, value in new_ipolicy.items():
747 if key not in constants.IPOLICY_ALL_KEYS:
748 raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
749 errors.ECODE_INVAL)
750 if (not value or value == [constants.VALUE_DEFAULT] or
751 value == constants.VALUE_DEFAULT):
752 if group_policy:
753 if key in ipolicy:
754 del ipolicy[key]
755 else:
756 raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
757 " on the cluster'" % key,
758 errors.ECODE_INVAL)
759 else:
760 if key in constants.IPOLICY_PARAMETERS:
761
762 try:
763 ipolicy[key] = float(value)
764 except (TypeError, ValueError), err:
765 raise errors.OpPrereqError("Invalid value for attribute"
766 " '%s': '%s', error: %s" %
767 (key, value, err), errors.ECODE_INVAL)
768 elif key == constants.ISPECS_MINMAX:
769 for minmax in value:
770 for k in minmax.keys():
771 utils.ForceDictType(minmax[k], constants.ISPECS_PARAMETER_TYPES)
772 ipolicy[key] = value
773 elif key == constants.ISPECS_STD:
774 if group_policy:
775 msg = "%s cannot appear in group instance specs" % key
776 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
777 ipolicy[key] = GetUpdatedParams(old_ipolicy.get(key, {}), value,
778 use_none=False, use_default=False)
779 utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
780 else:
781
782
783 ipolicy[key] = list(value)
784 try:
785 objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
786 except errors.ConfigurationError, err:
787 raise errors.OpPrereqError("Invalid instance policy: %s" % err,
788 errors.ECODE_INVAL)
789 return ipolicy
790
791
793 """Little helper wrapper to the rpc annotation method.
794
795 @param instance: The instance object
796 @type devs: List of L{objects.Disk}
797 @param devs: The root devices (not any of its children!)
798 @param cfg: The config object
799 @returns The annotated disk copies
800 @see L{ganeti.rpc.node.AnnotateDiskParams}
801
802 """
803 return rpc.AnnotateDiskParams(devs, cfg.GetInstanceDiskParams(instance))
804
805
807 """Tells if node supports OOB.
808
809 @type cfg: L{config.ConfigWriter}
810 @param cfg: The cluster configuration
811 @type node: L{objects.Node}
812 @param node: The node
813 @return: The OOB script if supported or an empty string otherwise
814
815 """
816 return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
817
818
820 """Updates and verifies a dict with sub dicts of the same type.
821
822 @param base: The dict with the old data
823 @param updates: The dict with the new data
824 @param type_check: Dict suitable to ForceDictType to verify correct types
825 @returns: A new dict with updated and verified values
826
827 """
828 def fn(old, value):
829 new = GetUpdatedParams(old, value)
830 utils.ForceDictType(new, type_check)
831 return new
832
833 ret = copy.deepcopy(base)
834 ret.update(dict((key, fn(base.get(key, {}), value))
835 for key, value in updates.items()))
836 return ret
837
838
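# Example for _UpdateAndVerifySubDict above (hypothetical type_check): with
#   type_check = {"mem": constants.VTYPE_INT}
#   _UpdateAndVerifySubDict({"node1": {"mem": 1}}, {"node1": {"mem": 2}},
#                           type_check)
# returns {"node1": {"mem": 2}}; a wrongly-typed "mem" value would raise
# through utils.ForceDictType.

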
840 """Filters out non-vm_capable nodes from a list.
841
842 @type lu: L{LogicalUnit}
843 @param lu: the logical unit for which we check
844 @type node_uuids: list
845 @param node_uuids: the list of nodes on which we should check
846 @rtype: list
847 @return: the list of vm-capable nodes
848
849 """
850 vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
851 return [uuid for uuid in node_uuids if uuid not in vm_nodes]
852
853
855 """Decides on which iallocator to use.
856
857 @type cfg: L{config.ConfigWriter}
858 @param cfg: Cluster configuration object
859 @type ialloc: string or None
860 @param ialloc: Iallocator specified in opcode
861 @rtype: string
862 @return: Iallocator name
863
864 """
865 if not ialloc:
866
867 ialloc = cfg.GetDefaultIAllocator()
868
869 if not ialloc:
870 raise errors.OpPrereqError("No iallocator was specified, neither in the"
871 " opcode nor as a cluster-wide default",
872 errors.ECODE_INVAL)
873
874 return ialloc
875
876
879 """Checks if node groups for locked instances are still correct.
880
881 @type cfg: L{config.ConfigWriter}
882 @param cfg: Cluster configuration
883 @type instances: dict; string as key, L{objects.Instance} as value
884 @param instances: Dictionary, instance UUID as key, instance object as value
885 @type owned_groups: iterable of string
886 @param owned_groups: List of owned groups
887 @type owned_node_uuids: iterable of string
888 @param owned_node_uuids: List of owned nodes
889 @type cur_group_uuid: string or None
890 @param cur_group_uuid: Optional group UUID to check against instance's groups
891
892 """
893 for (uuid, inst) in instances.items():
894 inst_nodes = cfg.GetInstanceNodes(inst.uuid)
895 assert owned_node_uuids.issuperset(inst_nodes), \
896 "Instance %s's nodes changed while we kept the lock" % inst.name
897
898 inst_groups = CheckInstanceNodeGroups(cfg, uuid, owned_groups)
899
900 assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
901 "Instance %s has no node in group %s" % (inst.name, cur_group_uuid)
902
903
905 """Checks if the owned node groups are still correct for an instance.
906
907 @type cfg: L{config.ConfigWriter}
908 @param cfg: The cluster configuration
909 @type inst_uuid: string
910 @param inst_uuid: Instance UUID
911 @type owned_groups: set or frozenset
912 @param owned_groups: List of currently owned node groups
913 @type primary_only: boolean
914 @param primary_only: Whether to check node groups for only the primary node
915
916 """
917 inst_groups = cfg.GetInstanceNodeGroups(inst_uuid, primary_only)
918
919 if not owned_groups.issuperset(inst_groups):
920 raise errors.OpPrereqError("Instance %s's node groups changed since"
921 " locks were acquired, current groups are"
922 " are '%s', owning groups '%s'; retry the"
923 " operation" %
924 (cfg.GetInstanceName(inst_uuid),
925 utils.CommaJoin(inst_groups),
926 utils.CommaJoin(owned_groups)),
927 errors.ECODE_STATE)
928
929 return inst_groups
930
931
933 """Unpacks the result of change-group and node-evacuate iallocator requests.
934
935 Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
936 L{constants.IALLOCATOR_MODE_CHG_GROUP}.
937
938 @type lu: L{LogicalUnit}
939 @param lu: Logical unit instance
940 @type alloc_result: tuple/list
941 @param alloc_result: Result from iallocator
942 @type early_release: bool
943 @param early_release: Whether to release locks early if possible
944 @type use_nodes: bool
945 @param use_nodes: Whether to display node names instead of groups
946
947 """
948 (moved, failed, jobs) = alloc_result
949
950 if failed:
951 failreason = utils.CommaJoin("%s (%s)" % (name, reason)
952 for (name, reason) in failed)
953 lu.LogWarning("Unable to evacuate instances %s", failreason)
954 raise errors.OpExecError("Unable to evacuate instances %s" % failreason)
955
956 if moved:
957 lu.LogInfo("Instances to be moved: %s",
958 utils.CommaJoin(
959 "%s (to %s)" %
960 (name, _NodeEvacDest(use_nodes, group, node_names))
961 for (name, group, node_names) in moved))
962
963 return [map(compat.partial(_SetOpEarlyRelease, early_release),
964 map(opcodes.OpCode.LoadOpCode, ops))
965 for ops in jobs]
966
967
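# Shape of alloc_result as unpacked by LoadNodeEvacResult above (illustrative):
#   moved  = [(instance_name, group_uuid, [node_name, ...]), ...]
#   failed = [(instance_name, reason), ...]
#   jobs   = [[serialized_opcode, ...], ...]
# The return value is one list of OpCode objects per job, each with
# early_release applied where the opcode supports it.

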
969 """Returns group or nodes depending on caller's choice.
970
971 """
972 if use_nodes:
973 return utils.CommaJoin(node_names)
974 else:
975 return group
976
977
979 """Sets C{early_release} flag on opcodes if available.
980
981 """
982 try:
983 op.early_release = early_release
984 except AttributeError:
985 assert not isinstance(op, opcodes.OpInstanceReplaceDisks)
986
987 return op
988
989
991 """Creates a map from (node, volume) to instance name.
992
993 @type cfg: L{config.ConfigWriter}
994 @param cfg: The cluster configuration
995 @type instances: list of L{objects.Instance}
996 @rtype: dict; tuple of (node uuid, volume name) as key, L{objects.Instance}
997 object as value
998
999 """
1000 return dict(
1001 ((node_uuid, vol), inst)
1002 for inst in instances
1003 for (node_uuid, vols) in cfg.GetInstanceLVsByNode(inst.uuid).items()
1004 for vol in vols)
1005
1006
1008 """Make sure that none of the given paramters is global.
1009
1010 If a global parameter is found, an L{errors.OpPrereqError} exception is
1011 raised. This is used to avoid setting global parameters for individual nodes.
1012
1013 @type params: dictionary
1014 @param params: Parameters to check
1015 @type glob_pars: dictionary
1016 @param glob_pars: Forbidden parameters
1017 @type kind: string
1018 @param kind: Kind of parameters (e.g. "node")
1019 @type bad_levels: string
1020 @param bad_levels: Level(s) at which the parameters are forbidden (e.g.
1021 "instance")
1022 @type good_levels: strings
1023 @param good_levels: Level(s) at which the parameters are allowed (e.g.
1024 "cluster or group")
1025
1026 """
1027 used_globals = glob_pars.intersection(params)
1028 if used_globals:
1029 msg = ("The following %s parameters are global and cannot"
1030 " be customized at %s level, please modify them at"
1031 " %s level: %s" %
1032 (kind, bad_levels, good_levels, utils.CommaJoin(used_globals)))
1033 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1034
1035
1037 """Whether exclusive_storage is in effect for the given node.
1038
1039 @type cfg: L{config.ConfigWriter}
1040 @param cfg: The cluster configuration
1041 @type node: L{objects.Node}
1042 @param node: The node
1043 @rtype: bool
1044 @return: The effective value of exclusive_storage
1045
1046 """
1047 return cfg.GetNdParams(node)[constants.ND_EXCLUSIVE_STORAGE]
1048
1049
1051 """Given an instance object, checks if the instance is running.
1052
1053 This function asks the backend whether the instance is running and
1054 user shutdown instances are considered not to be running.
1055
1056 @type lu: L{LogicalUnit}
1057 @param lu: LU on behalf of which we make the check
1058
1059 @type instance: L{objects.Instance}
1060 @param instance: instance to check whether it is running
1061
1062 @rtype: bool
1063 @return: 'True' if the instance is running, 'False' otherwise
1064
1065 """
1066 hvparams = lu.cfg.GetClusterInfo().FillHV(instance)
1067 result = lu.rpc.call_instance_info(instance.primary_node, instance.name,
1068 instance.hypervisor, hvparams)
1069
1070
1071 result.Raise("Can't retrieve instance information for instance '%s'" %
1072 instance.name, prereq=prereq, ecode=errors.ECODE_ENVIRON)
1073
1074 return result.payload and \
1075 "state" in result.payload and \
1076 (result.payload["state"] != hypervisor.hv_base.HvInstanceState.SHUTDOWN)
1077
1078
1080 """Ensure that an instance is in one of the required states.
1081
1082 @param lu: the LU on behalf of which we make the check
1083 @param instance: the instance to check
1084 @param msg: if passed, should be a message to replace the default one
1085 @raise errors.OpPrereqError: if the instance is not in the required state
1086
1087 """
1088 if msg is None:
1089 msg = ("can't use instance from outside %s states" %
1090 utils.CommaJoin(req_states))
1091 if instance.admin_state not in req_states:
1092 raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
1093 (instance.name, instance.admin_state, msg),
1094 errors.ECODE_STATE)
1095
1096 if constants.ADMINST_UP not in req_states:
1097 pnode_uuid = instance.primary_node
1098
1099 if not lu.cfg.GetNodeInfo(pnode_uuid).offline:
1100 if IsInstanceRunning(lu, instance):
1101 raise errors.OpPrereqError("Instance %s is running, %s" %
1102 (instance.name, msg), errors.ECODE_STATE)
1103 else:
1104 lu.LogWarning("Primary node offline, ignoring check that instance"
1105 " is down")
1106
1107
1109 """Check the sanity of iallocator and node arguments and use the
1110 cluster-wide iallocator if appropriate.
1111
1112 Check that at most one of (iallocator, node) is specified. If none is
1113 specified, or the iallocator is L{constants.DEFAULT_IALLOCATOR_SHORTCUT},
1114 then the LU's opcode's iallocator slot is filled with the cluster-wide
1115 default iallocator.
1116
1117 @type iallocator_slot: string
1118 @param iallocator_slot: the name of the opcode iallocator slot
1119 @type node_slot: string
1120 @param node_slot: the name of the opcode target node slot
1121
1122 """
1123 node = getattr(lu.op, node_slot, None)
1124 ialloc = getattr(lu.op, iallocator_slot, None)
1125 if node == []:
1126 node = None
1127
1128 if node is not None and ialloc is not None:
1129 raise errors.OpPrereqError("Do not specify both, iallocator and node",
1130 errors.ECODE_INVAL)
1131 elif ((node is None and ialloc is None) or
1132 ialloc == constants.DEFAULT_IALLOCATOR_SHORTCUT):
1133 default_iallocator = lu.cfg.GetDefaultIAllocator()
1134 if default_iallocator:
1135 setattr(lu.op, iallocator_slot, default_iallocator)
1136 else:
1137 raise errors.OpPrereqError("No iallocator or node given and no"
1138 " cluster-wide default iallocator found;"
1139 " please specify either an iallocator or a"
1140 " node, or set a cluster-wide default"
1141 " iallocator", errors.ECODE_INVAL)
1142
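# Decision table for CheckIAllocatorOrNode above (illustrative):
#   node set,  iallocator set  -> error: at most one may be specified
#   node set,  iallocator None -> use the explicitly requested node
#   node None, iallocator set  -> use that iallocator (or the default, if the
#                                 shortcut value was given)
#   node None, iallocator None -> fall back to the cluster-wide default
#                                 iallocator, or fail if none is configured

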
1162 """Ensure that a given node is online.
1163
1164 @param lu: the LU on behalf of which we make the check
1165 @param node_uuid: the node to check
1166 @param msg: if passed, should be a message to replace the default one
1167 @raise errors.OpPrereqError: if the node is offline
1168
1169 """
1170 if msg is None:
1171 msg = "Can't use offline node"
1172 if lu.cfg.GetNodeInfo(node_uuid).offline:
1173 raise errors.OpPrereqError("%s: %s" % (msg, lu.cfg.GetNodeName(node_uuid)),
1174 errors.ECODE_STATE)
1175
1176
1178 """Helper function to check if a disk template is enabled.
1179
1180 @type cluster: C{objects.Cluster}
1181 @param cluster: the cluster's configuration
1182 @type disk_template: str
1183 @param disk_template: the disk template to be checked
1184
1185 """
1186 assert disk_template is not None
1187 if disk_template not in constants.DISK_TEMPLATES:
1188 raise errors.OpPrereqError("'%s' is not a valid disk template."
1189 " Valid disk templates are: %s" %
1190 (disk_template,
1191 ",".join(constants.DISK_TEMPLATES)))
1192 if not disk_template in cluster.enabled_disk_templates:
1193 raise errors.OpPrereqError("Disk template '%s' is not enabled in cluster."
1194 " Enabled disk templates are: %s" %
1195 (disk_template,
1196 ",".join(cluster.enabled_disk_templates)))
1197
1198
1200 """Helper function to check if a storage type is enabled.
1201
1202 @type cluster: C{objects.Cluster}
1203 @param cluster: the cluster's configuration
1204 @type storage_type: str
1205 @param storage_type: the storage type to be checked
1206
1207 """
1208 assert storage_type is not None
1209 assert storage_type in constants.STORAGE_TYPES
1210
1211
1212 if storage_type == constants.ST_LVM_PV:
1213 CheckStorageTypeEnabled(cluster, constants.ST_LVM_VG)
1214 else:
1215 possible_disk_templates = \
1216 utils.storage.GetDiskTemplatesOfStorageTypes(storage_type)
1217 for disk_template in possible_disk_templates:
1218 if disk_template in cluster.enabled_disk_templates:
1219 return
1220 raise errors.OpPrereqError("No disk template of storage type '%s' is"
1221 " enabled in this cluster. Enabled disk"
1222 " templates are: %s" % (storage_type,
1223 ",".join(cluster.enabled_disk_templates)))
1224
1225
1227 """Checks ipolicy disk templates against enabled disk tempaltes.
1228
1229 @type ipolicy: dict
1230 @param ipolicy: the new ipolicy
1231 @type enabled_disk_templates: list of string
1232 @param enabled_disk_templates: list of enabled disk templates on the
1233 cluster
1234 @raises errors.OpPrereqError: if there is at least one allowed disk
1235 template that is not also enabled.
1236
1237 """
1238 assert constants.IPOLICY_DTS in ipolicy
1239 allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
1240 not_enabled = set(allowed_disk_templates) - set(enabled_disk_templates)
1241 if not_enabled:
1242 raise errors.OpPrereqError("The following disk template are allowed"
1243 " by the ipolicy, but not enabled on the"
1244 " cluster: %s" % utils.CommaJoin(not_enabled))
1245
1246
1267 """Checks if the access param is consistent with the cluster configuration.
1268
1269 @note: requires a configuration lock to run.
1270 @param parameters: the parameters to validate
1271 @param cfg: the cfg object of the cluster
1272 @param group: if set, only check for consistency within this group.
1273 @raise errors.OpPrereqError: if the LU attempts to change the access parameter
1274 to an invalid value, such as "pink bunny".
1275 @raise errors.OpPrereqError: if the LU attempts to change the access parameter
1276 to an inconsistent value, such as asking for RBD
1277 userspace access to the chroot hypervisor.
1278
1279 """
1280 CheckDiskAccessModeValidity(parameters)
1281
1282 for disk_template in parameters:
1283 access = parameters[disk_template].get(constants.LDP_ACCESS,
1284 constants.DISK_KERNELSPACE)
1285
1286 if disk_template not in constants.DTS_HAVE_ACCESS:
1287 continue
1288
1289
1290
1291 inst_uuids = cfg.GetNodeGroupInstances(group) if group else \
1292 cfg.GetInstanceList()
1293
1294 for entry in inst_uuids:
1295 inst = cfg.GetInstanceInfo(entry)
1296 inst_template = inst.disk_template
1297
1298 if inst_template != disk_template:
1299 continue
1300
1301 hv = inst.hypervisor
1302
1303 if not IsValidDiskAccessModeCombination(hv, inst_template, access):
1304 raise errors.OpPrereqError("Instance {i}: cannot use '{a}' access"
1305 " setting with {h} hypervisor and {d} disk"
1306 " type.".format(i=inst.name,
1307 a=access,
1308 h=hv,
1309 d=inst_template))
1310
1311
1313 """Checks if an hypervisor can read a disk template with given mode.
1314
1315 @param hv: the hypervisor that will access the data
1316 @param disk_template: the disk template the data is stored as
1317 @param mode: how the hypervisor should access the data
1318 @return: True if the hypervisor can read a given read disk_template
1319 in the specified mode.
1320
1321 """
1322 if mode == constants.DISK_KERNELSPACE:
1323 return True
1324
1325 if (hv == constants.HT_KVM and
1326 disk_template in (constants.DT_RBD, constants.DT_GLUSTER) and
1327 mode == constants.DISK_USERSPACE):
1328 return True
1329
1330
1331 return False
1332
1333
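# Summary of IsValidDiskAccessModeCombination above: kernelspace access is
# always accepted; userspace access is currently only accepted for KVM with
# RBD or Gluster disks; every other combination is rejected.

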
1358 """Removes the node's certificate from the candidate certificates list.
1359
1360 @type cfg: C{config.ConfigWriter}
1361 @param cfg: the cluster's configuration
1362 @type node_uuid: string
1363 @param node_uuid: the node's UUID
1364
1365 """
1366 cfg.RemoveNodeFromCandidateCerts(node_uuid)
1367
1368
1421 """Create an OpCode that connects a group to the instance
1422 communication network.
1423
1424 This OpCode contains the configuration necessary for the instance
1425 communication network.
1426
1427 @type group_uuid: string
1428 @param group_uuid: UUID of the group to connect
1429
1430 @type network: string
1431 @param network: name or UUID of the network to connect to, i.e., the
1432 instance communication network
1433
1434 @rtype: L{ganeti.opcodes.OpCode}
1435 @return: OpCode that connects the group to the instance
1436 communication network
1437
1438 """
1439 return opcodes.OpNetworkConnect(
1440 group_name=group_uuid,
1441 network_name=network,
1442 network_mode=constants.INSTANCE_COMMUNICATION_NETWORK_MODE,
1443 network_link=constants.INSTANCE_COMMUNICATION_NETWORK_LINK,
1444 conflicts_check=True)
1445
1446
1448 """Determines the size of the specified image.
1449
1450 @type image: string
1451 @param image: absolute filepath or URL of the image
1452
1453 @type node_uuid: string
1454 @param node_uuid: if L{image} is a filepath, this is the UUID of the
1455 node where the image is located
1456
1457 @rtype: int
1458 @return: size of the image in MB, rounded up
1459 @raise OpExecError: if the image does not exist
1460
1461 """
1462
1463 class _HeadRequest(urllib2.Request):
1464 def get_method(self):
1465 return "HEAD"
1466
1467 if utils.IsUrl(image):
1468 try:
1469 response = urllib2.urlopen(_HeadRequest(image))
1470 except urllib2.URLError:
1471 raise errors.OpExecError("Could not retrieve image from given url '%s'" %
1472 image)
1473
1474 content_length_str = response.info().getheader('content-length')
1475
1476 if not content_length_str:
1477 raise errors.OpExecError("Could not determine image size from given url"
1478 " '%s'" % image)
1479
1480 byte_size = int(content_length_str)
1481 else:
1482
1483 result = lu.rpc.call_get_file_info(node_uuid, image)
1484 result.Raise("Could not determine size of file '%s'" % image)
1485
1486 success, attributes = result.payload
1487 if not success:
1488 raise errors.OpExecError("Could not open file '%s'" % image)
1489 byte_size = attributes[constants.STAT_SIZE]
1490
1491
1492 return math.ceil(byte_size / 1024. / 1024.)
1493
1494
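# Usage sketch for DetermineImageSize above (hypothetical values); both forms
# return the size in MB, rounded up:
#   DetermineImageSize(lu, "https://example.com/disk.img", None)  # HTTP HEAD
#   DetermineImageSize(lu, "/srv/ganeti/disk.img", node_uuid)     # node RPC

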
1496 """Ensure KVM daemon is running on nodes with KVM instances.
1497
1498 If user shutdown is enabled in the cluster:
1499 - The KVM daemon will be started on VM capable nodes containing
1500 KVM instances.
1501 - The KVM daemon will be stopped on non VM capable nodes.
1502
1503 If user shutdown is disabled in the cluster:
1504 - The KVM daemon will be stopped on all nodes
1505
1506 Issues a warning for each failed RPC call.
1507
1508 @type lu: L{LogicalUnit}
1509 @param lu: logical unit on whose behalf we execute
1510
1511 @type feedback_fn: callable
1512 @param feedback_fn: feedback function
1513
1514 @type nodes: list of string
1515 @param nodes: if supplied, it overrides the node uuids to start/stop;
1516 this is used mainly for optimization
1517
1518 """
1519 cluster = lu.cfg.GetClusterInfo()
1520
1521
1522 if nodes is not None:
1523 node_uuids = set(nodes)
1524 else:
1525 node_uuids = lu.cfg.GetNodeList()
1526
1527
1528 if constants.HT_KVM in cluster.enabled_hypervisors and \
1529 cluster.enabled_user_shutdown:
1530 start_nodes = []
1531 stop_nodes = []
1532
1533 for node_uuid in node_uuids:
1534 if lu.cfg.GetNodeInfo(node_uuid).vm_capable:
1535 start_nodes.append(node_uuid)
1536 else:
1537 stop_nodes.append(node_uuid)
1538 else:
1539 start_nodes = []
1540 stop_nodes = node_uuids
1541
1542
1543 if start_nodes:
1544 results = lu.rpc.call_node_ensure_daemon(start_nodes, constants.KVMD, True)
1545 for node_uuid in start_nodes:
1546 results[node_uuid].Warn("Failed to start KVM daemon in node '%s'" %
1547 node_uuid, feedback_fn)
1548
1549
1550 if stop_nodes:
1551 results = lu.rpc.call_node_ensure_daemon(stop_nodes, constants.KVMD, False)
1552 for node_uuid in stop_nodes:
1553 results[node_uuid].Warn("Failed to stop KVM daemon in node '%s'" %
1554 node_uuid, feedback_fn)
1555