31 """Utility function mainly, but not only used by instance LU's."""
32
33 import logging
34 import os
35
36 from ganeti import constants
37 from ganeti import errors
38 from ganeti import locking
39 from ganeti import network
40 from ganeti import objects
41 from ganeti import pathutils
42 from ganeti import utils
43 from ganeti.cmdlib.common import AnnotateDiskParams, \
44 ComputeIPolicyInstanceViolation, CheckDiskTemplateEnabled
45
46
def BuildInstanceHookEnv(name, primary_node_name, secondary_node_names, os_type,
                         status, minmem, maxmem, vcpus, nics, disk_template,
                         disks, bep, hvp, hypervisor_name, tags):
  """Builds instance-related env variables for hooks.

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node_name: string
  @param primary_node_name: the name of the instance's primary node
  @type secondary_node_names: list
  @param secondary_node_names: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: string
  @param status: the desired status of the instance
  @type minmem: string
  @param minmem: the minimum memory size of the instance
  @type maxmem: string
  @param maxmem: the maximum memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (name, uuid, ip, mac, mode, link, vlan, net,
      netinfo) representing the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: list of tuples (name, uuid, size, mode)
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @type tags: list
  @param tags: list of instance tags as strings
  @rtype: dict
  @return: the hook environment for this instance

  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node_name,
    "INSTANCE_SECONDARIES": " ".join(secondary_node_names),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MINMEM": minmem,
    "INSTANCE_MAXMEM": maxmem,
    # INSTANCE_MEMORY is a deprecated alias of INSTANCE_MAXMEM, kept for
    # backwards compatibility with existing hooks
    "INSTANCE_MEMORY": maxmem,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }
  if nics:
    nic_count = len(nics)
    for idx, (name, uuid, ip, mac, mode, link, vlan, net, netinfo) \
        in enumerate(nics):
      if ip is None:
        ip = ""
      if name:
        env["INSTANCE_NIC%d_NAME" % idx] = name
      env["INSTANCE_NIC%d_UUID" % idx] = uuid
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      env["INSTANCE_NIC%d_VLAN" % idx] = vlan
      if netinfo:
        nobj = objects.Network.FromDict(netinfo)
        env.update(nobj.HooksDict("INSTANCE_NIC%d_" % idx))
      elif net:
        # Broken network reference: the NIC specifies a network, but the
        # relevant network entry was not in the config; export at least
        # the network name.
        env["INSTANCE_NIC%d_NETWORK_NAME" % idx] = net
      if mode == constants.NIC_MODE_BRIDGED or \
         mode == constants.NIC_MODE_OVS:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (name, uuid, size, mode) in enumerate(disks):
      if name:
        env["INSTANCE_DISK%d_NAME" % idx] = name
      env["INSTANCE_DISK%d_UUID" % idx] = uuid
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  if not tags:
    tags = []

  env["INSTANCE_TAGS"] = " ".join(tags)

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env
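
# Illustrative sketch of the resulting environment; all values below are
# hypothetical, not taken from a real cluster:
#
#   env = BuildInstanceHookEnv(
#     name="inst1.example.com", primary_node_name="node1",
#     secondary_node_names=["node2"], os_type="debootstrap+default",
#     status="up", minmem=512, maxmem=1024, vcpus=2, nics=[],
#     disk_template=constants.DT_DRBD8, disks=[], bep={}, hvp={},
#     hypervisor_name="kvm", tags=["web"])
#   # env["INSTANCE_SECONDARIES"] == "node2"
#   # env["INSTANCE_NIC_COUNT"] == 0, env["INSTANCE_TAGS"] == "web"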


def BuildInstanceHookEnvByObject(lu, instance, override=None):
  """Builds instance-related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)
  args = {
    "name": instance.name,
    "primary_node_name": lu.cfg.GetNodeName(instance.primary_node),
    "secondary_node_names": lu.cfg.GetNodeNames(instance.secondary_nodes),
    "os_type": instance.os,
    "status": instance.admin_state,
    "maxmem": bep[constants.BE_MAXMEM],
    "minmem": bep[constants.BE_MINMEM],
    "vcpus": bep[constants.BE_VCPUS],
    "nics": NICListToTuple(lu, instance.nics),
    "disk_template": instance.disk_template,
    "disks": [(disk.name, disk.uuid, disk.size, disk.mode)
              for disk in instance.disks],
    "bep": bep,
    "hvp": hvp,
    "hypervisor_name": instance.hypervisor,
    "tags": instance.tags,
  }
  if override:
    args.update(override)
  return BuildInstanceHookEnv(**args)
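
# A LogicalUnit typically calls this from its BuildHooksEnv method
# (illustrative sketch; the override key shown is hypothetical):
#
#   def BuildHooksEnv(self):
#     return BuildInstanceHookEnvByObject(self, self.instance,
#                                         override={"INSTANCE_STATUS": "down"})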


def GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(pathutils.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)


def CheckNodeNotDrained(lu, node_uuid):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  node = lu.cfg.GetNodeInfo(node_uuid)
  if node.drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node.name,
                               errors.ECODE_STATE)


def CheckNodeVmCapable(lu, node_uuid):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  if not lu.cfg.GetNodeInfo(node_uuid).vm_capable:
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node_uuid,
                               errors.ECODE_STATE)
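
# Both node checks are usually invoked from an LU's CheckPrereq, e.g.
# (illustrative sketch; self.op.node_uuid is a hypothetical opcode field):
#
#   def CheckPrereq(self):
#     CheckNodeNotDrained(self, self.op.node_uuid)
#     CheckNodeVmCapable(self, self.op.node_uuid)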


def RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Utility function to remove an instance.

  """
  logging.info("Removing block devices for instance %s", instance.name)

  if not RemoveDisks(lu, instance, ignore_failures=ignore_failures):
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance %s out of cluster config", instance.name)

  lu.cfg.RemoveInstance(instance.uuid)

  assert not lu.remove_locks.get(locking.LEVEL_INSTANCE), \
    "Instance lock removal conflict"

  # Remove lock for the instance
  lu.remove_locks[locking.LEVEL_INSTANCE] = instance.name


def RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node_uuid: string
  @param target_node_uuid: used to override the node on which to remove the
      disks
  @type ignore_failures: boolean
  @param ignore_failures: whether to release the DRBD ports even if some
      devices could not be removed
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  ports_to_release = set()
  anno_disks = AnnotateDiskParams(instance, instance.disks, lu.cfg)
  for (idx, device) in enumerate(anno_disks):
    if target_node_uuid:
      edata = [(target_node_uuid, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node_uuid, disk in edata:
      result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
      if result.fail_msg:
        lu.LogWarning("Could not remove disk %s on node %s,"
                      " continuing anyway: %s", idx,
                      lu.cfg.GetNodeName(node_uuid), result.fail_msg)
        if not (result.offline and node_uuid != instance.primary_node):
          all_result = False

    # if this is a DRBD disk, return its port to the pool
    if device.dev_type in constants.DTS_DRBD:
      ports_to_release.add(device.logical_id[2])

  if all_result or ignore_failures:
    for port in ports_to_release:
      lu.cfg.AddTcpUdpPort(port)

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)

  if instance.disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]:
    if len(instance.disks) > 0:
      file_storage_dir = os.path.dirname(instance.disks[0].logical_id[1])
    else:
      if instance.disk_template == constants.DT_SHARED_FILE:
        file_storage_dir = utils.PathJoin(lu.cfg.GetSharedFileStorageDir(),
                                          instance.name)
      else:
        file_storage_dir = utils.PathJoin(lu.cfg.GetFileStorageDir(),
                                          instance.name)
    if target_node_uuid:
      tgt = target_node_uuid
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, lu.cfg.GetNodeName(tgt), result.fail_msg)
      all_result = False

  return all_result
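
# Illustrative sketch of a removal call: with ignore_failures=True the DRBD
# ports are returned to the pool even if some block devices failed to go away.
#
#   if not RemoveDisks(self, self.instance, ignore_failures=True):
#     self.LogWarning("Some disks of %s could not be removed",
#                     self.instance.name)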


def NICToTuple(lu, nic):
  """Build a tuple of nic information.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nic: L{objects.NIC}
  @param nic: nic to convert to hooks tuple

  """
  # Fill the NIC's parameters from the cluster defaults and resolve its
  # network, if any, to a netinfo dict
  cluster = lu.cfg.GetClusterInfo()
  filled_params = cluster.SimpleFillNIC(nic.nicparams)
  mode = filled_params[constants.NIC_MODE]
  link = filled_params[constants.NIC_LINK]
  vlan = filled_params[constants.NIC_VLAN]
  netinfo = None
  if nic.network:
    nobj = lu.cfg.GetNetwork(nic.network)
    netinfo = objects.Network.ToDict(nobj)
  return (nic.name, nic.uuid, nic.ip, nic.mac, mode, link, vlan,
          nic.network, netinfo)


350 """Build a list of nic information tuples.
351
352 This list is suitable to be passed to _BuildInstanceHookEnv or as a return
353 value in LUInstanceQueryData.
354
355 @type lu: L{LogicalUnit}
356 @param lu: the logical unit on whose behalf we execute
357 @type nics: list of L{objects.NIC}
358 @param nics: list of nics to convert to hooks tuples
359
360 """
361 hooks_nics = []
362 for nic in nics:
363 hooks_nics.append(NICToTuple(lu, nic))
364 return hooks_nics
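
# Shape of a single resulting tuple (values are hypothetical):
#
#   ("nic0", "c71a...-uuid", "198.51.100.10", "aa:00:00:12:34:56",
#    constants.NIC_MODE_BRIDGED, "br0", "", "net-uuid", netinfo_dict)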


def CopyLockList(names):
  """Makes a copy of a list of lock names.

  Handles L{locking.ALL_SET} correctly.

  """
  if names == locking.ALL_SET:
    return locking.ALL_SET
  else:
    return names[:]
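
# Illustrative behaviour:
#
#   CopyLockList(locking.ALL_SET)    # returns the ALL_SET sentinel unchanged
#   CopyLockList(["inst1", "inst2"]) # returns a new, independent list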


def ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf the locks are released
  @param level: Lock level
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  assert not (keep is not None and names is not None), \
    "Only one of the 'names' and the 'keep' parameters can be given"

  if names is not None:
    should_release = names.__contains__
  elif keep:
    should_release = lambda name: name not in keep
  else:
    should_release = None

  owned = lu.owned_locks(level)
  if not owned:
    # Not owning any lock at this level, do nothing
    pass

  elif should_release:
    retain = []
    release = []

    # Determine which locks to release
    for name in owned:
      if should_release(name):
        release.append(name)
      else:
        retain.append(name)

    assert len(lu.owned_locks(level)) == (len(retain) + len(release))

    # Release just some locks
    lu.glm.release(level, names=release)

    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
  else:
    # Release everything
    lu.glm.release(level)

    assert not lu.glm.is_owned(level), "No locks should be owned"
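
# Typical use once an operation has narrowed down its target, releasing every
# node lock except the one still needed (illustrative sketch):
#
#   ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.node_uuid])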


def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
                                 target_group, cfg,
                                 _compute_fn=ComputeIPolicyInstanceViolation):
  """Compute if instance meets the specs of the new target group.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param current_group: The current group of the instance
  @param target_group: The new group of the instance
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  if current_group == target_group:
    return []
  else:
    return _compute_fn(ipolicy, instance, cfg)


def CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
                           _compute_fn=_ComputeIPolicyNodeViolation):
  """Checks that the target node is correct in terms of instance policy.

  @param lu: the logical unit on whose behalf we execute
  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param node: The new node to relocate
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param ignore: Ignore violations of the ipolicy
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
  res = _compute_fn(ipolicy, instance, primary_node.group, node.group, cfg)

  if res:
    msg = ("Instance does not meet target node group's (%s) instance"
           " policy: %s") % (node.group, utils.CommaJoin(res))
    if ignore:
      lu.LogWarning(msg)
    else:
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
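
# Illustrative sketch of validating a migration/failover target
# (self.op.ignore_ipolicy is assumed to exist on the opcode):
#
#   CheckTargetNodeIPolicy(self, cluster.ipolicy, self.instance, target_node,
#                          self.cfg, ignore=self.op.ignore_ipolicy)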


def GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


def CheckNodeFreeMemory(lu, node_uuid, reason, requested, hvname, hvparams):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuid: C{str}
  @param node_uuid: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hvname: string
  @param hvname: the hypervisor's name
  @type hvparams: dict of strings
  @param hvparams: the hypervisor's parameters
  @rtype: integer
  @return: node current free memory
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  node_name = lu.cfg.GetNodeName(node_uuid)
  nodeinfo = lu.rpc.call_node_info([node_uuid], None, [(hvname, hvparams)])
  nodeinfo[node_uuid].Raise("Can't get data from node %s" % node_name,
                            prereq=True, ecode=errors.ECODE_ENVIRON)
  (_, _, (hv_info, )) = nodeinfo[node_uuid].payload

  free_mem = hv_info.get("memory_free", None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node_name, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node_name, reason, requested, free_mem),
                               errors.ECODE_NORES)
  return free_mem
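
# Illustrative sketch of a pre-start memory check (bep is the instance's
# filled backend parameter dict, as in BuildInstanceHookEnvByObject above):
#
#   CheckNodeFreeMemory(self, self.instance.primary_node,
#                       "starting instance %s" % self.instance.name,
#                       bep[constants.BE_MINMEM], self.instance.hypervisor,
#                       cluster.hvparams[self.instance.hypervisor])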


def CheckInstanceBridgesExist(lu, instance, node_uuid=None):
  """Check that the bridges needed by an instance exist.

  """
  if node_uuid is None:
    node_uuid = instance.primary_node
  CheckNicsBridgesExist(lu, instance.nics, node_uuid)


def CheckNicsBridgesExist(lu, nics, node_uuid):
  """Check that the bridges needed by a list of nics exist.

  """
  # Collect the links of all bridged NICs and ask the node whether the
  # corresponding bridges are present
  cluster = lu.cfg.GetClusterInfo()
  paramslist = [cluster.SimpleFillNIC(nic.nicparams) for nic in nics]
  brlist = [params[constants.NIC_LINK] for params in paramslist
            if params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED]
  if brlist:
    result = lu.rpc.call_bridges_exist(node_uuid, brlist)
    result.Raise("Error checking bridges on destination node '%s'" %
                 lu.cfg.GetNodeName(node_uuid), prereq=True,
                 ecode=errors.ECODE_ENVIRON)


553 """Ensure that a node supports a given OS.
554
555 @param lu: the LU on behalf of which we make the check
556 @param node_uuid: the node to check
557 @param os_name: the OS to query about
558 @param force_variant: whether to ignore variant errors
559 @raise errors.OpPrereqError: if the node is not supporting the OS
560
561 """
562 result = lu.rpc.call_os_get(node_uuid, os_name)
563 result.Raise("OS '%s' not in supported OS list for node %s" %
564 (os_name, lu.cfg.GetNodeName(node_uuid)),
565 prereq=True, ecode=errors.ECODE_INVAL)
566 if not force_variant:
567 _CheckOSVariant(result.payload, os_name)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
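
# OS variants are encoded as "<os>+<variant>" (illustrative):
#
#   _CheckOSVariant(os_obj, "debootstrap+default")  # OK if variant supported
#   _CheckOSVariant(os_obj, "debootstrap")          # fails when the OS
#                                                   # declares variants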