31 """Utility function mainly, but not only used by instance LU's."""

import logging
import os

from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import network
from ganeti import objects
from ganeti import pathutils
from ganeti import utils
from ganeti.cmdlib.common import AnnotateDiskParams, \
  ComputeIPolicyInstanceViolation, CheckDiskTemplateEnabled


def BuildInstanceHookEnv(name, primary_node_name, secondary_node_names, os_type,
                         status, minmem, maxmem, vcpus, nics, disk_template,
                         disks, bep, hvp, hypervisor_name, tags):
  """Builds instance-related env variables for hooks.

  This builds the hook environment from individual variables.

  @type name: string
  @param name: the name of the instance
  @type primary_node_name: string
  @param primary_node_name: the name of the instance's primary node
  @type secondary_node_names: list
  @param secondary_node_names: list of secondary nodes as strings
  @type os_type: string
  @param os_type: the name of the instance's OS
  @type status: string
  @param status: the desired status of the instance
  @type minmem: string
  @param minmem: the minimum memory size of the instance
  @type maxmem: string
  @param maxmem: the maximum memory size of the instance
  @type vcpus: string
  @param vcpus: the count of VCPUs the instance has
  @type nics: list
  @param nics: list of tuples (name, uuid, ip, mac, mode, link, vlan, net,
      netinfo) representing the NICs the instance has
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @type disks: list
  @param disks: list of tuples (name, uuid, size, mode)
  @type bep: dict
  @param bep: the backend parameters for the instance
  @type hvp: dict
  @param hvp: the hypervisor parameters for the instance
  @type hypervisor_name: string
  @param hypervisor_name: the hypervisor for the instance
  @type tags: list
  @param tags: list of instance tags as strings
  @rtype: dict
  @return: the hook environment for this instance

  """
  env = {
    "OP_TARGET": name,
    "INSTANCE_NAME": name,
    "INSTANCE_PRIMARY": primary_node_name,
    "INSTANCE_SECONDARIES": " ".join(secondary_node_names),
    "INSTANCE_OS_TYPE": os_type,
    "INSTANCE_STATUS": status,
    "INSTANCE_MINMEM": minmem,
    "INSTANCE_MAXMEM": maxmem,
    # legacy value kept for older hooks; duplicates INSTANCE_MAXMEM
    "INSTANCE_MEMORY": maxmem,
    "INSTANCE_VCPUS": vcpus,
    "INSTANCE_DISK_TEMPLATE": disk_template,
    "INSTANCE_HYPERVISOR": hypervisor_name,
  }
  if nics:
    nic_count = len(nics)
    for idx, (name, uuid, ip, mac, mode, link, vlan, net, netinfo) \
        in enumerate(nics):
      if ip is None:
        ip = ""
      if name:
        env["INSTANCE_NIC%d_NAME" % idx] = name
      env["INSTANCE_NIC%d_UUID" % idx] = uuid
      env["INSTANCE_NIC%d_IP" % idx] = ip
      env["INSTANCE_NIC%d_MAC" % idx] = mac
      env["INSTANCE_NIC%d_MODE" % idx] = mode
      env["INSTANCE_NIC%d_LINK" % idx] = link
      env["INSTANCE_NIC%d_VLAN" % idx] = vlan
      if netinfo:
        nobj = objects.Network.FromDict(netinfo)
        env.update(nobj.HooksDict("INSTANCE_NIC%d_" % idx))
      elif network:
        # no netinfo is available for this NIC, so only the (possibly empty)
        # network name it references can be exported to the hooks
        env["INSTANCE_NIC%d_NETWORK_NAME" % idx] = net
      if mode == constants.NIC_MODE_BRIDGED or \
         mode == constants.NIC_MODE_OVS:
        env["INSTANCE_NIC%d_BRIDGE" % idx] = link
  else:
    nic_count = 0

  env["INSTANCE_NIC_COUNT"] = nic_count

  if disks:
    disk_count = len(disks)
    for idx, (name, uuid, size, mode) in enumerate(disks):
      if name:
        env["INSTANCE_DISK%d_NAME" % idx] = name
      env["INSTANCE_DISK%d_UUID" % idx] = uuid
      env["INSTANCE_DISK%d_SIZE" % idx] = size
      env["INSTANCE_DISK%d_MODE" % idx] = mode
  else:
    disk_count = 0

  env["INSTANCE_DISK_COUNT"] = disk_count

  if not tags:
    tags = []

  env["INSTANCE_TAGS"] = " ".join(tags)

  for source, kind in [(bep, "BE"), (hvp, "HV")]:
    for key, value in source.items():
      env["INSTANCE_%s_%s" % (kind, key)] = value

  return env
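
# Illustrative usage sketch (not part of the original module): a call with one
# NIC and one disk yields, among others, the following hook environment keys.
# The argument values below are hypothetical.
#
#   env = BuildInstanceHookEnv("inst1.example.com", "node1", [], "debootstrap",
#                              "up", 128, 256, 1, nics, "plain", disks,
#                              bep, hvp, "kvm", [])
#   # env["INSTANCE_NAME"] == "inst1.example.com"
#   # env["INSTANCE_NIC_COUNT"], env["INSTANCE_DISK_COUNT"]
#   # env["INSTANCE_NIC0_MAC"], env["INSTANCE_DISK0_SIZE"]
#   # plus one "INSTANCE_BE_*" / "INSTANCE_HV_*" entry per backend and
#   # hypervisor parameter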


def BuildInstanceHookEnvByObject(lu, instance, secondary_nodes=None,
                                 disks=None, override=None):
  """Builds instance-related env variables for hooks from an object.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance for which we should build the
      environment
  @type secondary_nodes: list
  @param secondary_nodes: secondary nodes of the instance; if None, they are
      read from the configuration
  @type disks: list
  @param disks: disks of the instance; if None, they are read from the
      configuration
  @type override: dict
  @param override: dictionary with key/values that will override
      our values
  @rtype: dict
  @return: the hook environment dictionary

  """
  cluster = lu.cfg.GetClusterInfo()
  bep = cluster.FillBE(instance)
  hvp = cluster.FillHV(instance)

  # fall back to the configured secondary nodes if none were given
  if secondary_nodes is None:
    secondary_nodes = lu.cfg.GetInstanceSecondaryNodes(instance.uuid)

  # fall back to the configured disks if none were given
  if disks is None:
    disks = lu.cfg.GetInstanceDisks(instance.uuid)

  args = {
    "name": instance.name,
    "primary_node_name": lu.cfg.GetNodeName(instance.primary_node),
    "secondary_node_names": lu.cfg.GetNodeNames(secondary_nodes),
    "os_type": instance.os,
    "status": instance.admin_state,
    "maxmem": bep[constants.BE_MAXMEM],
    "minmem": bep[constants.BE_MINMEM],
    "vcpus": bep[constants.BE_VCPUS],
    "nics": NICListToTuple(lu, instance.nics),
    "disk_template": instance.disk_template,
    "disks": [(disk.name, disk.uuid, disk.size, disk.mode)
              for disk in disks],
    "bep": bep,
    "hvp": hvp,
    "hypervisor_name": instance.hypervisor,
    "tags": instance.tags,
  }
  if override:
    args.update(override)
  return BuildInstanceHookEnv(**args)
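
# Usage sketch (illustrative, not part of the original module): inside an LU's
# BuildHooksEnv(), the "status" value could be overridden for a shutdown
# operation roughly like this; the surrounding LU is an assumption.
#
#   def BuildHooksEnv(self):
#     return BuildInstanceHookEnvByObject(self, self.instance,
#                                         override={"status": "down"})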


def GetClusterDomainSecret():
  """Reads the cluster domain secret.

  """
  return utils.ReadOneLineFile(pathutils.CLUSTER_DOMAIN_SECRET_FILE,
                               strict=True)


def CheckNodeNotDrained(lu, node_uuid):
  """Ensure that a given node is not drained.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @raise errors.OpPrereqError: if the node is drained

  """
  node = lu.cfg.GetNodeInfo(node_uuid)
  if node.drained:
    raise errors.OpPrereqError("Can't use drained node %s" % node.name,
                               errors.ECODE_STATE)


def CheckNodeVmCapable(lu, node_uuid):
  """Ensure that a given node is vm capable.

  @param lu: the LU on behalf of which we make the check
  @param node_uuid: the node to check
  @raise errors.OpPrereqError: if the node is not vm capable

  """
  if not lu.cfg.GetNodeInfo(node_uuid).vm_capable:
    raise errors.OpPrereqError("Can't use non-vm_capable node %s" % node_uuid,
                               errors.ECODE_STATE)


def RemoveInstance(lu, feedback_fn, instance, ignore_failures):
  """Utility function to remove an instance.

  """
  logging.info("Removing block devices for instance %s", instance.name)

  if not RemoveDisks(lu, instance, ignore_failures=ignore_failures):
    if not ignore_failures:
      raise errors.OpExecError("Can't remove instance's disks")
    feedback_fn("Warning: can't remove instance's disks")

  logging.info("Removing instance's disks")
  for disk in instance.disks:
    lu.cfg.RemoveInstanceDisk(instance.uuid, disk)

  logging.info("Removing instance %s out of cluster config", instance.name)
  lu.cfg.RemoveInstance(instance.uuid)


def RemoveDisks(lu, instance, target_node_uuid=None, ignore_failures=False):
  """Remove all disks for an instance.

  This abstracts away some work from `AddInstance()` and
  `RemoveInstance()`. Note that in case some of the devices couldn't
  be removed, the removal will continue with the other ones.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should remove
  @type target_node_uuid: string
  @param target_node_uuid: used to override the node on which to remove the
      disks
  @type ignore_failures: bool
  @param ignore_failures: whether errors during the removal should be ignored
  @rtype: boolean
  @return: the success of the removal

  """
  logging.info("Removing block devices for instance %s", instance.name)

  all_result = True
  ports_to_release = set()
  inst_disks = lu.cfg.GetInstanceDisks(instance.uuid)
  anno_disks = AnnotateDiskParams(instance, inst_disks, lu.cfg)
  for (idx, device) in enumerate(anno_disks):
    if target_node_uuid:
      edata = [(target_node_uuid, device)]
    else:
      edata = device.ComputeNodeTree(instance.primary_node)
    for node_uuid, disk in edata:
      result = lu.rpc.call_blockdev_remove(node_uuid, (disk, instance))
      if result.fail_msg:
        lu.LogWarning("Could not remove disk %s on node %s,"
                      " continuing anyway: %s", idx,
                      lu.cfg.GetNodeName(node_uuid), result.fail_msg)
        if not (result.offline and node_uuid != instance.primary_node):
          all_result = False

    # if this is a DRBD disk, return its TCP/UDP port to the pool
    if device.dev_type in constants.DTS_DRBD:
      ports_to_release.add(device.logical_id[2])

  if all_result or ignore_failures:
    for port in ports_to_release:
      lu.cfg.AddTcpUdpPort(port)

  CheckDiskTemplateEnabled(lu.cfg.GetClusterInfo(), instance.disk_template)

  if instance.disk_template in [constants.DT_FILE, constants.DT_SHARED_FILE]:
    if len(inst_disks) > 0:
      file_storage_dir = os.path.dirname(inst_disks[0].logical_id[1])
    else:
      if instance.disk_template == constants.DT_SHARED_FILE:
        file_storage_dir = utils.PathJoin(lu.cfg.GetSharedFileStorageDir(),
                                          instance.name)
      else:
        file_storage_dir = utils.PathJoin(lu.cfg.GetFileStorageDir(),
                                          instance.name)
    if target_node_uuid:
      tgt = target_node_uuid
    else:
      tgt = instance.primary_node
    result = lu.rpc.call_file_storage_dir_remove(tgt, file_storage_dir)
    if result.fail_msg:
      lu.LogWarning("Could not remove directory '%s' on node %s: %s",
                    file_storage_dir, lu.cfg.GetNodeName(tgt), result.fail_msg)
      all_result = False

  return all_result
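
# Illustrative sketch (not part of the original module): when cleaning up
# half-created disks on a specific node, e.g. after a failed disk move, the
# target node can be forced; `self`, `source_node_uuid` and the instance
# attribute are assumptions about the calling LU.
#
#   if not RemoveDisks(self, self.instance,
#                      target_node_uuid=source_node_uuid,
#                      ignore_failures=True):
#     self.LogWarning("Some disks could not be removed")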


def NICListToTuple(lu, nics):
  """Build a list of nic information tuples.

  This list is suitable to be passed to BuildInstanceHookEnv or as a return
  value in LUInstanceQueryData.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type nics: list of L{objects.NIC}
  @param nics: list of nics to convert to hooks tuples

  """
  hooks_nics = []
  for nic in nics:
    hooks_nics.append(NICToTuple(lu, nic))
  return hooks_nics
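
# Illustrative note (not part of the original module): each element returned
# here is a 9-tuple in the shape consumed by BuildInstanceHookEnv:
#
#   (name, uuid, ip, mac, mode, link, vlan, net, netinfo)
#
# e.g. hooks_nics = NICListToTuple(self, self.instance.nics)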


def CopyLockList(names):
  """Makes a copy of a list of lock names.

  Handles L{locking.ALL_SET} correctly.

  """
  if names == locking.ALL_SET:
    return locking.ALL_SET
  else:
    return names[:]
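
# Illustrative sketch (not part of the original module): the helper lets lock
# requests be duplicated safely whether they name specific objects or use the
# ALL_SET sentinel.
#
#   CopyLockList(locking.ALL_SET)      # returns the ALL_SET sentinel unchanged
#   CopyLockList(["node1", "node2"])   # returns a fresh list, safe to mutate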


def ReleaseLocks(lu, level, names=None, keep=None):
  """Releases locks owned by an LU.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf the locks are released
  @param level: Lock level
  @type names: list or None
  @param names: Names of locks to release
  @type keep: list or None
  @param keep: Names of locks to retain

  """
  logging.debug("Lu %s ReleaseLocks %s names=%s, keep=%s",
                lu.wconfdcontext, level, names, keep)
  assert not (keep is not None and names is not None), \
    "Only one of the 'names' and the 'keep' parameters can be given"

  if names is not None:
    should_release = names.__contains__
  elif keep:
    should_release = lambda name: name not in keep
  else:
    should_release = None

  levelname = locking.LEVEL_NAMES[level]

  owned = lu.owned_locks(level)
  if not owned:
    # Not owning any lock at this level, do nothing
    pass

  elif should_release:
    retain = []
    release = []

    # Determine which locks to release
    for name in owned:
      if should_release(name):
        release.append(name)
      else:
        retain.append(name)

    assert len(lu.owned_locks(level)) == (len(retain) + len(release))

    # Release just the selected locks
    lu.WConfdClient().TryUpdateLocks(
      lu.release_request(level, release))
    assert frozenset(lu.owned_locks(level)) == frozenset(retain)
  else:
    lu.WConfdClient().FreeLocksLevel(levelname)
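
# Illustrative sketch (not part of the original module): an LU that only needs
# the primary node after its prereq checks could drop all other node locks;
# `self` and `instance` are assumptions about the caller.
#
#   ReleaseLocks(self, locking.LEVEL_NODE, keep=[instance.primary_node])
#   # or release everything owned at a level:
#   ReleaseLocks(self, locking.LEVEL_NODE_RES)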


def _ComputeIPolicyNodeViolation(ipolicy, instance, current_group,
                                 target_group, cfg,
                                 _compute_fn=ComputeIPolicyInstanceViolation):
  """Compute if instance meets the specs of the new target group.

  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param current_group: The current group of the instance
  @param target_group: The new group of the instance
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  if current_group == target_group:
    return []
  else:
    return _compute_fn(ipolicy, instance, cfg)


def CheckTargetNodeIPolicy(lu, ipolicy, instance, node, cfg, ignore=False,
                           _compute_fn=_ComputeIPolicyNodeViolation):
  """Checks that the target node is correct in terms of instance policy.

  @param lu: the logical unit on whose behalf we execute
  @param ipolicy: The ipolicy to verify
  @param instance: The instance object to verify
  @param node: The new node to relocate
  @type cfg: L{config.ConfigWriter}
  @param cfg: Cluster configuration
  @param ignore: Ignore violations of the ipolicy
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ganeti.cmdlib.common.ComputeIPolicySpecViolation}

  """
  primary_node = lu.cfg.GetNodeInfo(instance.primary_node)
  res = _compute_fn(ipolicy, instance, primary_node.group, node.group, cfg)

  if res:
    msg = ("Instance does not meet target node group's (%s) instance"
           " policy: %s") % (node.group, utils.CommaJoin(res))
    if ignore:
      lu.LogWarning(msg)
    else:
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
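
# Illustrative sketch (not part of the original module): a migration or
# failover LU might validate the target node during CheckPrereq() roughly like
# this; `target_node`, `self.op.ignore_ipolicy` and the way the group ipolicy
# is obtained are assumptions about the caller.
#
#   cluster = self.cfg.GetClusterInfo()
#   group_info = self.cfg.GetNodeGroup(target_node.group)
#   ipolicy = objects.FillIPolicy(cluster.ipolicy, group_info.ipolicy)
#   CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node,
#                          self.cfg, ignore=self.op.ignore_ipolicy)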


def GetInstanceInfoText(instance):
  """Compute the text that should be added to the disk's metadata.

  """
  return "originstname+%s" % instance.name


def CheckNodeFreeMemory(lu, node_uuid, reason, requested, hvname, hvparams):
  """Checks if a node has enough free memory.

  This function checks if a given node has the needed amount of free
  memory. In case the node has less memory or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuid: C{str}
  @param node_uuid: the node to check
  @type reason: C{str}
  @param reason: string to use in the error message
  @type requested: C{int}
  @param requested: the amount of memory in MiB to check for
  @type hvname: string
  @param hvname: the hypervisor's name
  @type hvparams: dict of strings
  @param hvparams: the hypervisor's parameters
  @rtype: integer
  @return: node current free memory
  @raise errors.OpPrereqError: if the node doesn't have enough memory, or
      we cannot check the node

  """
  node_name = lu.cfg.GetNodeName(node_uuid)
  nodeinfo = lu.rpc.call_node_info([node_uuid], None, [(hvname, hvparams)])
  nodeinfo[node_uuid].Raise("Can't get data from node %s" % node_name,
                            prereq=True, ecode=errors.ECODE_ENVIRON)
  (_, _, (hv_info, )) = nodeinfo[node_uuid].payload

  free_mem = hv_info.get("memory_free", None)
  if not isinstance(free_mem, int):
    raise errors.OpPrereqError("Can't compute free memory on node %s, result"
                               " was '%s'" % (node_name, free_mem),
                               errors.ECODE_ENVIRON)
  if requested > free_mem:
    raise errors.OpPrereqError("Not enough memory on node %s for %s:"
                               " needed %s MiB, available %s MiB" %
                               (node_name, reason, requested, free_mem),
                               errors.ECODE_NORES)
  return free_mem
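
# Illustrative sketch (not part of the original module): a failover-style LU
# could verify memory on the target node roughly like this; the variable names
# are assumptions about the caller.
#
#   cluster = self.cfg.GetClusterInfo()
#   bep = cluster.FillBE(self.instance)
#   CheckNodeFreeMemory(self, target_node_uuid,
#                       "failing over instance %s" % self.instance.name,
#                       bep[constants.BE_MAXMEM], self.instance.hypervisor,
#                       cluster.hvparams[self.instance.hypervisor])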


def CheckInstanceBridgesExist(lu, instance, node_uuid=None):
  """Check that the bridges needed by an instance exist.

  """
  if node_uuid is None:
    node_uuid = instance.primary_node
  CheckNicsBridgesExist(lu, instance.nics, node_uuid)


def CheckInstanceExistence(lu, instance_name):
  """Raises an error if an instance with the given name exists already.

  @type lu: L{LogicalUnit}
  @param lu: the logical unit on whose behalf we execute
  @type instance_name: string
  @param instance_name: The name of the instance.

  To be used in the locking phase.

  """
  if instance_name in \
      [inst.name for inst in lu.cfg.GetAllInstancesInfo().values()]:
    raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                               instance_name, errors.ECODE_EXISTS)
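
# Illustrative sketch (not part of the original module): an instance-creation
# LU would typically call this from ExpandNames(), before any locks on the new
# name are taken; the surrounding class is an assumption.
#
#   def ExpandNames(self):
#     CheckInstanceExistence(self, self.op.instance_name)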