31 """Logical units for querying instances."""
32
33 import itertools
34 import logging
35 import operator
36
37 from ganeti import compat
38 from ganeti import constants
39 from ganeti import locking
40 from ganeti import qlang
41 from ganeti import query
42 from ganeti.cmdlib.base import QueryBase, NoHooksLU
43 from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
44 CheckInstanceNodeGroups, CheckInstancesNodeGroups, AnnotateDiskParams
45 from ganeti.cmdlib.instance_operation import GetInstanceConsole
46 from ganeti.cmdlib.instance_utils import NICListToTuple
47
48 import ganeti.masterd.instance


class InstanceQuery(QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks = ShareAll()

    if self.names:
      (_, self.wanted) = GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

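    # Live data can only be collected while holding locks on the instances
    # involved, so locking is only enabled when IQ_LIVE fields were requested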
    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.needed_locks[locking.LEVEL_NETWORK] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.do_grouplocks = (self.do_locking and
                          query.IQ_NODES in self.requested_data)

  @staticmethod
  def _CheckGroupLocks(lu):
    owned_instance_names = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))

    # Check if node groups for locked instances are still correct
    for instance_name in owned_instance_names:
      instance = lu.cfg.GetInstanceInfoByName(instance_name)
      CheckInstanceNodeGroups(lu.cfg, instance.uuid, owned_groups)

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    if self.do_grouplocks:
      self._CheckGroupLocks(lu)

    cluster = lu.cfg.GetClusterInfo()
    insts_by_name = dict((inst.name, inst) for
                         inst in lu.cfg.GetAllInstancesInfo().values())

    instance_names = self._GetNames(lu, insts_by_name.keys(),
                                    locking.LEVEL_INSTANCE)

    instance_list = [insts_by_name[name] for name in instance_names]
    node_uuids = frozenset(itertools.chain(*(inst.all_nodes
                                             for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_node_uuids = []
    offline_node_uuids = []
    wrongnode_inst_uuids = set()

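    # Gather live data as requested: ask every involved node which instances
    # it is currently running and collect their runtime information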
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(node_uuids, hv_list,
                                                 cluster.hvparams)
      for node_uuid in node_uuids:
        result = node_data[node_uuid]
        if result.offline:
          # offline nodes end up in both the offline and the bad list
          assert result.fail_msg
          offline_node_uuids.append(node_uuid)
        if result.fail_msg:
          bad_node_uuids.append(node_uuid)
        elif result.payload:
          for inst_name in result.payload:
            if inst_name in insts_by_name:
              instance = insts_by_name[inst_name]
              if instance.primary_node == node_uuid:
                live_data[instance.uuid] = result.payload[inst_name]
              else:
                wrongnode_inst_uuids.add(instance.uuid)
            else:
              # instance reported by the node but unknown to the configuration
              logging.warning("Orphan instance '%s' found on node %s",
                              inst_name, lu.cfg.GetNodeName(node_uuid))

    else:
      live_data = {}

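    # Disk usage is computed from the configured disk sizes, not queried live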
    if query.IQ_DISKUSAGE in self.requested_data:
      gmi = ganeti.masterd.instance
      disk_usage = dict((inst.uuid,
                         gmi.ComputeDiskSize(inst.disk_template,
                                             [{constants.IDISK_SIZE: disk.size}
                                              for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

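    # Console information is only available for instances reported as running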
    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.uuid in live_data:
          # instance is running, so a console can be provided
          consinfo[inst.uuid] = \
            GetInstanceConsole(cluster, inst,
                               lu.cfg.GetNodeInfo(inst.primary_node))
        else:
          consinfo[inst.uuid] = None
    else:
      consinfo = None

    if query.IQ_NODES in self.requested_data:
      nodes = dict(lu.cfg.GetMultiNodeInfo(node_uuids))
      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
                    for uuid in set(map(operator.attrgetter("group"),
                                        nodes.values())))
    else:
      nodes = None
      groups = None

    if query.IQ_NETWORKS in self.requested_data:
      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.uuid)
                                    for i in instance_list))
      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
    else:
      networks = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_node_uuids,
                                   bad_node_uuids, live_data,
                                   wrongnode_inst_uuids, consinfo, nodes,
                                   groups, networks)

209 """Logical unit for querying instances.
210
211 """
212
213 REQ_BGL = False
214
218
221
224
225 - def Exec(self, feedback_fn):
227
230 """Query runtime instance data.
231
232 """
233 REQ_BGL = False
234
236 self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

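    # When locking, instance locks are shared and the node, node group and
    # network levels are filled in later by DeclareLocks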
    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = dict(self.cfg.GetMultiInstanceInfoByName(
                               self.owned_locks(locking.LEVEL_INSTANCE)))
      if level == locking.LEVEL_NODEGROUP:
        # Lock all node groups used by the owned instances optimistically;
        # group membership is verified again in CheckPrereq
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_uuid in owned_instances.keys()
                    for group_uuid in
                    self.cfg.GetInstanceNodeGroups(instance_uuid))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_uuid in owned_instances.keys()
                    for net_uuid in
                    self.cfg.GetInstanceNetworks(instance_uuid))

288 """Check prerequisites.
289
290 This only checks the optional instance list against the existing names.
291
292 """
293 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
294 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
295 owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
296 owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))
297
298 if self.wanted_names is None:
299 assert self.op.use_locking, "Locking was not used"
300 self.wanted_names = owned_instances
301
302 instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))
303
304 if self.op.use_locking:
305 CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
306 owned_node_uuids, None)
307 else:
308 assert not (owned_instances or owned_groups or
309 owned_node_uuids or owned_networks)
310
311 self.wanted_instances = instances.values()
312
314 """Returns the status of a block device
315
316 """
317 if self.op.static or not node_uuid:
318 return None
319
320 result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
321 if result.offline:
322 return None
323
324 result.Raise("Can't compute disk status for %s" % instance.name)
325
326 status = result.payload
327 if status is None:
328 return None
329
330 return (status.dev_path, status.major, status.minor,
331 status.sync_percent, status.estimated_time,
332 status.is_degraded, status.ldisk_status)
333
  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                        anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
    output_logical_id = dev.logical_id
    if dev.dev_type in constants.DTS_DRBD:
      # the secondary node is taken from the logical_id, whose layout for
      # DRBD is (node_a, node_b, port, minor_a, minor_b, secret)
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
        snode_minor = dev.logical_id[4]
        pnode_minor = dev.logical_id[3]
      else:
        snode_uuid = dev.logical_id[0]
        snode_minor = dev.logical_id[3]
        pnode_minor = dev.logical_id[4]
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": pnode_minor,
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": snode_minor,
        "port": dev.logical_id[2],
        }
      # do not expose the DRBD secret, the last element of the logical_id
      output_logical_id = dev.logical_id[:-1] + (None,)

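    # Retrieve the live status of the device from both the primary and the
    # secondary node (if any)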
    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

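    # Recurse into the child devices, e.g. the logical volumes backing a
    # DRBD disk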
    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": output_logical_id,
      "drbd_info": drbd_info,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_uuids = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

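    # Build one result entry per requested instance, keyed by instance name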
    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]

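      # Determine the runtime state; the RPC is skipped for static queries or
      # when the primary node is offline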
      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          else:
            remote_state = instance.admin_state

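      # Translate node and node group UUIDs to names for the output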
      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

      disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                 node_uuid2name_fn),
                  instance.disks)

      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in instance.secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, instance.secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # NIC list in the same tuple format that is used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result