22 """Logical units for querying instances."""
23
24 import itertools
25 import logging
26 import operator
27
28 from ganeti import compat
29 from ganeti import constants
30 from ganeti import locking
31 from ganeti import qlang
32 from ganeti import query
33 from ganeti.cmdlib.base import QueryBase, NoHooksLU
34 from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
35 CheckInstanceNodeGroups, CheckInstancesNodeGroups, AnnotateDiskParams
36 from ganeti.cmdlib.instance_operation import GetInstanceConsole
37 from ganeti.cmdlib.instance_utils import NICListToTuple
38
39 import ganeti.masterd.instance


class InstanceQuery(QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks = ShareAll()

    if self.names:
      self.wanted = GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

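    # Locks are only needed when the caller allows them and live (runtime)
    # data was requested; static data can be served without locks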
    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.needed_locks[locking.LEVEL_NETWORK] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.do_grouplocks = (self.do_locking and
                          query.IQ_NODES in self.requested_data)

  def DeclareLocks(self, lu, level):
    if self.do_locking:
      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]

        # Lock all node groups of the owned instances optimistically; the set
        # is computed before the node locks are held, so it has to be
        # verified later in _CheckGroupLocks
        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
          set(group_uuid
              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
              for group_uuid in lu.cfg.GetInstanceNodeGroups(instance_name))
      elif level == locking.LEVEL_NODE:
        lu._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        lu.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
                    for net_uuid in lu.cfg.GetInstanceNetworks(instance_name))

  @staticmethod
  def _CheckGroupLocks(lu):
    owned_instances = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))

    # Check if the node groups of the locked instances are still correct
    for instance_name in owned_instances:
      CheckInstanceNodeGroups(lu.cfg, instance_name, owned_groups)

96 """Computes the list of instances and their attributes.
97
98 """
99 if self.do_grouplocks:
100 self._CheckGroupLocks(lu)
101
102 cluster = lu.cfg.GetClusterInfo()
103 all_info = lu.cfg.GetAllInstancesInfo()
104
105 instance_names = self._GetNames(lu, all_info.keys(), locking.LEVEL_INSTANCE)
106
107 instance_list = [all_info[name] for name in instance_names]
108 nodes = frozenset(itertools.chain(*(inst.all_nodes
109 for inst in instance_list)))
110 hv_list = list(set([inst.hypervisor for inst in instance_list]))
111 bad_nodes = []
112 offline_nodes = []
113 wrongnode_inst = set()
114
    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(nodes, hv_list)
      for name in nodes:
        result = node_data[name]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_nodes.append(name)
        if result.fail_msg:
          bad_nodes.append(name)
        elif result.payload:
          for inst in result.payload:
            if inst in all_info:
              if all_info[inst].primary_node == name:
                live_data.update(result.payload)
              else:
                wrongnode_inst.add(inst)
            else:
              # Orphan instance: reported by the node but not part of the
              # cluster configuration, so it is not listed in the output
              logging.warning("Orphan instance '%s' found on node %s",
                              inst, name)
    else:
      live_data = {}

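    # Disk usage is computed from the configured disk sizes; no RPC to the
    # nodes is needed for this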
    if query.IQ_DISKUSAGE in self.requested_data:
      gmi = ganeti.masterd.instance
      disk_usage = dict((inst.name,
                         gmi.ComputeDiskSize(inst.disk_template,
                                             [{constants.IDISK_SIZE: disk.size}
                                              for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

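    # Console information is only available for instances reported as
    # running by their primary node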
    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.name in live_data:
          # Instance is running
          consinfo[inst.name] = GetInstanceConsole(cluster, inst)
        else:
          consinfo[inst.name] = None
      assert set(consinfo.keys()) == set(instance_names)
    else:
      consinfo = None

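    # Node and node group objects are only needed when node-related fields
    # were requested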
    if query.IQ_NODES in self.requested_data:
      node_names = set(itertools.chain(*map(operator.attrgetter("all_nodes"),
                                            instance_list)))
      nodes = dict(lu.cfg.GetMultiNodeInfo(node_names))
      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
                    for uuid in set(map(operator.attrgetter("group"),
                                        nodes.values())))
    else:
      nodes = None
      groups = None

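    # Collect all networks used by the selected instances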
    if query.IQ_NETWORKS in self.requested_data:
      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.name)
                                    for i in instance_list))
      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
    else:
      networks = None

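    # Hand the collected data to the generic query machinery, which extracts
    # the requested fields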
    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_nodes, bad_nodes,
                                   live_data, wrongnode_inst, consinfo,
                                   nodes, groups, networks)
190 """Logical unit for querying instances.
191
192 """
193
194 REQ_BGL = False
195
199
202
205
206 - def Exec(self, feedback_fn):
208
211 """Query runtime instance data.
212
213 """
214 REQ_BGL = False
215
  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      self.wanted_names = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

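      # Placeholders only; the node, group and network locks are computed in
      # DeclareLocks once the instance locks are known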
      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = self.owned_locks(locking.LEVEL_INSTANCE)
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by the owned instances optimistically; the set
        # is computed before the group locks are held, so it is verified
        # again in CheckPrereq
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_name in owned_instances
                    for group_uuid in
                      self.cfg.GetInstanceNodeGroups(instance_name))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_name in owned_instances
                    for net_uuid in
                      self.cfg.GetInstanceNetworks(instance_name))

268 """Check prerequisites.
269
270 This only checks the optional instance list against the existing names.
271
272 """
273 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
274 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
275 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
276 owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))
277
278 if self.wanted_names is None:
279 assert self.op.use_locking, "Locking was not used"
280 self.wanted_names = owned_instances
281
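    # Fetch the configuration objects for all wanted instances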
    instances = dict(self.cfg.GetMultiInstanceInfo(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups, owned_nodes,
                               None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_nodes or owned_networks)

    self.wanted_instances = instances.values()

294 """Returns the status of a block device
295
296 """
297 if self.op.static or not node:
298 return None
299
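    # Set the disk's node-specific ID before querying it via RPC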
    self.cfg.SetDiskID(dev, node)

    result = self.rpc.call_blockdev_find(node, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, snode, dev):
    """Compute disk status, annotating the disk parameters first.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)
    return self._ComputeDiskStatusInner(instance, snode, anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode, dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    if dev.dev_type in constants.LDS_DRBD:
      # the secondary node is taken from the DRBD logical ID
      if dev.logical_id[0] == instance.primary_node:
        snode = dev.logical_id[1]
      else:
        snode = dev.logical_id[0]

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": dev.logical_id,
      "physical_id": dev.physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "name": dev.name,
      "uuid": dev.uuid,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_names = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_names))

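    # Resolve the node groups of all involved nodes so their names can be
    # looked up below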
    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    group2name_fn = lambda uuid: groups[uuid].name
    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(instance.primary_node,
                                                  instance.name,
                                                  instance.hypervisor)
        remote_info.Raise("Error checking node %s" % instance.primary_node)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          else:
            remote_state = instance.admin_state

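      # Compute the status of each disk, recursing into child devices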
      disks = map(compat.partial(self._ComputeDiskStatus, instance, None),
                  instance.disks)

      snodes_group_uuids = [nodes[snode_name].group
                            for snode_name in instance.secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": instance.primary_node,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": instance.secondary_nodes,
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result