"""Logical units for querying instances."""

import itertools
import logging
import operator

from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti import qlang
from ganeti import query
from ganeti.cmdlib.base import QueryBase, NoHooksLU
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
  CheckInstanceNodeGroups, CheckInstancesNodeGroups, AnnotateDiskParams
from ganeti.cmdlib.instance_operation import GetInstanceConsole
from ganeti.cmdlib.instance_utils import NICListToTuple

import ganeti.masterd.instance


class InstanceQuery(QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks = ShareAll()

    if self.names:
      (_, self.wanted) = GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

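    # Locks are only needed when live data was requested and the caller
    # allows locking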
    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.needed_locks[locking.LEVEL_NETWORK] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

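    # Node group locks are additionally needed when node information was
    # requested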
    self.do_grouplocks = (self.do_locking and
                          query.IQ_NODES in self.requested_data)

  @staticmethod
  def _CheckGroupLocks(lu):
    owned_instances = dict(lu.cfg.GetMultiInstanceInfoByName(
                             lu.owned_locks(locking.LEVEL_INSTANCE)))
    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))

    # Check if node groups for locked instances are still correct
    for instance_uuid in owned_instances.keys():
      CheckInstanceNodeGroups(lu, instance_uuid, owned_groups)

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    if self.do_grouplocks:
      self._CheckGroupLocks(lu)

    cluster = lu.cfg.GetClusterInfo()
    insts_by_name = dict((inst.name, inst) for
                         inst in lu.cfg.GetAllInstancesInfo().values())

    instance_names = self._GetNames(lu, insts_by_name.keys(),
                                    locking.LEVEL_INSTANCE)

    instance_list = [insts_by_name[name] for name in instance_names]
    node_uuids = frozenset(itertools.chain(*(inst.all_nodes
                                             for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_node_uuids = []
    offline_node_uuids = []
    wrongnode_inst_uuids = set()

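    # Query the nodes for live instance data only when live or console data
    # was requested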
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(node_uuids, hv_list,
                                                 cluster.hvparams)
      for node_uuid in node_uuids:
        result = node_data[node_uuid]
        if result.offline:
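          # offline nodes will be in both lists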
          assert result.fail_msg
          offline_node_uuids.append(node_uuid)
        if result.fail_msg:
          bad_node_uuids.append(node_uuid)
        elif result.payload:
          for inst_name in result.payload:
            if inst_name in insts_by_name:
              instance = insts_by_name[inst_name]
              if instance.primary_node == node_uuid:
                for iname in result.payload:
                  live_data[insts_by_name[iname].uuid] = result.payload[iname]
              else:
                wrongnode_inst_uuids.add(instance.uuid)
            else:
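              # Orphan instance (not in the configuration); it is only
              # logged, not included in the query output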
              logging.warning("Orphan instance '%s' found on node %s",
                              inst_name, lu.cfg.GetNodeName(node_uuid))

    else:
      live_data = {}

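    # Disk usage is computed from the configured disk sizes, not queried live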
    if query.IQ_DISKUSAGE in self.requested_data:
      gmi = ganeti.masterd.instance
      disk_usage = dict((inst.uuid,
                         gmi.ComputeDiskSize(inst.disk_template,
                                             [{constants.IDISK_SIZE: disk.size}
                                              for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

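    # Console information is only returned for instances with live data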
    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.uuid in live_data:
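          # Instance is running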
          consinfo[inst.uuid] = \
            GetInstanceConsole(cluster, inst,
                               lu.cfg.GetNodeInfo(inst.primary_node))
        else:
          consinfo[inst.uuid] = None
    else:
      consinfo = None

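    # Node and node group objects are only collected when node information
    # was requested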
    if query.IQ_NODES in self.requested_data:
      nodes = dict(lu.cfg.GetMultiNodeInfo(node_uuids))
      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
                    for uuid in set(map(operator.attrgetter("group"),
                                        nodes.values())))
    else:
      nodes = None
      groups = None

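    # Network objects for all networks used by the queried instances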
    if query.IQ_NETWORKS in self.requested_data:
      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.uuid)
                                    for i in instance_list))
      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
    else:
      networks = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_node_uuids,
                                   bad_node_uuids, live_data,
                                   wrongnode_inst_uuids, consinfo, nodes,
                                   groups, networks)


class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.iq = InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
                            self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.iq.OldStyleQuery(self)


class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

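    # Use locking if requested or when non-static information is wanted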
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
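      # Expand instance names right here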
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
    else:
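      # Will use acquired locks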
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = dict(self.cfg.GetMultiInstanceInfoByName(
                               self.owned_locks(locking.LEVEL_INSTANCE)))
      if level == locking.LEVEL_NODEGROUP:
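        # Lock all groups used by the instances optimistically; this requires
        # going via the node before it's locked, requiring verification later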
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_uuid in owned_instances.keys()
                    for group_uuid in
                    self.cfg.GetInstanceNodeGroups(instance_uuid))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_uuid in owned_instances.keys()
                    for net_uuid in
                    self.cfg.GetInstanceNetworks(instance_uuid))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
                               owned_node_uuids, None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_node_uuids or owned_networks)

    self.wanted_instances = instances.values()

  def _ComputeBlockdevStatus(self, node_uuid, instance, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node_uuid:
      return None

    self.cfg.SetDiskID(dev, node_uuid)

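    # Ask the node holding the device about its current status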
    result = self.rpc.call_blockdev_find(node_uuid, dev)
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                        anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
    output_logical_id = dev.logical_id
    output_physical_id = dev.physical_id
    if dev.dev_type in constants.DTS_DRBD:
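      # For DRBD the secondary node is taken from the logical_id; otherwise
      # the snode_uuid passed in by the caller is used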
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
      else:
        snode_uuid = dev.logical_id[0]
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": dev.logical_id[3],
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": dev.logical_id[4],
        "port": dev.logical_id[2],
      }

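      # Replace the last component of the ids (the shared secret) with None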
      output_logical_id = dev.logical_id[:-1] + (None,)
      output_physical_id = dev.physical_id[:-1] + (None,)

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": output_logical_id,
      "drbd_info": drbd_info,
      "physical_id": output_physical_id,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
    }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_uuids = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

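    # Build one result entry per requested instance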
    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          else:
            remote_state = instance.admin_state

      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

      disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                 node_uuid2name_fn),
                  instance.disks)

      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in instance.secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, instance.secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
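        # NICs are returned in the same tuple format that is used for hooks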
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
      }

    return result