31 """Logical units for querying instances."""
32
33 import itertools
34
35 from ganeti import compat
36 from ganeti import constants
37 from ganeti import locking
38 from ganeti import utils
39 from ganeti.cmdlib.base import NoHooksLU
40 from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
41 CheckInstancesNodeGroups, AnnotateDiskParams
42 from ganeti.cmdlib.instance_utils import NICListToTuple
43 from ganeti.hypervisor import hv_base
44
45
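# Usage sketch (an assumption about the surrounding plumbing, not part of
# this module): the data built by LUInstanceQueryData.Exec is what an
# OpInstanceQueryData opcode returns, e.g. roughly
#
#   op = opcodes.OpInstanceQueryData(instances=["inst1.example.com"],
#                                    static=False, use_locking=True)
#   info = cli.SubmitOpCode(op)
#
# where "opcodes" and "cli" are the usual ganeti modules and the parameter
# names mirror the self.op fields used below.
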
47 """Query runtime instance data.
48
49 """
50 REQ_BGL = False
51
53 self.needed_locks = {}
54
55
56 if not (self.op.static or self.op.use_locking):
57 self.LogWarning("Non-static data requested, locks need to be acquired")
58 self.op.use_locking = True
59
    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

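      # Node locks start out empty and are recalculated (LOCKS_REPLACE) from
      # the acquired instance locks; group and network lock sets also start
      # out empty here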
      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
      self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
      self.dont_collate_locks[locking.LEVEL_NODE] = True
      self.dont_collate_locks[locking.LEVEL_NETWORK] = True

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
                               owned_node_uuids, None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_node_uuids or owned_networks)

    self.wanted_instances = instances.values()

134 """Returns the status of a block device
135
136 """
    if self.op.static or not node_uuid:
      return None

    result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                        anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
    output_logical_id = dev.logical_id
    if dev.dev_type in constants.DTS_DRBD:
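      # A DRBD logical_id holds the two node UUIDs, the port, the two minors
      # and a shared secret; pick the secondary node and the minors relative
      # to the instance's primary node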
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
        snode_minor = dev.logical_id[4]
        pnode_minor = dev.logical_id[3]
      else:
        snode_uuid = dev.logical_id[0]
        snode_minor = dev.logical_id[3]
        pnode_minor = dev.logical_id[4]
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": pnode_minor,
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": snode_minor,
        "port": dev.logical_id[2],
      }

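      # Mask out the shared secret at the end of the logical_id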
      output_logical_id = dev.logical_id[:-1] + (None,)

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": output_logical_id,
      "drbd_info": drbd_info,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
    }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

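    # Fetch all nodes and node groups used by the wanted instances in one go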
    node_uuids = itertools.chain(*(self.cfg.GetInstanceNodes(i.uuid)
                                   for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]
      hvparams = cluster.FillHV(instance, skip_globals=True)

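      # Without live data (static query or offline primary node) the runtime
      # state cannot be determined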
      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload

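        # A user-initiated shutdown can only be reported when the cluster has
        # user shutdown enabled and, for KVM, the instance permits it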
        allow_userdown = \
            cluster.enabled_user_shutdown and \
            (instance.hypervisor != constants.HT_KVM or
             hvparams[constants.HV_KVM_USER_SHUTDOWN])

        if remote_info and "state" in remote_info:
          if hv_base.HvInstanceState.IsShutdown(remote_info["state"]):
            if allow_userdown:
              remote_state = "user down"
            else:
              remote_state = "down"
          else:
            remote_state = "up"
        else:
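          # No state reported by the hypervisor; infer the displayed state
          # from the configured admin state instead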
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          elif instance.admin_state == constants.ADMINST_DOWN:
            if instance.admin_state_source == constants.USER_SOURCE:
              remote_state = "user down"
            else:
              remote_state = "down"
          else:
            remote_state = "offline"

      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

      disk_objects = self.cfg.GetInstanceDisks(instance.uuid)
      output_disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                        node_uuid2name_fn),
                         disk_objects)

      secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in secondary_nodes]

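      # config_state reflects the configured admin state, run_state the live
      # (or derived) state computed above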
      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # NICs use the same tuple format as the one passed to hooks
        "nics": NICListToTuple(self, instance.nics),
298 "disk_template": utils.GetDiskTemplate(disk_objects),
299 "disks": output_disks,
300 "hypervisor": instance.hypervisor,
301 "network_port": instance.network_port,
302 "hv_instance": instance.hvparams,
303 "hv_actual": hvparams,
304 "be_instance": instance.beparams,
305 "be_actual": cluster.FillBE(instance),
306 "os_instance": instance.osparams,
307 "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
308 "serial_no": instance.serial_no,
309 "mtime": instance.mtime,
310 "ctime": instance.ctime,
311 "uuid": instance.uuid,
312 }
313
314 return result
315