31 """Logical units for querying instances."""
32
33 import itertools
34
35 from ganeti import compat
36 from ganeti import constants
37 from ganeti import locking
38 from ganeti.cmdlib.base import NoHooksLU
39 from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
40 CheckInstancesNodeGroups, AnnotateDiskParams
41 from ganeti.cmdlib.instance_utils import NICListToTuple
42 from ganeti.hypervisor import hv_base
43

class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False
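  # REQ_BGL = False: this LU does not need to hold the Big Ganeti Lock
  # exclusively; it declares its own per-level locks in ExpandNames below.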

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
      self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
      self.dont_collate_locks[locking.LEVEL_NODE] = True
      self.dont_collate_locks[locking.LEVEL_NETWORK] = True
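
      # For a locked query of two instances the declaration ends up roughly
      # like this (sketch; the instance names are hypothetical):
      #   self.needed_locks = {
      #     locking.LEVEL_INSTANCE: ["inst1.example.com", "inst2.example.com"],
      #     locking.LEVEL_NODEGROUP: [],  # filled in when locks are declared
      #     locking.LEVEL_NODE: [],       # recalculated from the instances
      #     locking.LEVEL_NETWORK: [],
      #   }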

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
                               owned_node_uuids, None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_node_uuids or owned_networks)

    self.wanted_instances = instances.values()

  def _ComputeBlockdevStatus(self, node_uuid, instance, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node_uuid:
      return None

    result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

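    # Tuple shape, with illustrative values for a healthy, fully synced DRBD
    # device (DRBD's block device major number is 147):
    #   ("/dev/drbd0", 147, 0, 100.0, None, False, constants.LDS_OKAY)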
    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
    """Compute disk status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)
    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                        anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
    output_logical_id = dev.logical_id
    if dev.dev_type in constants.DTS_DRBD:
      # For DRBD the logical_id is
      # (node_a_uuid, node_b_uuid, port, minor_a, minor_b, secret);
      # pick the secondary end relative to the instance's primary node
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
        snode_minor = dev.logical_id[4]
        pnode_minor = dev.logical_id[3]
      else:
        snode_uuid = dev.logical_id[0]
        snode_minor = dev.logical_id[3]
        pnode_minor = dev.logical_id[4]
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": pnode_minor,
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": snode_minor,
        "port": dev.logical_id[2],
      }
      # replace the DRBD secret at the end of the logical_id with None so
      # it is not exposed to the caller
      output_logical_id = dev.logical_id[:-1] + (None,)

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []
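
    # The returned dict nests recursively through "children": for DRBD over
    # LVM the top-level entry describes the DRBD device and each child one of
    # the backing logical volumes.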

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": output_logical_id,
      "drbd_info": drbd_info,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
    }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_uuids = itertools.chain(*(self.cfg.GetInstanceNodes(i.uuid)
                                   for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))
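
    # Node and group objects are fetched from the configuration in bulk
    # above, so the per-instance loop below only performs dict lookups.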

    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]
      hvparams = cluster.FillHV(instance, skip_globals=True)

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload

        allow_userdown = \
            cluster.enabled_user_shutdown and \
            (instance.hypervisor != constants.HT_KVM or
             hvparams[constants.HV_KVM_USER_SHUTDOWN])
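        # i.e. a hypervisor-reported shutdown counts as user-initiated when
        # the cluster-wide feature is enabled and, for KVM only, the
        # HV_KVM_USER_SHUTDOWN hvparam is also set; other hypervisors need
        # no per-instance opt-in.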

        if remote_info and "state" in remote_info:
          if hv_base.HvInstanceState.IsShutdown(remote_info["state"]):
            if allow_userdown:
              remote_state = "user down"
            else:
              remote_state = "down"
          else:
            remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          elif instance.admin_state == constants.ADMINST_DOWN:
            if instance.admin_state_source == constants.USER_SOURCE:
              remote_state = "user down"
            else:
              remote_state = "down"
          else:
            remote_state = "offline"

      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

      disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                 node_uuid2name_fn),
                  self.cfg.GetInstanceDisks(instance.uuid))

      secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": hvparams,
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
      }

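    # The returned mapping is keyed by instance name, e.g. (sketch, values
    # abridged):
    #   {"inst1.example.com": {"name": "inst1.example.com",
    #                          "run_state": "up", ...}}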
    return result