31 """Logical units for querying instances."""
32
33 import itertools
34
35 from ganeti import compat
36 from ganeti import constants
37 from ganeti import locking
38 from ganeti.cmdlib.base import NoHooksLU
39 from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
40 CheckInstancesNodeGroups, AnnotateDiskParams
41 from ganeti.cmdlib.instance_utils import NICListToTuple
42 from ganeti.hypervisor import hv_base
43


class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
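
  # The node, group and network locks declared above are narrowed per lock
  # level before acquisition.  What follows is a minimal sketch of that
  # DeclareLocks stage, assuming the stock LogicalUnit/ConfigWriter helpers
  # (_LockInstancesNodes, GetInstanceNodeGroups, GetInstanceNetworks): groups
  # and networks are locked optimistically and re-checked in CheckPrereq.
  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = dict(self.cfg.GetMultiInstanceInfoByName(
                               self.owned_locks(locking.LEVEL_INSTANCE)))

      if level == locking.LEVEL_NODEGROUP:
        # Lock all groups used by the owned instances optimistically; the
        # result is verified later once the node locks are held
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for inst in owned_instances.values()
                    for group_uuid in
                    self.cfg.GetInstanceNodeGroups(inst.uuid))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for inst in owned_instances.values()
                    for net_uuid in
                    self.cfg.GetInstanceNetworks(inst.uuid))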

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
                               owned_node_uuids, None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_node_uuids or owned_networks)

    self.wanted_instances = instances.values()
130 """Returns the status of a block device
131
132 """
133 if self.op.static or not node_uuid:
134 return None
135
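
    # Ask the node for the device's live status; an offline node simply
    # yields no data instead of raising an error.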
    result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                        anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
    output_logical_id = dev.logical_id
    if dev.dev_type in constants.DTS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
        snode_minor = dev.logical_id[4]
        pnode_minor = dev.logical_id[3]
      else:
        snode_uuid = dev.logical_id[0]
        snode_minor = dev.logical_id[3]
        pnode_minor = dev.logical_id[4]
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": pnode_minor,
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": snode_minor,
        "port": dev.logical_id[2],
      }
      # replace the DRBD secret at the end of the logical_id with None
      output_logical_id = dev.logical_id[:-1] + (None,)

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)
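
    # Child devices (for DRBD, the backing data and metadata volumes) are
    # reported recursively, reusing the secondary node resolved above.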
    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": output_logical_id,
      "drbd_info": drbd_info,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
    }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()
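
    # Gather every node referenced by the wanted instances once, so node and
    # group objects can be looked up from local dictionaries in the loop below.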
    node_uuids = itertools.chain(*(self.cfg.GetInstanceNodes(i.uuid)
                                   for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]
      hvparams = cluster.FillHV(instance, skip_globals=True)

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload
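
        # A user-initiated shutdown can only be reported when the cluster
        # enables it and, for KVM, the user shutdown hvparam is set.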
        allow_userdown = \
            cluster.enabled_user_shutdown and \
            (instance.hypervisor != constants.HT_KVM or
             hvparams[constants.HV_KVM_USER_SHUTDOWN])

        if remote_info and "state" in remote_info:
          if hv_base.HvInstanceState.IsShutdown(remote_info["state"]):
            if allow_userdown:
              remote_state = "user down"
            else:
              remote_state = "down"
          else:
            remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          elif instance.admin_state == constants.ADMINST_DOWN:
            if instance.admin_state_source == constants.USER_SOURCE:
              remote_state = "user down"
            else:
              remote_state = "down"
          else:
            remote_state = "offline"
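
      # Small helpers to map UUIDs to human-readable names using the node and
      # group data collected before the loop.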
      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

      disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                 node_uuid2name_fn),
                  self.cfg.GetInstanceDisks(instance.uuid))

      secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in secondary_nodes]
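
      # "run_state" and the disk status entries are the only values that may
      # come from live RPC queries; everything else is configuration data.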
      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # NICs in the same tuple format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": hvparams,
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
      }

    return result