31 """Logical units for querying instances."""
32
33 import itertools
34
35 from ganeti import compat
36 from ganeti import constants
37 from ganeti import locking
38 from ganeti.cmdlib.base import NoHooksLU
39 from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
40 CheckInstancesNodeGroups, AnnotateDiskParams
41 from ganeti.cmdlib.instance_utils import NICListToTuple
42 from ganeti.hypervisor import hv_base
43
44
46 """Query runtime instance data.
47
48 """
49 REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = dict(self.cfg.GetMultiInstanceInfoByName(
                               self.owned_locks(locking.LEVEL_INSTANCE)))
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_uuid in owned_instances.keys()
                    for group_uuid in
                    self.cfg.GetInstanceNodeGroups(instance_uuid))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_uuid in owned_instances.keys()
                    for net_uuid in
                    self.cfg.GetInstanceNetworks(instance_uuid))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

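    # Node groups and nodes were acquired optimistically in DeclareLocks;
    # verify that they still cover the instances' current configuration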
    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
                               owned_node_uuids, None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_node_uuids or owned_networks)

    self.wanted_instances = instances.values()

130 """Returns the status of a block device
    """
    if self.op.static or not node_uuid:
      return None

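    # Ask the node for the device status; an offline node cannot be queried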
    result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                        anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
    output_logical_id = dev.logical_id
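    # A DRBD logical_id is (node_a, node_b, port, minor_a, minor_b, secret);
    # work out which end acts as the secondary for this instance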
    if dev.dev_type in constants.DTS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
        snode_minor = dev.logical_id[4]
        pnode_minor = dev.logical_id[3]
      else:
        snode_uuid = dev.logical_id[0]
        snode_minor = dev.logical_id[3]
        pnode_minor = dev.logical_id[4]
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": pnode_minor,
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": snode_minor,
        "port": dev.logical_id[2],
      }

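      # the last element of the logical_id (the DRBD secret) is not reported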
      output_logical_id = dev.logical_id[:-1] + (None,)

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": output_logical_id,
      "drbd_info": drbd_info,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
    }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

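    # Collect every node and node group referenced by the selected instances
    # in one bulk configuration query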
    node_uuids = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]
      hvparams = cluster.FillHV(instance, skip_globals=True)

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload

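        # A hypervisor-reported shutdown only counts as user-initiated when
        # the cluster allows it and, for KVM, the user shutdown hv parameter
        # is enabled for this instance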
        allow_userdown = \
          cluster.enabled_user_shutdown and \
          (instance.hypervisor != constants.HT_KVM or
           hvparams[constants.HV_KVM_USER_SHUTDOWN])

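        # Prefer the state reported by the hypervisor; fall back to the
        # configured admin state when the instance is not reported at all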
        if remote_info and "state" in remote_info:
          if hv_base.HvInstanceState.IsShutdown(remote_info["state"]):
            if allow_userdown:
              remote_state = "user down"
            else:
              remote_state = "down"
          else:
            remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          elif instance.admin_state == constants.ADMINST_DOWN:
            if instance.admin_state_source == constants.USER_SOURCE:
              remote_state = "user down"
            else:
              remote_state = "down"
          else:
            remote_state = "offline"

      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

      disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                 node_uuid2name_fn),
                  instance.disks)

      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in instance.secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, instance.secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
290 "nics": NICListToTuple(self, instance.nics),
291 "disk_template": instance.disk_template,
292 "disks": disks,
293 "hypervisor": instance.hypervisor,
294 "network_port": instance.network_port,
295 "hv_instance": instance.hvparams,
296 "hv_actual": hvparams,
297 "be_instance": instance.beparams,
298 "be_actual": cluster.FillBE(instance),
299 "os_instance": instance.osparams,
300 "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
301 "serial_no": instance.serial_no,
302 "mtime": instance.mtime,
303 "ctime": instance.ctime,
304 "uuid": instance.uuid,
305 }
306
307 return result
308