
Source Code for Module ganeti.cmdlib.instance_query

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


"""Logical units for querying instances."""

import itertools
import logging
import operator

from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti import qlang
from ganeti import query
from ganeti.cmdlib.base import QueryBase, NoHooksLU
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
  CheckInstanceNodeGroups, CheckInstancesNodeGroups, AnnotateDiskParams
from ganeti.cmdlib.instance_operation import GetInstanceConsole
from ganeti.cmdlib.instance_utils import NICListToTuple

import ganeti.masterd.instance


class InstanceQuery(QueryBase):
  FIELDS = query.INSTANCE_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}
    lu.share_locks = ShareAll()

    if self.names:
      (_, self.wanted) = GetWantedInstances(lu, self.names)
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = (self.use_locking and
                       query.IQ_LIVE in self.requested_data)
    if self.do_locking:
      lu.needed_locks[locking.LEVEL_INSTANCE] = self.wanted
      lu.needed_locks[locking.LEVEL_NODEGROUP] = []
      lu.needed_locks[locking.LEVEL_NODE] = []
      lu.needed_locks[locking.LEVEL_NETWORK] = []
      lu.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

    self.do_grouplocks = (self.do_locking and
                          query.IQ_NODES in self.requested_data)

  def DeclareLocks(self, lu, level):
    if self.do_locking:
      if level == locking.LEVEL_NODEGROUP and self.do_grouplocks:
        assert not lu.needed_locks[locking.LEVEL_NODEGROUP]

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        lu.needed_locks[locking.LEVEL_NODEGROUP] = \
          set(group_uuid
              for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
              for group_uuid in
                lu.cfg.GetInstanceNodeGroups(
                  lu.cfg.GetInstanceInfoByName(instance_name).uuid))
      elif level == locking.LEVEL_NODE:
        lu._LockInstancesNodes()  # pylint: disable=W0212

      elif level == locking.LEVEL_NETWORK:
        lu.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_name in lu.owned_locks(locking.LEVEL_INSTANCE)
                    for net_uuid in
                      lu.cfg.GetInstanceNetworks(
                        lu.cfg.GetInstanceInfoByName(instance_name).uuid))

  @staticmethod
  def _CheckGroupLocks(lu):
    owned_instance_names = frozenset(lu.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(lu.owned_locks(locking.LEVEL_NODEGROUP))

    # Check if node groups for locked instances are still correct
    for instance_name in owned_instance_names:
      instance = lu.cfg.GetInstanceInfoByName(instance_name)
      CheckInstanceNodeGroups(lu.cfg, instance.uuid, owned_groups)

  def _GetQueryData(self, lu):
    """Computes the list of instances and their attributes.

    """
    if self.do_grouplocks:
      self._CheckGroupLocks(lu)

    cluster = lu.cfg.GetClusterInfo()
    insts_by_name = dict((inst.name, inst) for
                         inst in lu.cfg.GetAllInstancesInfo().values())

    instance_names = self._GetNames(lu, insts_by_name.keys(),
                                    locking.LEVEL_INSTANCE)

    instance_list = [insts_by_name[node] for node in instance_names]
    node_uuids = frozenset(itertools.chain(*(inst.all_nodes
                                             for inst in instance_list)))
    hv_list = list(set([inst.hypervisor for inst in instance_list]))
    bad_node_uuids = []
    offline_node_uuids = []
    wrongnode_inst_uuids = set()

    # Gather data as requested
    if self.requested_data & set([query.IQ_LIVE, query.IQ_CONSOLE]):
      live_data = {}
      node_data = lu.rpc.call_all_instances_info(node_uuids, hv_list,
                                                 cluster.hvparams)
      for node_uuid in node_uuids:
        result = node_data[node_uuid]
        if result.offline:
          # offline nodes will be in both lists
          assert result.fail_msg
          offline_node_uuids.append(node_uuid)
        if result.fail_msg:
          bad_node_uuids.append(node_uuid)
        elif result.payload:
          for inst_name in result.payload:
            if inst_name in insts_by_name:
              instance = insts_by_name[inst_name]
              if instance.primary_node == node_uuid:
                live_data[insts_by_name[inst_name].uuid] = \
                  result.payload[inst_name]
              else:
                wrongnode_inst_uuids.add(instance.uuid)
            else:
              # orphan instance; we don't list it here as we don't
              # handle this case yet in the output of instance listing
              logging.warning("Orphan instance '%s' found on node %s",
                              inst_name, lu.cfg.GetNodeName(node_uuid))
        # else no instance is alive
    else:
      live_data = {}

    if query.IQ_DISKUSAGE in self.requested_data:
      gmi = ganeti.masterd.instance
      disk_usage = dict((inst.uuid,
                         gmi.ComputeDiskSize(inst.disk_template,
                                             [{constants.IDISK_SIZE: disk.size}
                                              for disk in inst.disks]))
                        for inst in instance_list)
    else:
      disk_usage = None

    if query.IQ_CONSOLE in self.requested_data:
      consinfo = {}
      for inst in instance_list:
        if inst.uuid in live_data:
          # Instance is running
          consinfo[inst.uuid] = \
            GetInstanceConsole(cluster, inst,
                               lu.cfg.GetNodeInfo(inst.primary_node))
        else:
          consinfo[inst.uuid] = None
    else:
      consinfo = None

    if query.IQ_NODES in self.requested_data:
      nodes = dict(lu.cfg.GetMultiNodeInfo(node_uuids))
      groups = dict((uuid, lu.cfg.GetNodeGroup(uuid))
                    for uuid in set(map(operator.attrgetter("group"),
                                        nodes.values())))
    else:
      nodes = None
      groups = None

    if query.IQ_NETWORKS in self.requested_data:
      net_uuids = itertools.chain(*(lu.cfg.GetInstanceNetworks(i.uuid)
                                    for i in instance_list))
      networks = dict((uuid, lu.cfg.GetNetwork(uuid)) for uuid in net_uuids)
    else:
      networks = None

    return query.InstanceQueryData(instance_list, lu.cfg.GetClusterInfo(),
                                   disk_usage, offline_node_uuids,
                                   bad_node_uuids, live_data,
                                   wrongnode_inst_uuids, consinfo, nodes,
                                   groups, networks)


class LUInstanceQuery(NoHooksLU):
  """Logical unit for querying instances.

  """
  # pylint: disable=W0142
  REQ_BGL = False

  def CheckArguments(self):
    self.iq = InstanceQuery(qlang.MakeSimpleFilter("name", self.op.names),
                            self.op.output_fields, self.op.use_locking)

  def ExpandNames(self):
    self.iq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.iq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.iq.OldStyleQuery(self)


class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = dict(self.cfg.GetMultiInstanceInfoByName(
                               self.owned_locks(locking.LEVEL_INSTANCE)))
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_uuid in owned_instances.keys()
                    for group_uuid in
                      self.cfg.GetInstanceNodeGroups(instance_uuid))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_uuid in owned_instances.keys()
                    for net_uuid in
                      self.cfg.GetInstanceNetworks(instance_uuid))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
                               owned_node_uuids, None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_node_uuids or owned_networks)

    self.wanted_instances = instances.values()

  def _ComputeBlockdevStatus(self, node_uuid, instance, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node_uuid:
      return None

    result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                        anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
    output_logical_id = dev.logical_id
    if dev.dev_type in constants.DTS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
        snode_minor = dev.logical_id[4]
        pnode_minor = dev.logical_id[3]
      else:
        snode_uuid = dev.logical_id[0]
        snode_minor = dev.logical_id[3]
        pnode_minor = dev.logical_id[4]
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": pnode_minor,
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": snode_minor,
        "port": dev.logical_id[2],
      }
      # replace the secret present at the end of the ids with None
      output_logical_id = dev.logical_id[:-1] + (None,)

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": output_logical_id,
      "drbd_info": drbd_info,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
      }

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_uuids = itertools.chain(*(i.all_nodes for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload
        if remote_info and "state" in remote_info:
          remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          else:
            remote_state = instance.admin_state

      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

      disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                 node_uuid2name_fn),
                  instance.disks)

      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in instance.secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, instance.secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": cluster.FillHV(instance, skip_globals=True),
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result
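
Note on the DRBD handling in _ComputeDiskStatusInner: the method indexes dev.logical_id positionally. As a reading aid only (not part of instance_query.py), here is a minimal standalone sketch of that unpacking, assuming the DRBD logical_id layout implied by the code above, (node_a_uuid, node_b_uuid, port, minor_a, minor_b, secret); the helper name and the example values are hypothetical.

def unpack_drbd_logical_id(logical_id, primary_node_uuid):
  """Hypothetical helper mirroring the branch in _ComputeDiskStatusInner.

  Returns (secondary_uuid, primary_minor, secondary_minor, port).
  """
  node_a, node_b, port, minor_a, minor_b, _secret = logical_id
  if node_a == primary_node_uuid:
    # primary is node A: its minor is minor_a, the secondary's is minor_b
    return (node_b, minor_a, minor_b, port)
  # otherwise node A is the secondary and the minors are swapped
  return (node_a, minor_b, minor_a, port)

# Example with made-up values:
#   unpack_drbd_logical_id(("uuid-a", "uuid-b", 11000, 0, 1, "secret"), "uuid-a")
#   -> ("uuid-b", 0, 1, 11000)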