
Source Code for Module ganeti.cmdlib.instance_query

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


"""Logical units for querying instances."""

import itertools

from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti import utils
from ganeti.cmdlib.base import NoHooksLU
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
  CheckInstancesNodeGroups, AnnotateDiskParams
from ganeti.cmdlib.instance_utils import NICListToTuple
from ganeti.hypervisor import hv_base

class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
      self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
      self.dont_collate_locks[locking.LEVEL_NODE] = True
      self.dont_collate_locks[locking.LEVEL_NETWORK] = True

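  # Illustration of the lock structure left by ExpandNames() (assumed
  # example): for op.instances=["inst1"] with use_locking=True,
  # needed_locks is roughly
  #   {LEVEL_INSTANCE: ["inst1"], LEVEL_NODEGROUP: [], LEVEL_NODE: [],
  #    LEVEL_NETWORK: []}
  # and the empty levels are filled in lazily by DeclareLocks() below.
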
  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = dict(self.cfg.GetMultiInstanceInfoByName(
                               self.owned_locks(locking.LEVEL_INSTANCE)))
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_uuid in owned_instances.keys()
                    for group_uuid in
                    self.cfg.GetInstanceNodeGroups(instance_uuid))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_uuid in owned_instances.keys()
                    for net_uuid in
                    self.cfg.GetInstanceNetworks(instance_uuid))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
                               owned_node_uuids, None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_node_uuids or owned_networks)

    self.wanted_instances = instances.values()

  def _ComputeBlockdevStatus(self, node_uuid, instance, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node_uuid:
      return None

    result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

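  # Shape of a non-None _ComputeBlockdevStatus() result, with made-up
  # example values:
  #   ("/dev/drbd0", 147, 0, 90.5, 120, False, constants.LDS_OKAY)
  # i.e. (dev_path, major, minor, sync_percent, estimated_time,
  # is_degraded, ldisk_status).
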
  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                        anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
    output_logical_id = dev.logical_id
    if dev.dev_type in constants.DTS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
        snode_minor = dev.logical_id[4]
        pnode_minor = dev.logical_id[3]
      else:
        snode_uuid = dev.logical_id[0]
        snode_minor = dev.logical_id[3]
        pnode_minor = dev.logical_id[4]
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": pnode_minor,
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": snode_minor,
        "port": dev.logical_id[2],
        }
      # replace the secret present at the end of the ids with None
      output_logical_id = dev.logical_id[:-1] + (None,)

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": output_logical_id,
      "drbd_info": drbd_info,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
      }

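  # Note on the DRBD branch above: for drbd8 devices logical_id is, as the
  # indexing here implies, the 6-tuple
  #   (node_a_uuid, node_b_uuid, port, minor_a, minor_b, secret)
  # so the minors are picked by position and the trailing shared secret is
  # masked with None before being exported, e.g. (example values):
  #   ("aaaa-node-uuid", "bbbb-node-uuid", 11000, 0, 0, None)
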
  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_uuids = itertools.chain(*(self.cfg.GetInstanceNodes(i.uuid)
                                   for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]
      hvparams = cluster.FillHV(instance, skip_globals=True)

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload

        allow_userdown = \
            cluster.enabled_user_shutdown and \
            (instance.hypervisor != constants.HT_KVM or
             hvparams[constants.HV_KVM_USER_SHUTDOWN])

        if remote_info and "state" in remote_info:
          if hv_base.HvInstanceState.IsShutdown(remote_info["state"]):
            if allow_userdown:
              remote_state = "user down"
            else:
              remote_state = "down"
          else:
            remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          elif instance.admin_state == constants.ADMINST_DOWN:
            if instance.admin_state_source == constants.USER_SOURCE:
              remote_state = "user down"
            else:
              remote_state = "down"
          else:
            remote_state = "offline"

      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

      disk_objects = self.cfg.GetInstanceDisks(instance.uuid)
      output_disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                        node_uuid2name_fn),
                         disk_objects)

      secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": utils.GetDiskTemplate(disk_objects),
        "disks": output_disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": hvparams,
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
        }

    return result
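
# Sketch of the structure Exec() returns, with hypothetical values (keys
# follow the dict built above):
#
#   {"inst1.example.com": {
#      "name": "inst1.example.com",
#      "config_state": constants.ADMINST_UP,
#      "run_state": "up",
#      "pnode": "node1.example.com",
#      "disks": [...],       # per-disk dicts from _ComputeDiskStatus()
#      "hv_actual": {...},   # hvparams with cluster defaults filled in
#      ...,
#   }}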