
Source Code for Module ganeti.cmdlib.instance_query

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


"""Logical units for querying instances."""

import itertools

from ganeti import compat
from ganeti import constants
from ganeti import locking
from ganeti.cmdlib.base import NoHooksLU
from ganeti.cmdlib.common import ShareAll, GetWantedInstances, \
  CheckInstancesNodeGroups, AnnotateDiskParams
from ganeti.cmdlib.instance_utils import NICListToTuple
from ganeti.hypervisor import hv_base
 44   
class LUInstanceQueryData(NoHooksLU):
  """Query runtime instance data.

  """
  REQ_BGL = False

  def ExpandNames(self):
    self.needed_locks = {}

    # Use locking if requested or when non-static information is wanted
    if not (self.op.static or self.op.use_locking):
      self.LogWarning("Non-static data requested, locks need to be acquired")
      self.op.use_locking = True

    if self.op.instances or not self.op.use_locking:
      # Expand instance names right here
      (_, self.wanted_names) = GetWantedInstances(self, self.op.instances)
    else:
      # Will use acquired locks
      self.wanted_names = None

    if self.op.use_locking:
      self.share_locks = ShareAll()

      if self.wanted_names is None:
        self.needed_locks[locking.LEVEL_INSTANCE] = locking.ALL_SET
      else:
        self.needed_locks[locking.LEVEL_INSTANCE] = self.wanted_names

      self.needed_locks[locking.LEVEL_NODEGROUP] = []
      self.needed_locks[locking.LEVEL_NODE] = []
      self.needed_locks[locking.LEVEL_NETWORK] = []
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
      self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True
      self.dont_collate_locks[locking.LEVEL_NODE] = True
      self.dont_collate_locks[locking.LEVEL_NETWORK] = True

  def DeclareLocks(self, level):
    if self.op.use_locking:
      owned_instances = dict(self.cfg.GetMultiInstanceInfoByName(
        self.owned_locks(locking.LEVEL_INSTANCE)))
      if level == locking.LEVEL_NODEGROUP:

        # Lock all groups used by instances optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        self.needed_locks[locking.LEVEL_NODEGROUP] = \
          frozenset(group_uuid
                    for instance_uuid in owned_instances.keys()
                    for group_uuid in
                    self.cfg.GetInstanceNodeGroups(instance_uuid))

      elif level == locking.LEVEL_NODE:
        self._LockInstancesNodes()

      elif level == locking.LEVEL_NETWORK:
        self.needed_locks[locking.LEVEL_NETWORK] = \
          frozenset(net_uuid
                    for instance_uuid in owned_instances.keys()
                    for net_uuid in
                    self.cfg.GetInstanceNetworks(instance_uuid))

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the optional instance list against the existing names.

    """
    owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))
    owned_networks = frozenset(self.owned_locks(locking.LEVEL_NETWORK))

    if self.wanted_names is None:
      assert self.op.use_locking, "Locking was not used"
      self.wanted_names = owned_instances

    instances = dict(self.cfg.GetMultiInstanceInfoByName(self.wanted_names))

    if self.op.use_locking:
      CheckInstancesNodeGroups(self.cfg, instances, owned_groups,
                               owned_node_uuids, None)
    else:
      assert not (owned_instances or owned_groups or
                  owned_node_uuids or owned_networks)

    self.wanted_instances = instances.values()

  def _ComputeBlockdevStatus(self, node_uuid, instance, dev):
    """Returns the status of a block device

    """
    if self.op.static or not node_uuid:
      return None

    result = self.rpc.call_blockdev_find(node_uuid, (dev, instance))
    if result.offline:
      return None

    result.Raise("Can't compute disk status for %s" % instance.name)

    status = result.payload
    if status is None:
      return None

    return (status.dev_path, status.major, status.minor,
            status.sync_percent, status.estimated_time,
            status.is_degraded, status.ldisk_status)

  def _ComputeDiskStatus(self, instance, node_uuid2name_fn, dev):
    """Compute block device status.

    """
    (anno_dev,) = AnnotateDiskParams(instance, [dev], self.cfg)

    return self._ComputeDiskStatusInner(instance, None, node_uuid2name_fn,
                                        anno_dev)

  def _ComputeDiskStatusInner(self, instance, snode_uuid, node_uuid2name_fn,
                              dev):
    """Compute block device status.

    @attention: The device has to be annotated already.

    """
    drbd_info = None
    output_logical_id = dev.logical_id
    if dev.dev_type in constants.DTS_DRBD:
      # we change the snode then (otherwise we use the one passed in)
      if dev.logical_id[0] == instance.primary_node:
        snode_uuid = dev.logical_id[1]
        snode_minor = dev.logical_id[4]
        pnode_minor = dev.logical_id[3]
      else:
        snode_uuid = dev.logical_id[0]
        snode_minor = dev.logical_id[3]
        pnode_minor = dev.logical_id[4]
      drbd_info = {
        "primary_node": node_uuid2name_fn(instance.primary_node),
        "primary_minor": pnode_minor,
        "secondary_node": node_uuid2name_fn(snode_uuid),
        "secondary_minor": snode_minor,
        "port": dev.logical_id[2],
      }
      # replace the secret present at the end of the ids with None
      output_logical_id = dev.logical_id[:-1] + (None,)

    dev_pstatus = self._ComputeBlockdevStatus(instance.primary_node,
                                              instance, dev)
    dev_sstatus = self._ComputeBlockdevStatus(snode_uuid, instance, dev)

    if dev.children:
      dev_children = map(compat.partial(self._ComputeDiskStatusInner,
                                        instance, snode_uuid,
                                        node_uuid2name_fn),
                         dev.children)
    else:
      dev_children = []

    return {
      "iv_name": dev.iv_name,
      "dev_type": dev.dev_type,
      "logical_id": output_logical_id,
      "drbd_info": drbd_info,
      "pstatus": dev_pstatus,
      "sstatus": dev_sstatus,
      "children": dev_children,
      "mode": dev.mode,
      "size": dev.size,
      "spindles": dev.spindles,
      "name": dev.name,
      "uuid": dev.uuid,
    }
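
  # Note on the DRBD handling above (tuple layout inferred from the index
  # accesses in this module, so treat it as an assumption): logical_id for a
  # DRBD disk appears to be a 6-tuple of the form
  #   (node_a_uuid, node_b_uuid, port, minor_on_node_a, minor_on_node_b,
  #    shared_secret)
  # which is why the entry that is not the primary node is taken as the
  # secondary, the minors are mapped accordingly, and the trailing secret is
  # replaced with None before the logical_id is exposed to callers.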

  def Exec(self, feedback_fn):
    """Gather and return data"""
    result = {}

    cluster = self.cfg.GetClusterInfo()

    node_uuids = itertools.chain(*(self.cfg.GetInstanceNodes(i.uuid)
                                   for i in self.wanted_instances))
    nodes = dict(self.cfg.GetMultiNodeInfo(node_uuids))

    groups = dict(self.cfg.GetMultiNodeGroupInfo(node.group
                                                 for node in nodes.values()))

    for instance in self.wanted_instances:
      pnode = nodes[instance.primary_node]
      hvparams = cluster.FillHV(instance, skip_globals=True)

      if self.op.static or pnode.offline:
        remote_state = None
        if pnode.offline:
          self.LogWarning("Primary node %s is marked offline, returning static"
                          " information only for instance %s" %
                          (pnode.name, instance.name))
      else:
        remote_info = self.rpc.call_instance_info(
            instance.primary_node, instance.name, instance.hypervisor,
            cluster.hvparams[instance.hypervisor])
        remote_info.Raise("Error checking node %s" % pnode.name)
        remote_info = remote_info.payload

        allow_userdown = \
            cluster.enabled_user_shutdown and \
            (instance.hypervisor != constants.HT_KVM or
             hvparams[constants.HV_KVM_USER_SHUTDOWN])

        if remote_info and "state" in remote_info:
          if hv_base.HvInstanceState.IsShutdown(remote_info["state"]):
            if allow_userdown:
              remote_state = "user down"
            else:
              remote_state = "down"
          else:
            remote_state = "up"
        else:
          if instance.admin_state == constants.ADMINST_UP:
            remote_state = "down"
          elif instance.admin_state == constants.ADMINST_DOWN:
            if instance.admin_state_source == constants.USER_SOURCE:
              remote_state = "user down"
            else:
              remote_state = "down"
          else:
            remote_state = "offline"

      group2name_fn = lambda uuid: groups[uuid].name
      node_uuid2name_fn = lambda uuid: nodes[uuid].name

      disks = map(compat.partial(self._ComputeDiskStatus, instance,
                                 node_uuid2name_fn),
                  self.cfg.GetInstanceDisks(instance.uuid))

      secondary_nodes = self.cfg.GetInstanceSecondaryNodes(instance.uuid)
      snodes_group_uuids = [nodes[snode_uuid].group
                            for snode_uuid in secondary_nodes]

      result[instance.name] = {
        "name": instance.name,
        "config_state": instance.admin_state,
        "run_state": remote_state,
        "pnode": pnode.name,
        "pnode_group_uuid": pnode.group,
        "pnode_group_name": group2name_fn(pnode.group),
        "snodes": map(node_uuid2name_fn, secondary_nodes),
        "snodes_group_uuids": snodes_group_uuids,
        "snodes_group_names": map(group2name_fn, snodes_group_uuids),
        "os": instance.os,
        # this happens to be the same format used for hooks
        "nics": NICListToTuple(self, instance.nics),
        "disk_template": instance.disk_template,
        "disks": disks,
        "hypervisor": instance.hypervisor,
        "network_port": instance.network_port,
        "hv_instance": instance.hvparams,
        "hv_actual": hvparams,
        "be_instance": instance.beparams,
        "be_actual": cluster.FillBE(instance),
        "os_instance": instance.osparams,
        "os_actual": cluster.SimpleFillOS(instance.os, instance.osparams),
        "serial_no": instance.serial_no,
        "mtime": instance.mtime,
        "ctime": instance.ctime,
        "uuid": instance.uuid,
      }

    return result
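
This logical unit is normally reached through the OpInstanceQueryData opcode (for example via "gnt-instance info") rather than being instantiated directly. The following is a minimal client-side sketch, not part of the module above: it assumes the standard ganeti.opcodes.OpInstanceQueryData class and the ganeti.cli.SubmitOpCode helper, its field names simply mirror the self.op.instances, self.op.static and self.op.use_locking attributes read in ExpandNames, and the instance name is a placeholder.

  # Sketch only: submit the opcode that drives LUInstanceQueryData and
  # consume the per-instance dict built by Exec() above.
  from ganeti import opcodes
  from ganeti import cli

  op = opcodes.OpInstanceQueryData(instances=["instance1.example.com"],
                                   static=False,
                                   use_locking=True)
  result = cli.SubmitOpCode(op)

  for name, info in result.items():
    print("%s: %s on %s" % (name, info["run_state"], info["pnode"]))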