Source Code for Module ganeti.cmdlib.misc

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Miscellaneous logical units that don't fit into any category."""

import logging
import time

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import qlang
from ganeti import query
from ganeti import utils
from ganeti.cmdlib.base import NoHooksLU, QueryBase
from ganeti.cmdlib.common import GetWantedNodes, SupportsOob

class LUOobCommand(NoHooksLU):
  """Logical unit for OOB handling.

  """
  REQ_BGL = False
  _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)

  def ExpandNames(self):
    """Gather locks we need.

    """
    if self.op.node_names:
      (self.op.node_uuids, self.op.node_names) = \
        GetWantedNodes(self, self.op.node_names)
      lock_node_uuids = self.op.node_uuids
    else:
      lock_node_uuids = locking.ALL_SET

    self.needed_locks = {
      locking.LEVEL_NODE: lock_node_uuids,
      }

    self.share_locks[locking.LEVEL_NODE_ALLOC] = 1

    if not self.op.node_names:
      # Acquire node allocation lock only if all nodes are affected
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - OOB is supported

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.nodes = []
    self.master_node_uuid = self.cfg.GetMasterNode()
    master_node_obj = self.cfg.GetNodeInfo(self.master_node_uuid)

    assert self.op.power_delay >= 0.0

    if self.op.node_uuids:
      if (self.op.command in self._SKIP_MASTER and
          master_node_obj.uuid in self.op.node_uuids):
        master_oob_handler = SupportsOob(self.cfg, master_node_obj)

        if master_oob_handler:
          additional_text = ("run '%s %s %s' if you want to operate on the"
                             " master regardless") % (master_oob_handler,
                                                      self.op.command,
                                                      master_node_obj.name)
        else:
          additional_text = "it does not support out-of-band operations"

        raise errors.OpPrereqError(("Operating on the master node %s is not"
                                    " allowed for %s; %s") %
                                   (master_node_obj.name, self.op.command,
                                    additional_text), errors.ECODE_INVAL)
    else:
      self.op.node_uuids = self.cfg.GetNodeList()
      if self.op.command in self._SKIP_MASTER:
        self.op.node_uuids.remove(master_node_obj.uuid)

    if self.op.command in self._SKIP_MASTER:
      assert master_node_obj.uuid not in self.op.node_uuids

    for node_uuid in self.op.node_uuids:
      node = self.cfg.GetNodeInfo(node_uuid)
      if node is None:
        raise errors.OpPrereqError("Node %s not found" % node_uuid,
                                   errors.ECODE_NOENT)

      self.nodes.append(node)

      if (not self.op.ignore_status and
          (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
        raise errors.OpPrereqError(("Cannot power off node %s because it is"
                                    " not marked offline") % node.name,
                                   errors.ECODE_STATE)

  def Exec(self, feedback_fn):
    """Execute OOB and return result if we expect any.

    """
    ret = []

    for idx, node in enumerate(utils.NiceSort(self.nodes,
                                              key=lambda node: node.name)):
      node_entry = [(constants.RS_NORMAL, node.name)]
      ret.append(node_entry)

      oob_program = SupportsOob(self.cfg, node)

      if not oob_program:
        node_entry.append((constants.RS_UNAVAIL, None))
        continue

      logging.info("Executing out-of-band command '%s' using '%s' on %s",
                   self.op.command, oob_program, node.name)
      result = self.rpc.call_run_oob(self.master_node_uuid, oob_program,
                                     self.op.command, node.name,
                                     self.op.timeout)

      if result.fail_msg:
        self.LogWarning("Out-of-band RPC failed on node '%s': %s",
                        node.name, result.fail_msg)
        node_entry.append((constants.RS_NODATA, None))
      else:
        try:
          self._CheckPayload(result)
        except errors.OpExecError, err:
          self.LogWarning("Payload returned by node '%s' is not valid: %s",
                          node.name, err)
          node_entry.append((constants.RS_NODATA, None))
        else:
          if self.op.command == constants.OOB_HEALTH:
            # For health we should log important events
            for item, status in result.payload:
              if status in [constants.OOB_STATUS_WARNING,
                            constants.OOB_STATUS_CRITICAL]:
                self.LogWarning("Item '%s' on node '%s' has status '%s'",
                                item, node.name, status)

          if self.op.command == constants.OOB_POWER_ON:
            node.powered = True
          elif self.op.command == constants.OOB_POWER_OFF:
            node.powered = False
          elif self.op.command == constants.OOB_POWER_STATUS:
            powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
            if powered != node.powered:
              logging.warning(("Recorded power state (%s) of node '%s' does"
                               " not match actual power state (%s)"),
                              node.powered, node.name, powered)

          # For configuration changing commands we should update the node
          if self.op.command in (constants.OOB_POWER_ON,
                                 constants.OOB_POWER_OFF):
            self.cfg.Update(node, feedback_fn)

          node_entry.append((constants.RS_NORMAL, result.payload))

      if (self.op.command == constants.OOB_POWER_ON and
          idx < len(self.nodes) - 1):
        time.sleep(self.op.power_delay)

    return ret

  def _CheckPayload(self, result):
    """Checks if the payload is valid.

    @param result: RPC result
    @raises errors.OpExecError: If payload is not valid

    """
    errs = []
    if self.op.command == constants.OOB_HEALTH:
      if not isinstance(result.payload, list):
        errs.append("command 'health' is expected to return a list but got"
                    " %s" % type(result.payload))
      else:
        for item, status in result.payload:
          if status not in constants.OOB_STATUSES:
            errs.append("health item '%s' has invalid status '%s'" %
                        (item, status))

    if self.op.command == constants.OOB_POWER_STATUS:
      if not isinstance(result.payload, dict):
        errs.append("power-status is expected to return a dict but got %s" %
                    type(result.payload))

    if self.op.command in [
        constants.OOB_POWER_ON,
        constants.OOB_POWER_OFF,
        constants.OOB_POWER_CYCLE,
        ]:
      if result.payload is not None:
        errs.append("%s is expected to not return payload but got '%s'" %
                    (self.op.command, result.payload))

    if errs:
      raise errors.OpExecError("Check of out-of-band payload failed due to"
                               " %s" % utils.CommaJoin(errs))

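# An illustrative summary (not spelled out in the source; the literal status
# values and item names below are assumptions) of the payload shapes that
# LUOobCommand._CheckPayload accepts per OOB command:
#
#   OOB_HEALTH:       a list of (item, status) pairs with every status in
#                     constants.OOB_STATUSES,
#                     e.g. [("PSU0", "OK"), ("FAN1", "WARNING")]
#   OOB_POWER_STATUS: a dict containing the constants.OOB_POWER_STATUS_POWERED
#                     key, e.g. {"powered": True}
#   OOB_POWER_ON/OFF/CYCLE: no payload at all (None)
#
# Exec in turn returns one entry per node: a list that starts with
# (constants.RS_NORMAL, <node name>) and ends with (RS_NORMAL, <payload>) on
# success, (RS_UNAVAIL, None) if the node has no OOB program, or
# (RS_NODATA, None) if the RPC or the payload check failed.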

class ExtStorageQuery(QueryBase):
  FIELDS = query.EXTSTORAGE_FIELDS

  def ExpandNames(self, lu):
    # Lock all nodes in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    lu.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

    # The following variables interact with _QueryBase._GetNames
    if self.names:
      self.wanted = [lu.cfg.GetNodeInfoByName(name).uuid
                     for name in self.names]
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = self.use_locking

  def DeclareLocks(self, lu, level):
    pass

  @staticmethod
  def _DiagnoseByProvider(rlist):
    """Remaps a per-node return list into a per-provider per-node dictionary

    @param rlist: a map with node uuids as keys and ExtStorage objects as
        values

    @rtype: dict
    @return: a dictionary with extstorage providers as keys and as
        value another map, with node uuids as keys and tuples of
        (path, status, diagnose, parameters) as values, eg::

          {"provider1": {"node_uuid1": [("/usr/lib/...", True, "", [])],
                         "node_uuid2": [("/srv/...", False, "missing file")],
                         "node_uuid3": [("/srv/...", True, "", [])],
                        }
          }

    """
    all_es = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all providers invalid
    good_nodes = [node_uuid for node_uuid in rlist
                  if not rlist[node_uuid].fail_msg]
    for node_uuid, nr in rlist.items():
      if nr.fail_msg or not nr.payload:
        continue
      for (name, path, status, diagnose, params) in nr.payload:
        if name not in all_es:
          # build a list of nodes for this provider containing empty lists
          # for each node in node_list
          all_es[name] = {}
          for nuuid in good_nodes:
            all_es[name][nuuid] = []
        # convert params from [name, help] to (name, help)
        params = [tuple(v) for v in params]
        all_es[name][node_uuid].append((path, status, diagnose, params))
    return all_es

  def _GetQueryData(self, lu):
    """Computes the list of extstorage providers and their attributes.

    """
    # Locking is not used
    assert not (compat.any(lu.glm.is_owned(level)
                           for level in locking.LEVELS
                           if level != locking.LEVEL_CLUSTER) or
                self.do_locking or self.use_locking)

    valid_nodes = [node.uuid
                   for node in lu.cfg.GetAllNodesInfo().values()
                   if not node.offline and node.vm_capable]
    pol = self._DiagnoseByProvider(lu.rpc.call_extstorage_diagnose(valid_nodes))

    data = {}

    nodegroup_list = lu.cfg.GetNodeGroupList()

    for (es_name, es_data) in pol.items():
      # For every provider compute the nodegroup validity.
      # To do this we need to check the validity of each node in es_data
      # and then construct the corresponding nodegroup dict:
      #      { nodegroup1: status,
      #        nodegroup2: status,
      #      }
      ndgrp_data = {}
      for nodegroup in nodegroup_list:
        ndgrp = lu.cfg.GetNodeGroup(nodegroup)

        nodegroup_nodes = ndgrp.members
        nodegroup_name = ndgrp.name
        node_statuses = []

        for node in nodegroup_nodes:
          if node in valid_nodes:
            if es_data[node] != []:
              node_status = es_data[node][0][1]
              node_statuses.append(node_status)
            else:
              node_statuses.append(False)

        if False in node_statuses:
          ndgrp_data[nodegroup_name] = False
        else:
          ndgrp_data[nodegroup_name] = True

      # Compute the provider's parameters
      parameters = set()
      for idx, esl in enumerate(es_data.values()):
        valid = bool(esl and esl[0][1])
        if not valid:
          break

        node_params = esl[0][3]
        if idx == 0:
          # First entry
          parameters.update(node_params)
        else:
          # Filter out inconsistent values
          parameters.intersection_update(node_params)

      params = list(parameters)

      # Now fill all the info for this provider
      info = query.ExtStorageInfo(name=es_name, node_status=es_data,
                                  nodegroup_status=ndgrp_data,
                                  parameters=params)

      data[es_name] = info

    # Prepare data in requested order
    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
            if name in data]

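# A worked example of the parameter intersection in _GetQueryData, with
# hypothetical parameter names: if one node reports the provider parameters
# [("vol_size", "Volume size"), ("access", "Access mode")] and every other
# node reports only [("vol_size", "Volume size")], the initial
# parameters.update() on the first node followed by intersection_update()
# on the rest leaves just ("vol_size", "Volume size") to be returned in the
# parameters field of ExtStorageInfo.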

class LUExtStorageDiagnose(NoHooksLU):
  """Logical unit for ExtStorage diagnose/query.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.eq = ExtStorageQuery(qlang.MakeSimpleFilter("name", self.op.names),
                              self.op.output_fields, False)

  def ExpandNames(self):
    self.eq.ExpandNames(self)

  def Exec(self, feedback_fn):
    return self.eq.OldStyleQuery(self)

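# Note (an assumption, not stated in this module): LUExtStorageDiagnose only
# wires the opcode's name filter and output fields into ExtStorageQuery; the
# third constructor argument (presumably use_locking) is fixed to False,
# matching the lock-free ExpandNames of ExtStorageQuery above.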

class LURestrictedCommand(NoHooksLU):
  """Logical unit for executing restricted commands.

  """
  REQ_BGL = False

  def ExpandNames(self):
    if self.op.nodes:
      (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)

    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_uuids,
      }
    # Node locks are shared unless the opcode explicitly asked for locking
    self.share_locks = {
      locking.LEVEL_NODE: not self.op.use_locking,
      }

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Execute restricted command and return output.

    """
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    # Check if correct locks are held
    assert set(self.op.node_uuids).issubset(owned_nodes)

    rpcres = self.rpc.call_restricted_command(self.op.node_uuids,
                                              self.op.command)

    result = []

    for node_uuid in self.op.node_uuids:
      nres = rpcres[node_uuid]
      if nres.fail_msg:
        msg = ("Command '%s' on node '%s' failed: %s" %
               (self.op.command, self.cfg.GetNodeName(node_uuid),
                nres.fail_msg))
        result.append((False, msg))
      else:
        result.append((True, nres.payload))

    return result
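
# A sketch of what LURestrictedCommand.Exec returns, using hypothetical
# command and node names: one (success, value) pair per requested node, e.g.
#
#   [(True, "output of command 'foo' on node1"),
#    (False, "Command 'foo' on node 'node2' failed: connection timed out")]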