Package ganeti :: Package cmdlib :: Module misc
[hide private]
[frames] | [no frames]

Source Code for Module ganeti.cmdlib.misc

  1  # 
  2  # 
  3   
  4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. 
  5  # All rights reserved. 
  6  # 
  7  # Redistribution and use in source and binary forms, with or without 
  8  # modification, are permitted provided that the following conditions are 
  9  # met: 
 10  # 
 11  # 1. Redistributions of source code must retain the above copyright notice, 
 12  # this list of conditions and the following disclaimer. 
 13  # 
 14  # 2. Redistributions in binary form must reproduce the above copyright 
 15  # notice, this list of conditions and the following disclaimer in the 
 16  # documentation and/or other materials provided with the distribution. 
 17  # 
 18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
 19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
 20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
 21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
 22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
 23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
 24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
 25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
 26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
 27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
 28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 29   
 30   
 31  """Miscellaneous logical units that don't fit into any category.""" 
 32   
 33  import logging 
 34  import time 
 35   
 36  from ganeti import constants 
 37  from ganeti import errors 
 38  from ganeti import locking 
 39  from ganeti import qlang 
 40  from ganeti import query 
 41  from ganeti import utils 
 42  from ganeti.cmdlib.base import NoHooksLU, QueryBase 
 43  from ganeti.cmdlib.common import GetWantedNodes, SupportsOob 
class LUOobCommand(NoHooksLU):
  """Logical unit for OOB handling.

  Runs an out-of-band command (power on/off/cycle, power-status or health)
  on a set of nodes; the actual OOB helper program is invoked via an RPC to
  the master node.

  """
  REQ_BGL = False
  # Commands that must not be run against the master node itself, as they
  # would take down the node coordinating the operation
  _SKIP_MASTER = (constants.OOB_POWER_OFF, constants.OOB_POWER_CYCLE)

  def ExpandNames(self):
    """Gather locks we need.

    Locks the explicitly requested nodes, or all nodes when no node names
    were given in the opcode.

    """
    if self.op.node_names:
      (self.op.node_uuids, self.op.node_names) = \
        GetWantedNodes(self, self.op.node_names)
      lock_node_uuids = self.op.node_uuids
    else:
      lock_node_uuids = locking.ALL_SET

    self.needed_locks = {
      locking.LEVEL_NODE: lock_node_uuids,
      }

  def CheckPrereq(self):
    """Check prerequisites.

    This checks:
     - the node exists in the configuration
     - OOB is supported

    Any errors are signaled by raising errors.OpPrereqError.

    """
    self.nodes = []
    self.master_node_uuid = self.cfg.GetMasterNode()
    master_node_obj = self.cfg.GetNodeInfo(self.master_node_uuid)

    assert self.op.power_delay >= 0.0

    if self.op.node_uuids:
      # Explicit node list: refuse dangerous commands targeting the master,
      # unless the user re-runs the OOB helper against it directly
      if (self.op.command in self._SKIP_MASTER and
          master_node_obj.uuid in self.op.node_uuids):
        master_oob_handler = SupportsOob(self.cfg, master_node_obj)

        if master_oob_handler:
          additional_text = ("run '%s %s %s' if you want to operate on the"
                             " master regardless") % (master_oob_handler,
                                                      self.op.command,
                                                      master_node_obj.name)
        else:
          additional_text = "it does not support out-of-band operations"

        raise errors.OpPrereqError(("Operating on the master node %s is not"
                                    " allowed for %s; %s") %
                                   (master_node_obj.name, self.op.command,
                                    additional_text), errors.ECODE_INVAL)
    else:
      # No explicit list: operate on all nodes, silently dropping the
      # master for commands that must skip it
      self.op.node_uuids = self.cfg.GetNodeList()
      if self.op.command in self._SKIP_MASTER:
        self.op.node_uuids.remove(master_node_obj.uuid)

    if self.op.command in self._SKIP_MASTER:
      assert master_node_obj.uuid not in self.op.node_uuids

    for node_uuid in self.op.node_uuids:
      node = self.cfg.GetNodeInfo(node_uuid)
      if node is None:
        raise errors.OpPrereqError("Node %s not found" % node_uuid,
                                   errors.ECODE_NOENT)

      self.nodes.append(node)

      # Powering off a node that is still online is refused unless
      # explicitly overridden via ignore_status
      if (not self.op.ignore_status and
          (self.op.command == constants.OOB_POWER_OFF and not node.offline)):
        raise errors.OpPrereqError(("Cannot power off node %s because it is"
                                    " not marked offline") % node.name,
                                   errors.ECODE_STATE)

  def Exec(self, feedback_fn):
    """Execute OOB and return result if we expect any.

    @return: a list with one entry per node (sorted by node name), each
      entry being a list of (status, value) tuples in query-result style

    """
    ret = []

    for idx, node in enumerate(utils.NiceSort(self.nodes,
                                              key=lambda node: node.name)):
      node_entry = [(constants.RS_NORMAL, node.name)]
      ret.append(node_entry)

      oob_program = SupportsOob(self.cfg, node)

      if not oob_program:
        node_entry.append((constants.RS_UNAVAIL, None))
        continue

      logging.info("Executing out-of-band command '%s' using '%s' on %s",
                   self.op.command, oob_program, node.name)
      result = self.rpc.call_run_oob(self.master_node_uuid, oob_program,
                                     self.op.command, node.name,
                                     self.op.timeout)

      if result.fail_msg:
        self.LogWarning("Out-of-band RPC failed on node '%s': %s",
                        node.name, result.fail_msg)
        node_entry.append((constants.RS_NODATA, None))
      else:
        try:
          self._CheckPayload(result)
        # FIX: use Python 2.6+/3-compatible "as" syntax instead of the
        # legacy "except X, err" form removed in Python 3
        except errors.OpExecError as err:
          self.LogWarning("Payload returned by node '%s' is not valid: %s",
                          node.name, err)
          node_entry.append((constants.RS_NODATA, None))
        else:
          if self.op.command == constants.OOB_HEALTH:
            # For health we should log important events
            for item, status in result.payload:
              if status in [constants.OOB_STATUS_WARNING,
                            constants.OOB_STATUS_CRITICAL]:
                self.LogWarning("Item '%s' on node '%s' has status '%s'",
                                item, node.name, status)

          if self.op.command == constants.OOB_POWER_ON:
            node.powered = True
          elif self.op.command == constants.OOB_POWER_OFF:
            node.powered = False
          elif self.op.command == constants.OOB_POWER_STATUS:
            powered = result.payload[constants.OOB_POWER_STATUS_POWERED]
            if powered != node.powered:
              logging.warning(("Recorded power state (%s) of node '%s' does not"
                               " match actual power state (%s)"), node.powered,
                              node.name, powered)

          # For configuration changing commands we should update the node
          if self.op.command in (constants.OOB_POWER_ON,
                                 constants.OOB_POWER_OFF):
            self.cfg.Update(node, feedback_fn)

          node_entry.append((constants.RS_NORMAL, result.payload))

      # Stagger power-ons to avoid inrush load; no delay after the last node
      if (self.op.command == constants.OOB_POWER_ON and
          idx < len(self.nodes) - 1):
        time.sleep(self.op.power_delay)

    return ret

  def _CheckPayload(self, result):
    """Checks if the payload is valid.

    @param result: RPC result
    @raises errors.OpExecError: If payload is not valid

    """
    errs = []
    if self.op.command == constants.OOB_HEALTH:
      if not isinstance(result.payload, list):
        errs.append("command 'health' is expected to return a list but got %s" %
                    type(result.payload))
      else:
        for item, status in result.payload:
          if status not in constants.OOB_STATUSES:
            errs.append("health item '%s' has invalid status '%s'" %
                        (item, status))

    if self.op.command == constants.OOB_POWER_STATUS:
      if not isinstance(result.payload, dict):
        errs.append("power-status is expected to return a dict but got %s" %
                    type(result.payload))

    if self.op.command in [
      constants.OOB_POWER_ON,
      constants.OOB_POWER_OFF,
      constants.OOB_POWER_CYCLE,
      ]:
      if result.payload is not None:
        errs.append("%s is expected to not return payload but got '%s'" %
                    (self.op.command, result.payload))

    if errs:
      raise errors.OpExecError("Check of out-of-band payload failed due to %s" %
                               utils.CommaJoin(errs))
class ExtStorageQuery(QueryBase):
  """Query runner for ExtStorage providers.

  Collects per-node ExtStorage diagnose data via RPC and remaps it into
  per-provider records for the query framework.

  """
  FIELDS = query.EXTSTORAGE_FIELDS

  def ExpandNames(self, lu):
    """Compute the locks and name list needed for this query.

    @param lu: the owning logical unit; its C{needed_locks} is set here

    """
    # Lock all nodes in shared mode
    # Temporary removal of locks, should be reverted later
    # TODO: reintroduce locks when they are lighter-weight
    lu.needed_locks = {}
    #self.share_locks[locking.LEVEL_NODE] = 1
    #self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

    # The following variables interact with _QueryBase._GetNames
    if self.names:
      self.wanted = [lu.cfg.GetNodeInfoByName(name).uuid for name in self.names]
    else:
      self.wanted = locking.ALL_SET

    self.do_locking = self.use_locking

  def DeclareLocks(self, lu, level):
    # No per-level locks are needed while locking is disabled above
    pass

  @staticmethod
  def _DiagnoseByProvider(rlist):
    """Remaps a per-node return list into an a per-provider per-node dictionary

    @param rlist: a map with node uuids as keys and ExtStorage objects as values

    @rtype: dict
    @return: a dictionary with extstorage providers as keys and as
        value another map, with node uuids as keys and tuples of
        (path, status, diagnose, parameters) as values, eg::

          {"provider1": {"node_uuid1": [(/usr/lib/..., True, "", [])]
                         "node_uuid2": [(/srv/..., False, "missing file")]
                         "node_uuid3": [(/srv/..., True, "", [])]
          }

    """
    all_es = {}
    # we build here the list of nodes that didn't fail the RPC (at RPC
    # level), so that nodes with a non-responding node daemon don't
    # make all OSes invalid
    good_nodes = [node_uuid for node_uuid in rlist
                  if not rlist[node_uuid].fail_msg]
    for node_uuid, nr in rlist.items():
      # Skip nodes that failed at RPC level or returned nothing
      if nr.fail_msg or not nr.payload:
        continue
      for (name, path, status, diagnose, params) in nr.payload:
        if name not in all_es:
          # build a list of nodes for this os containing empty lists
          # for each node in node_list
          all_es[name] = {}
          for nuuid in good_nodes:
            all_es[name][nuuid] = []
        # convert params from [name, help] to (name, help)
        params = [tuple(v) for v in params]
        all_es[name][node_uuid].append((path, status, diagnose, params))
    return all_es

  def _GetQueryData(self, lu):
    """Computes the list of nodes and their attributes.

    @param lu: the owning logical unit, used for config and RPC access
    @return: list of L{query.ExtStorageInfo} objects in requested order

    """
    # Only online, VM-capable nodes are queried
    valid_nodes = [node.uuid
                   for node in lu.cfg.GetAllNodesInfo().values()
                   if not node.offline and node.vm_capable]
    pol = self._DiagnoseByProvider(lu.rpc.call_extstorage_diagnose(valid_nodes))

    data = {}

    nodegroup_list = lu.cfg.GetNodeGroupList()

    for (es_name, es_data) in pol.items():
      # For every provider compute the nodegroup validity.
      # To do this we need to check the validity of each node in es_data
      # and then construct the corresponding nodegroup dict:
      #      { nodegroup1: status
      #        nodegroup2: status
      #      }
      ndgrp_data = {}
      for nodegroup in nodegroup_list:
        ndgrp = lu.cfg.GetNodeGroup(nodegroup)

        nodegroup_nodes = ndgrp.members
        nodegroup_name = ndgrp.name
        node_statuses = []

        for node in nodegroup_nodes:
          if node in valid_nodes:
            if es_data[node] != []:
              node_status = es_data[node][0][1]
              node_statuses.append(node_status)
            else:
              node_statuses.append(False)

        # A nodegroup is valid only if every checked member is valid
        if False in node_statuses:
          ndgrp_data[nodegroup_name] = False
        else:
          ndgrp_data[nodegroup_name] = True

      # Compute the provider's parameters; if any node is invalid the
      # provider is unusable, so the partial parameter set is left as-is
      parameters = set()
      for idx, esl in enumerate(es_data.values()):
        valid = bool(esl and esl[0][1])
        if not valid:
          break

        node_params = esl[0][3]
        if idx == 0:
          # First entry
          parameters.update(node_params)
        else:
          # Filter out inconsistent values
          parameters.intersection_update(node_params)

      params = list(parameters)

      # Now fill all the info for this provider
      info = query.ExtStorageInfo(name=es_name, node_status=es_data,
                                  nodegroup_status=ndgrp_data,
                                  parameters=params)

      data[es_name] = info

    # Prepare data in requested order
    return [data[name] for name in self._GetNames(lu, pol.keys(), None)
            if name in data]
class LUExtStorageDiagnose(NoHooksLU):
  """Logical unit for ExtStorage diagnose/query.

  Thin wrapper which delegates all of the actual work to an
  L{ExtStorageQuery} helper instance.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Build the query helper from the opcode's name filter and fields.

    """
    name_filter = qlang.MakeSimpleFilter("name", self.op.names)
    self.eq = ExtStorageQuery(name_filter, self.op.output_fields, False)

  def ExpandNames(self):
    """Let the query helper compute the locks we need.

    """
    self.eq.ExpandNames(self)

  def Exec(self, feedback_fn):
    """Run the query and return its old-style result.

    """
    return self.eq.OldStyleQuery(self)
class LURestrictedCommand(NoHooksLU):
  """Logical unit for executing restricted commands.

  Runs a pre-approved command on a set of nodes and collects per-node
  (success, output-or-error) tuples.

  """
  REQ_BGL = False

  def ExpandNames(self):
    """Resolve requested nodes and declare the node locks we need.

    """
    if self.op.nodes:
      (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)

    self.needed_locks = {
      locking.LEVEL_NODE: self.op.node_uuids,
      }
    self.share_locks = {
      locking.LEVEL_NODE: not self.op.use_locking,
      }

  def CheckPrereq(self):
    """Check prerequisites.

    """

  def Exec(self, feedback_fn):
    """Execute restricted command and return output.

    @return: list of (success, payload-or-message) tuples, one per node,
      in the order of C{self.op.node_uuids}

    """
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    # Check if correct locks are held
    assert set(self.op.node_uuids).issubset(owned_nodes)

    node_results = self.rpc.call_restricted_command(self.op.node_uuids,
                                                    self.op.command)

    output = []
    for uuid in self.op.node_uuids:
      res = node_results[uuid]
      if not res.fail_msg:
        output.append((True, res.payload))
        continue

      msg = ("Command '%s' on node '%s' failed: %s" %
             (self.op.command, self.cfg.GetNodeName(uuid),
              res.fail_msg))
      output.append((False, msg))

    return output