Package ganeti :: Package cmdlib :: Module instance_operation
[hide private]
[frames] | [no frames]

Source Code for Module ganeti.cmdlib.instance_operation

  1  # 
  2  # 
  3   
  4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. 
  5  # 
  6  # This program is free software; you can redistribute it and/or modify 
  7  # it under the terms of the GNU General Public License as published by 
  8  # the Free Software Foundation; either version 2 of the License, or 
  9  # (at your option) any later version. 
 10  # 
 11  # This program is distributed in the hope that it will be useful, but 
 12  # WITHOUT ANY WARRANTY; without even the implied warranty of 
 13  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
 14  # General Public License for more details. 
 15  # 
 16  # You should have received a copy of the GNU General Public License 
 17  # along with this program; if not, write to the Free Software 
 18  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
 19  # 02110-1301, USA. 
 20   
 21   
 22  """Logical units dealing with instance operations (start/stop/...). 
 23   
 24  Those operations have in common that they affect the operating system in a 
 25  running instance directly. 
 26   
 27  """ 
 28   
 29  import logging 
 30   
 31  from ganeti import constants 
 32  from ganeti import errors 
 33  from ganeti import hypervisor 
 34  from ganeti import locking 
 35  from ganeti import objects 
 36  from ganeti import utils 
 37  from ganeti.cmdlib.base import LogicalUnit, NoHooksLU 
 38  from ganeti.cmdlib.common import INSTANCE_ONLINE, INSTANCE_DOWN, \ 
 39    CheckHVParams, CheckInstanceState, CheckNodeOnline, GetUpdatedParams, \ 
 40    CheckOSParams, ShareAll 
 41  from ganeti.cmdlib.instance_storage import StartInstanceDisks, \ 
 42    ShutdownInstanceDisks 
 43  from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \ 
 44    CheckInstanceBridgesExist, CheckNodeFreeMemory, CheckNodeHasOS 
 45   
 46   
class LUInstanceStartup(LogicalUnit):
  """Starts an instance.

  """
  HPATH = "instance-start"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    Normalizes and type-checks the optional backend parameter overrides
    supplied with the opcode; hypervisor parameters are checked later in
    L{CheckPrereq}, where the cluster defaults are available.

    """
    # extra beparams
    if self.op.beparams:
      # fill the beparams dict
      objects.UpgradeBeParams(self.op.beparams)
      utils.ForceDictType(self.op.beparams, constants.BES_PARAMETER_TYPES)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    # Node resource locks are computed later (primary node only, see
    # DeclareLocks), hence LOCKS_REPLACE here.
    self.recalculate_locks[locking.LEVEL_NODE_RES] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES:
      # Only the primary node's resources are needed to start an instance
      self._LockInstancesNodes(primary_only=True, level=locking.LEVEL_NODE_RES)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "FORCE": self.op.force,
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    @return: tuple of (pre-hook nodes, post-hook nodes): master plus all
        instance nodes in both phases

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    cluster = self.cfg.GetClusterInfo()
    # extra hvparams
    if self.op.hvparams:
      # check hypervisor parameter syntax (locally)
      utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
      filled_hvp = cluster.FillHV(self.instance)
      filled_hvp.update(self.op.hvparams)
      hv_type = hypervisor.GetHypervisorClass(self.instance.hypervisor)
      hv_type.CheckParameterSyntax(filled_hvp)
      CheckHVParams(self, self.instance.all_nodes, self.instance.hypervisor,
                    filled_hvp)

    CheckInstanceState(self, self.instance, INSTANCE_ONLINE)

    self.primary_offline = \
      self.cfg.GetNodeInfo(self.instance.primary_node).offline

    if self.primary_offline and self.op.ignore_offline_nodes:
      self.LogWarning("Ignoring offline primary node")

      # With an offline primary we cannot contact the hypervisor, so any
      # parameter overrides cannot be applied
      if self.op.hvparams or self.op.beparams:
        self.LogWarning("Overridden parameters are ignored")
    else:
      CheckNodeOnline(self, self.instance.primary_node)

      # Reuse the cluster object fetched above instead of re-querying the
      # configuration (was self.cfg.GetClusterInfo() again)
      bep = cluster.FillBE(self.instance)
      bep.update(self.op.beparams)

      # check bridges existence
      CheckInstanceBridgesExist(self, self.instance)

      remote_info = self.rpc.call_instance_info(
          self.instance.primary_node, self.instance.name,
          self.instance.hypervisor, cluster.hvparams[self.instance.hypervisor])
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node),
                        prereq=True, ecode=errors.ECODE_ENVIRON)
      # Only verify free memory if the instance is not already running
      if not remote_info.payload: # not running already
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "starting instance %s" % self.instance.name,
            bep[constants.BE_MINMEM], self.instance.hypervisor,
            cluster.hvparams[self.instance.hypervisor])

  def Exec(self, feedback_fn):
    """Start the instance.

    """
    if not self.op.no_remember:
      self.cfg.MarkInstanceUp(self.instance.uuid)

    if self.primary_offline:
      assert self.op.ignore_offline_nodes
      self.LogInfo("Primary node offline, marked instance as started")
    else:
      StartInstanceDisks(self, self.instance, self.op.force)

      result = \
        self.rpc.call_instance_start(self.instance.primary_node,
                                     (self.instance, self.op.hvparams,
                                      self.op.beparams),
                                     self.op.startup_paused, self.op.reason)
      msg = result.fail_msg
      if msg:
        # Roll back the disk activation before reporting the failure
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance: %s" % msg)
class LUInstanceShutdown(LogicalUnit):
  """Shutdown an instance.

  """
  HPATH = "instance-stop"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["TIMEOUT"] = self.op.timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    @return: tuple of (pre-hook nodes, post-hook nodes): master plus all
        instance nodes in both phases

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # With --force the instance-state check is skipped (warned about only)
    if not self.op.force:
      CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
    else:
      self.LogWarning("Ignoring offline instance check")

    self.primary_offline = \
      self.cfg.GetNodeInfo(self.instance.primary_node).offline

    if self.primary_offline and self.op.ignore_offline_nodes:
      self.LogWarning("Ignoring offline primary node")
    else:
      CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Shutdown the instance.

    """
    # If the instance is offline we shouldn't mark it as down, as that
    # resets the offline flag.
    if not self.op.no_remember and self.instance.admin_state in INSTANCE_ONLINE:
      self.cfg.MarkInstanceDown(self.instance.uuid)

    if self.primary_offline:
      # Nothing to contact: config was updated above, just report
      assert self.op.ignore_offline_nodes
      self.LogInfo("Primary node offline, marked instance as stopped")
    else:
      result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                               self.instance,
                                               self.op.timeout, self.op.reason)
      msg = result.fail_msg
      if msg:
        # Best-effort: a failed shutdown is only warned about, the disks
        # are deactivated regardless
        self.LogWarning("Could not shutdown instance: %s", msg)

      ShutdownInstanceDisks(self, self.instance)
class LUInstanceReinstall(LogicalUnit):
  """Reinstall an instance.

  """
  HPATH = "instance-reinstall"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    return BuildInstanceHookEnvByObject(self, self.instance)

  def BuildHooksNodes(self):
    """Build hooks nodes.

    @return: tuple of (pre-hook nodes, post-hook nodes): master plus all
        instance nodes in both phases

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    CheckNodeOnline(self, instance.primary_node, "Instance primary node"
                    " offline, cannot reinstall")

    # A diskless instance has no OS to reinstall
    if instance.disk_template == constants.DT_DISKLESS:
      raise errors.OpPrereqError("Instance '%s' has no disks" %
                                 self.op.instance_name,
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, instance, INSTANCE_DOWN, msg="cannot reinstall")

    if self.op.os_type is not None:
      # OS verification
      CheckNodeHasOS(self, instance.primary_node, self.op.os_type,
                     self.op.force_variant)
      instance_os = self.op.os_type
    else:
      instance_os = instance.os

    node_uuids = list(instance.all_nodes)

    if self.op.osparams:
      # Merge requested OS parameter changes on top of the current ones
      # and validate them on all instance nodes
      i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
      CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
      self.os_inst = i_osdict # the new dict (without defaults)
    else:
      self.os_inst = None

    self.instance = instance

  def Exec(self, feedback_fn):
    """Reinstall the instance.

    """
    if self.op.os_type is not None:
      feedback_fn("Changing OS to '%s'..." % self.op.os_type)
      self.instance.os = self.op.os_type
      # Write to configuration
      self.cfg.Update(self.instance, feedback_fn)

    StartInstanceDisks(self, self.instance, None)
    try:
      feedback_fn("Running the instance OS create scripts...")
      # FIXME: pass debug option from opcode to backend
      result = self.rpc.call_instance_os_add(self.instance.primary_node,
                                             (self.instance, self.os_inst),
                                             True, self.op.debug_level)
      result.Raise("Could not install OS for instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))
    finally:
      # Disks are always deactivated again, even if the OS scripts failed
      ShutdownInstanceDisks(self, self.instance)
class LUInstanceReboot(LogicalUnit):
  """Reboot an instance.

  """
  HPATH = "instance-reboot"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "IGNORE_SECONDARIES": self.op.ignore_secondaries,
      "REBOOT_TYPE": self.op.reboot_type,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    @return: tuple of (pre-hook nodes, post-hook nodes): master plus all
        instance nodes in both phases

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
    CheckNodeOnline(self, self.instance.primary_node)

    # check bridges existence
    CheckInstanceBridgesExist(self, self.instance)

  def Exec(self, feedback_fn):
    """Reboot the instance.

    """
    cluster = self.cfg.GetClusterInfo()
    remote_info = self.rpc.call_instance_info(
        self.instance.primary_node, self.instance.name,
        self.instance.hypervisor, cluster.hvparams[self.instance.hypervisor])
    remote_info.Raise("Error checking node %s" %
                      self.cfg.GetNodeName(self.instance.primary_node))
    instance_running = bool(remote_info.payload)

    current_node_uuid = self.instance.primary_node

    # Soft/hard reboots of a running instance are delegated to the
    # hypervisor via a single RPC; anything else falls back to a full
    # shutdown-and-start cycle below
    if instance_running and \
        self.op.reboot_type in [constants.INSTANCE_REBOOT_SOFT,
                                constants.INSTANCE_REBOOT_HARD]:
      for disk in self.instance.disks:
        self.cfg.SetDiskID(disk, current_node_uuid)
      result = self.rpc.call_instance_reboot(current_node_uuid, self.instance,
                                             self.op.reboot_type,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
      result.Raise("Could not reboot instance")
    else:
      if instance_running:
        result = self.rpc.call_instance_shutdown(current_node_uuid,
                                                 self.instance,
                                                 self.op.shutdown_timeout,
                                                 self.op.reason)
        result.Raise("Could not shutdown instance for full reboot")
        ShutdownInstanceDisks(self, self.instance)
      else:
        self.LogInfo("Instance %s was already stopped, starting now",
                     self.instance.name)
      StartInstanceDisks(self, self.instance, self.op.ignore_secondaries)
      result = self.rpc.call_instance_start(current_node_uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        # Undo the disk activation before failing
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance for"
                                 " full reboot: %s" % msg)

    self.cfg.MarkInstanceUp(self.instance.uuid)
def GetInstanceConsole(cluster, instance, primary_node):
  """Returns console information for an instance.

  @type cluster: L{objects.Cluster}
  @type instance: L{objects.Instance}
  @type primary_node: L{objects.Node}
  @rtype: dict

  """
  hv_class = hypervisor.GetHypervisorClass(instance.hypervisor)
  # The filled hvparams/beparams are handed over as separate arguments so
  # that the instance object itself is never edited (which would end up
  # saving the merged defaults into the instance).
  console = hv_class.GetInstanceConsole(instance, primary_node,
                                        cluster.FillHV(instance),
                                        cluster.FillBE(instance))

  assert console.instance == instance.name
  assert console.Validate()

  return console.ToDict()
class LUInstanceConsole(NoHooksLU):
  """Connect to an instance's console.

  This is somewhat special in that it returns the command line that
  you need to run on the master node in order to connect to the
  console.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # Read-only operation, so all locks can be shared
    self.share_locks = ShareAll()
    self._ExpandAndLockInstance()

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    CheckNodeOnline(self, self.instance.primary_node)

  def Exec(self, feedback_fn):
    """Connect to the console of an instance

    """
    node_uuid = self.instance.primary_node

    cluster_hvparams = self.cfg.GetClusterInfo().hvparams
    node_insts = self.rpc.call_instance_list(
        [node_uuid], [self.instance.hypervisor],
        cluster_hvparams)[node_uuid]
    node_insts.Raise("Can't get node information from %s" %
                     self.cfg.GetNodeName(node_uuid))

    # The console can only be returned for a running instance; translate
    # the admin state into the matching "not running" state for the error
    if self.instance.name not in node_insts.payload:
      if self.instance.admin_state == constants.ADMINST_UP:
        state = constants.INSTST_ERRORDOWN
      elif self.instance.admin_state == constants.ADMINST_DOWN:
        state = constants.INSTST_ADMINDOWN
      else:
        state = constants.INSTST_ADMINOFFLINE
      raise errors.OpExecError("Instance %s is not running (state %s)" %
                               (self.instance.name, state))

    logging.debug("Connecting to console of %s on %s", self.instance.name,
                  self.cfg.GetNodeName(node_uuid))

    return GetInstanceConsole(self.cfg.GetClusterInfo(), self.instance,
                              self.cfg.GetNodeInfo(self.instance.primary_node))