
Source Code for Module ganeti.cmdlib.instance

#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


"""Logical units dealing with instances."""

import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import \
  INSTANCE_NOT_RUNNING, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, \
  ExpandInstanceUuidAndName, \
  CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
  ComputeDisks, \
  StartInstanceDisks, ShutdownInstanceDisks, \
  AssembleInstanceDisks
from ganeti.cmdlib.instance_utils import \
  BuildInstanceHookEnvByObject, \
  CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, \
  CheckInstanceExistence, \
  CheckHostnameSane, CheckOpportunisticLocking, ComputeFullBeParams, \
  ComputeNics, CreateInstanceAllocRequest
import ganeti.masterd.instance
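# Note on how these logical units are driven (an orientation sketch added by
# the editor, not part of the upstream module): the master daemon's opcode
# processor (ganeti.mcpu) runs a LogicalUnit roughly in this order:
#
#   lu.CheckArguments()
#   lu.ExpandNames()
#   lu.DeclareLocks(level)      # once per lock level the LU declared
#   lu.CheckPrereq()
#   lu.Exec(feedback_fn)
#
# with BuildHooksEnv()/BuildHooksNodes() consulted for the pre/post hooks of
# hook-enabled LUs. The exact sequencing lives in the processor, not in this
# file; the summary above is an assumption for reading the classes below.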
class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

    self._new_name_resolved = False

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + \
      list(self.cfg.GetInstanceNodes(self.instance.uuid))
    return (nl, nl)
  def _PerformChecksAndResolveNewName(self):
    """Checks and resolves the new name, storing the FQDN, if permitted.

    """
    if self._new_name_resolved or not self.op.name_check:
      return

    hostname = CheckHostnameSane(self, self.op.new_name)
    self.op.new_name = hostname.name
    if (self.op.ip_check and
        netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
      raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                 (hostname.ip, self.op.new_name),
                                 errors.ECODE_NOTUNIQUE)
    self._new_name_resolved = True
  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a disabled
    # disk template, but in case it does, the renaming of file-based instances
    # will fail horribly. Thus, we test it before.
    for disk in self.cfg.GetInstanceDisks(instance.uuid):
      if (disk.dev_type in constants.DTS_FILEBASED and
          self.op.new_name != instance.name):
        # TODO: when disks are separate objects, this should check for disk
        # types, not disk templates.
        CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(), disk.dev_type)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    self._PerformChecksAndResolveNewName()

    if self.op.new_name != instance.name:
      CheckInstanceExistence(self, self.op.new_name)

  def ExpandNames(self):
    self._ExpandAndLockInstance(allow_forthcoming=True)

    # Note that this call might not resolve anything if name checks have been
    # disabled in the opcode. In this case, we might have a renaming collision
    # if a shortened name and a full name are used simultaneously, as we will
    # have two different locks. However, at that point the user has taken away
    # the tools necessary to detect this issue.
    self._PerformChecksAndResolveNewName()

    # Used to prevent instance namespace collisions.
    if self.op.new_name != self.op.instance_name:
      CheckInstanceExistence(self, self.op.new_name)
      self.add_locks[locking.LEVEL_INSTANCE] = self.op.new_name

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    renamed_storage = [d for d in disks
                       if (d.dev_type in constants.DTS_FILEBASED and
                           d.dev_type != constants.DT_GLUSTER)]
    if (renamed_storage and self.op.new_name != self.instance.name):
      disks = self.cfg.GetInstanceDisks(self.instance.uuid)
      old_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)

    # Assert that we have both the locks needed
    assert old_name in self.owned_locks(locking.LEVEL_INSTANCE)
    assert self.op.new_name in self.owned_locks(locking.LEVEL_INSTANCE)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
    disks = self.cfg.GetInstanceDisks(renamed_inst.uuid)

    if self.instance.forthcoming:
      return renamed_inst.name

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)
    renamed_inst = self.cfg.GetInstanceInfo(renamed_inst.uuid)

    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(disks):
      for node_uuid in self.cfg.GetInstanceNodes(renamed_inst.uuid):
        result = self.rpc.call_blockdev_setinfo(node_uuid,
                                                (disk, renamed_inst), info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name

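# Illustrative usage (editor's sketch, not part of the module): a rename is
# normally reached through an OpInstanceRename opcode submitted to the master
# daemon, for example via `gnt-instance rename <old-name> <new-name>` on the
# CLI. A rough programmatic equivalent, with field names mirroring the
# self.op attributes read above (instance_name, new_name, ip_check,
# name_check) and therefore assumptions rather than a verified opcode
# signature:
#
#   from ganeti import opcodes
#   op = opcodes.OpInstanceRename(instance_name="old.example.com",
#                                 new_name="new.example.com",
#                                 name_check=True, ip_check=True)
#   # submitting `op` as a job makes the master run this LU's
#   # ExpandNames/CheckPrereq/Exec sequence shown above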
class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance(allow_forthcoming=True)
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
    self.dont_collate_locks[locking.LEVEL_NODE] = True
    self.dont_collate_locks[locking.LEVEL_NODE_RES] = True

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance,
                                       secondary_nodes=self.secondary_nodes,
                                       disks=self.inst_disks)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.cfg.GetInstanceNodes(self.instance.uuid)) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name
    self.secondary_nodes = \
      self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
    self.inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.cfg.GetInstanceNodes(self.instance.uuid)) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    if not self.instance.forthcoming:
      logging.info("Shutting down instance %s on node %s", self.instance.name,
                   self.cfg.GetNodeName(self.instance.primary_node))

      result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                               self.instance,
                                               self.op.shutdown_timeout,
                                               self.op.reason)
      if self.op.ignore_failures:
        result.Warn("Warning: can't shutdown instance", feedback_fn)
      else:
        result.Raise("Could not shutdown instance %s on node %s" %
                     (self.instance.name,
                      self.cfg.GetNodeName(self.instance.primary_node)))
    else:
      logging.info("Instance %s on node %s is forthcoming; not shutting down",
                   self.instance.name,
                   self.cfg.GetNodeName(self.instance.primary_node))

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)

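# Illustrative usage (editor's sketch): removal is driven by an
# OpInstanceRemove opcode (CLI: `gnt-instance remove <name>`). The fields
# consumed above are self.op.shutdown_timeout, self.op.ignore_failures and
# self.op.reason, so a rough equivalent would look like the following; the
# field names are inferred from those attribute reads and are otherwise
# assumptions:
#
#   from ganeti import opcodes
#   op = opcodes.OpInstanceRemove(instance_name="inst1.example.com",
#                                 shutdown_timeout=120,
#                                 ignore_failures=False)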
class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  This LU is only used if the instance needs to be moved by copying the data
  from one node in the cluster to another. The instance is shut down, the
  data is copied to the new node and the configuration change is propagated,
  then the instance is started again.

  See also:
  L{LUInstanceFailover} for moving an instance on shared storage (no copying
  required).

  L{LUInstanceMigrate} for the live migration of an instance (no shutdown
  required).

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    (self.op.target_node_uuid, self.op.target_node) = \
      ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid,
                            self.op.target_node)
    self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and target nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    for idx, dsk in enumerate(disks):
      if dsk.dev_type not in constants.DTS_COPYABLE:
        raise errors.OpPrereqError("Instance disk %d has disk type %s and is"
                                   " not suitable for copying"
                                   % (idx, dsk.dev_type), errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    cluster = self.cfg.GetClusterInfo()
    bep = cluster.FillBE(self.instance)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the target node
      CheckNodeFreeMemory(
          self, target_node.uuid, "failing over instance %s" %
          self.instance.name, bep[constants.BE_MAXMEM],
          self.instance.hypervisor,
          cluster.hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      for disk_uuid in self.instance.disks:
        self.cfg.ReleaseDRBDMinors(disk_uuid)
      raise

    errs = []
    transfers = []
    # activate, get path, create transfer jobs
    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    for idx, disk in enumerate(disks):
      # FIXME: pass debug option from opcode to backend
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         None)
      transfers.append(dt)
      self.cfg.Update(disk, feedback_fn)

    import_result = \
      masterd.instance.TransferInstanceData(self, feedback_fn,
                                            source_node.uuid,
                                            target_node.uuid,
                                            target_node.secondary_ip,
                                            self.op.compress,
                                            self.instance, transfers)
    if not compat.all(import_result):
      errs.append("Failed to transfer instance data")

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        for disk_uuid in self.instance.disks:
          self.cfg.ReleaseDRBDMinors(disk_uuid)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)
    for disk in disks:
      self.cfg.SetDiskNodes(disk.uuid, [target_node.uuid])

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _, _ = AssembleInstanceDisks(self, self.instance,
                                             ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))

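# Illustrative usage (editor's sketch): a data-copying move corresponds to an
# OpInstanceMove opcode (CLI: `gnt-instance move -n <target-node> <instance>`).
# Exec() above shuts the instance down, creates empty disks on the target,
# streams each disk raw via masterd.instance.TransferInstanceData, updates
# primary_node in the configuration and restarts the instance if it was
# marked up. The field names below mirror the self.op attributes used above
# (target_node, shutdown_timeout, compress, ignore_consistency,
# ignore_ipolicy) and are assumptions about the opcode definition:
#
#   from ganeti import opcodes
#   op = opcodes.OpInstanceMove(instance_name="inst1.example.com",
#                               target_node="node3.example.com",
#                               compress=constants.IEC_NONE)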
class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if not has_nodes and self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)
  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {}

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        (inst.pnode_uuid, inst.pnode) = \
          ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
        nodeslist.append(inst.pnode_uuid)
        if inst.snode is not None:
          (inst.snode_uuid, inst.snode) = \
            ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
          nodeslist.append(inst.snode_uuid)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
  def CheckPrereq(self):
    """Check prerequisite.

    """
    if self.op.iallocator:
      cluster = self.cfg.GetClusterInfo()
      default_vg = self.cfg.GetVGName()
      ec_id = self.proc.GetECId()

      if self.op.opportunistic_locking:
        # Only consider nodes for which a lock is held
        node_whitelist = self.cfg.GetNodeNames(
          set(self.owned_locks(locking.LEVEL_NODE)) &
          set(self.owned_locks(locking.LEVEL_NODE_RES)))
      else:
        node_whitelist = None

      insts = [CreateInstanceAllocRequest(op, ComputeDisks(op.disks,
                                                           op.disk_template,
                                                           default_vg),
                                          ComputeNics(op, cluster, None,
                                                      self.cfg, ec_id),
                                          ComputeFullBeParams(op, cluster),
                                          node_whitelist)
               for op in self.op.instances]

      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute nodes using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    if self.op.iallocator:
      (allocatable, failed_insts) = self.ia_result
      allocatable_insts = map(compat.fst, allocatable)
    else:
      allocatable_insts = [op.instance_name for op in self.op.instances]
      failed_insts = []

    return {
      constants.ALLOCATABLE_KEY: allocatable_insts,
      constants.FAILED_KEY: failed_insts,
      }
  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result

      for (name, node_names) in allocatable:
        op = op2inst.pop(name)

        (op.pnode_uuid, op.pnode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
        if len(node_names) > 1:
          (op.snode_uuid, op.snode) = \
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])

        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator returned an incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())

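# Illustrative result shape (editor's sketch): LUInstanceMultiAlloc does not
# create the instances itself; it asks the iallocator which instances can be
# placed and returns one follow-up creation job per allocatable instance. The
# dictionary built by _ConstructPartialResult therefore looks roughly like
#
#   {
#     constants.ALLOCATABLE_KEY: ["inst1.example.com", "inst2.example.com"],
#     constants.FAILED_KEY: ["inst3.example.com"],
#   }
#
# with the job IDs of the per-instance jobs reported via ResultWithJobs (and
# as an empty JOB_IDS_KEY list in dry-run mode, as set in CheckPrereq above).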
class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) |
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

    # Get instance information
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

    # Check if node groups for locked instance are still correct
    instance_all_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
    assert owned_nodes.issuperset(instance_all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    assert self.target_uuids

    env = {
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

    env.update(BuildInstanceHookEnvByObject(self, self.instance))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)
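# Illustrative usage (editor's sketch): changing an instance's node group maps
# to an OpInstanceChangeGroup opcode (CLI: `gnt-instance change-group`). The
# LU itself only computes a solution: it hands the instance and the allowed
# target groups to the iallocator as an IAReqGroupChange request and returns
# the migration/failover jobs produced by LoadNodeEvacResult. Opcode field
# names below are assumptions based on the self.op attributes used above:
#
#   from ganeti import opcodes
#   op = opcodes.OpInstanceChangeGroup(instance_name="inst1.example.com",
#                                      target_groups=["group2"],
#                                      early_release=False)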