
Source Code for Module ganeti.cmdlib.instance

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc. 
   5  # 
   6  # This program is free software; you can redistribute it and/or modify 
   7  # it under the terms of the GNU General Public License as published by 
   8  # the Free Software Foundation; either version 2 of the License, or 
   9  # (at your option) any later version. 
  10  # 
  11  # This program is distributed in the hope that it will be useful, but 
  12  # WITHOUT ANY WARRANTY; without even the implied warranty of 
  13  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
  14  # General Public License for more details. 
  15  # 
  16  # You should have received a copy of the GNU General Public License 
  17  # along with this program; if not, write to the Free Software 
  18  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
  19  # 02110-1301, USA. 
  20   
  21   
  22  """Logical units dealing with instances.""" 
  23   
  24  import OpenSSL 
  25  import copy 
  26  import logging 
  27  import os 
  28   
  29  from ganeti import compat 
  30  from ganeti import constants 
  31  from ganeti import errors 
  32  from ganeti import ht 
  33  from ganeti import hypervisor 
  34  from ganeti import locking 
  35  from ganeti.masterd import iallocator 
  36  from ganeti import masterd 
  37  from ganeti import netutils 
  38  from ganeti import objects 
  39  from ganeti import opcodes 
  40  from ganeti import pathutils 
  41  from ganeti import rpc 
  42  from ganeti import utils 
  43   
  44  from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs 
  45   
  46  from ganeti.cmdlib.common import INSTANCE_DOWN, \ 
  47    INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \ 
  48    ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \ 
  49    LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \ 
  50    IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \ 
  51    AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \ 
  52    ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \ 
  53    CheckDiskTemplateEnabled 
  54  from ganeti.cmdlib.instance_storage import CreateDisks, \ 
  55    CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \ 
  56    IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \ 
  57    CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \ 
  58    StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \ 
  59    CheckSpindlesExclusiveStorage 
  60  from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \ 
  61    GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \ 
  62    NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \ 
  63    ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \ 
  64    GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \ 
  65    CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS 
  66   
  67  import ganeti.masterd.instance 
  68   
  69   
  70  #: Type description for changes as returned by L{_ApplyContainerMods}'s 
  71  #: callbacks 
  72  _TApplyContModsCbChanges = \ 
  73    ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([ 
  74      ht.TNonEmptyString, 
  75      ht.TAny, 
  76      ]))) 
  77   
  78   
  79  def _CheckHostnameSane(lu, name): 
  80    """Ensures that a given hostname resolves to a 'sane' name. 
  81   
  82    The given name is required to be a prefix of the resolved hostname, 
  83    to prevent accidental mismatches. 
  84   
  85    @param lu: the logical unit on behalf of which we're checking 
  86    @param name: the name we should resolve and check 
  87    @return: the resolved hostname object 
  88   
  89    """ 
  90    hostname = netutils.GetHostname(name=name) 
  91    if hostname.name != name: 
  92      lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name) 
  93    if not utils.MatchNameComponent(name, [hostname.name]): 
  94      raise errors.OpPrereqError(("Resolved hostname '%s' does not look the" 
  95                                  " same as given hostname '%s'") % 
  96                                 (hostname.name, name), errors.ECODE_INVAL) 
  97    return hostname 
  98   
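For illustration, a minimal standalone sketch of the prefix rule enforced by _CheckHostnameSane; the helper below is hypothetical and only approximates what utils.MatchNameComponent does for a single resolved name:

def matches_resolved_name(given, resolved):
  # The short name must equal the resolved FQDN or be its first
  # dot-separated component; anything else is rejected above with
  # errors.ECODE_INVAL.
  return given == resolved or resolved.startswith(given + ".")

assert matches_resolved_name("inst1", "inst1.example.com")
assert not matches_resolved_name("inst1", "inst10.example.com")  # not a dot boundary
assert not matches_resolved_name("inst1", "other.example.com")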
  99   
 100  def _CheckOpportunisticLocking(op): 
 101    """Generate error if opportunistic locking is not possible. 
 102   
 103    """ 
 104    if op.opportunistic_locking and not op.iallocator: 
 105      raise errors.OpPrereqError("Opportunistic locking is only available in" 
 106                                 " combination with an instance allocator", 
 107                                 errors.ECODE_INVAL) 
 108   
 109   
 110  def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist): 
 111    """Wrapper around IAReqInstanceAlloc. 
 112   
 113    @param op: The instance opcode 
 114    @param disks: The computed disks 
 115    @param nics: The computed nics 
 116    @param beparams: The fully filled beparams 
 117    @param node_name_whitelist: List of nodes which should appear as online to the 
 118      allocator (unless the node is already marked offline) 
 119   
 120    @returns: A filled L{iallocator.IAReqInstanceAlloc} 
 121   
 122    """ 
 123    spindle_use = beparams[constants.BE_SPINDLE_USE] 
 124    return iallocator.IAReqInstanceAlloc(name=op.instance_name, 
 125                                         disk_template=op.disk_template, 
 126                                         tags=op.tags, 
 127                                         os=op.os_type, 
 128                                         vcpus=beparams[constants.BE_VCPUS], 
 129                                         memory=beparams[constants.BE_MAXMEM], 
 130                                         spindle_use=spindle_use, 
 131                                         disks=disks, 
 132                                         nics=[n.ToDict() for n in nics], 
 133                                         hypervisor=op.hypervisor, 
 134                                         node_whitelist=node_name_whitelist) 
 135   
 136   
 137  def _ComputeFullBeParams(op, cluster): 
 138    """Computes the full beparams. 
 139   
 140    @param op: The instance opcode 
 141    @param cluster: The cluster config object 
 142   
 143    @return: The fully filled beparams 
 144   
 145    """ 
 146    default_beparams = cluster.beparams[constants.PP_DEFAULT] 
 147    for param, value in op.beparams.iteritems(): 
 148      if value == constants.VALUE_AUTO: 
 149        op.beparams[param] = default_beparams[param] 
 150    objects.UpgradeBeParams(op.beparams) 
 151    utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES) 
 152    return cluster.SimpleFillBE(op.beparams) 
 153   
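For illustration, a standalone sketch (plain dicts and made-up parameter names, not the real constants) of the two-step fill performed by _ComputeFullBeParams: explicit "auto" values are first replaced by the cluster-level defaults, then the defaults are layered under whatever the opcode still specifies, analogous to cluster.SimpleFillBE:

CLUSTER_DEFAULTS = {"maxmem": 1024, "minmem": 1024, "vcpus": 1, "auto_balance": True}

def compute_full_beparams(op_beparams):
  filled = dict(op_beparams)
  for param, value in filled.items():
    if value == "auto":
      # replace an explicit "auto" with the cluster default for that parameter
      filled[param] = CLUSTER_DEFAULTS[param]
  # defaults overlaid by the instance-specific values (SimpleFillBE analogue)
  result = dict(CLUSTER_DEFAULTS)
  result.update(filled)
  return result

print(compute_full_beparams({"maxmem": 4096, "vcpus": "auto"}))
# {'maxmem': 4096, 'minmem': 1024, 'vcpus': 1, 'auto_balance': True}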
154 155 -def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
156 """Computes the nics. 157 158 @param op: The instance opcode 159 @param cluster: Cluster configuration object 160 @param default_ip: The default ip to assign 161 @param cfg: An instance of the configuration object 162 @param ec_id: Execution context ID 163 164 @returns: The build up nics 165 166 """ 167 nics = [] 168 for nic in op.nics: 169 nic_mode_req = nic.get(constants.INIC_MODE, None) 170 nic_mode = nic_mode_req 171 if nic_mode is None or nic_mode == constants.VALUE_AUTO: 172 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE] 173 174 net = nic.get(constants.INIC_NETWORK, None) 175 link = nic.get(constants.NIC_LINK, None) 176 ip = nic.get(constants.INIC_IP, None) 177 178 if net is None or net.lower() == constants.VALUE_NONE: 179 net = None 180 else: 181 if nic_mode_req is not None or link is not None: 182 raise errors.OpPrereqError("If network is given, no mode or link" 183 " is allowed to be passed", 184 errors.ECODE_INVAL) 185 186 # ip validity checks 187 if ip is None or ip.lower() == constants.VALUE_NONE: 188 nic_ip = None 189 elif ip.lower() == constants.VALUE_AUTO: 190 if not op.name_check: 191 raise errors.OpPrereqError("IP address set to auto but name checks" 192 " have been skipped", 193 errors.ECODE_INVAL) 194 nic_ip = default_ip 195 else: 196 # We defer pool operations until later, so that the iallocator has 197 # filled in the instance's node(s) dimara 198 if ip.lower() == constants.NIC_IP_POOL: 199 if net is None: 200 raise errors.OpPrereqError("if ip=pool, parameter network" 201 " must be passed too", 202 errors.ECODE_INVAL) 203 204 elif not netutils.IPAddress.IsValid(ip): 205 raise errors.OpPrereqError("Invalid IP address '%s'" % ip, 206 errors.ECODE_INVAL) 207 208 nic_ip = ip 209 210 # TODO: check the ip address for uniqueness 211 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip: 212 raise errors.OpPrereqError("Routed nic mode requires an ip address", 213 errors.ECODE_INVAL) 214 215 # MAC address verification 216 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO) 217 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE): 218 mac = utils.NormalizeAndValidateMac(mac) 219 220 try: 221 # TODO: We need to factor this out 222 cfg.ReserveMAC(mac, ec_id) 223 except errors.ReservationError: 224 raise errors.OpPrereqError("MAC address %s already in use" 225 " in cluster" % mac, 226 errors.ECODE_NOTUNIQUE) 227 228 # Build nic parameters 229 nicparams = {} 230 if nic_mode_req: 231 nicparams[constants.NIC_MODE] = nic_mode 232 if link: 233 nicparams[constants.NIC_LINK] = link 234 235 check_params = cluster.SimpleFillNIC(nicparams) 236 objects.NIC.CheckParameterSyntax(check_params) 237 net_uuid = cfg.LookupNetwork(net) 238 name = nic.get(constants.INIC_NAME, None) 239 if name is not None and name.lower() == constants.VALUE_NONE: 240 name = None 241 nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name, 242 network=net_uuid, nicparams=nicparams) 243 nic_obj.uuid = cfg.GenerateUniqueID(ec_id) 244 nics.append(nic_obj) 245 246 return nics
247
 248   
 249  def _CheckForConflictingIp(lu, ip, node_uuid): 
 250    """Raise an error in case of a conflicting IP address. 
 251   
 252    @type ip: string 
 253    @param ip: IP address 
 254    @type node_uuid: string 
 255    @param node_uuid: node UUID 
 256   
 257    """ 
 258    (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid) 
 259    if conf_net is not None: 
 260      raise errors.OpPrereqError(("The requested IP address (%s) belongs to" 
 261                                  " network %s, but the target NIC does not." % 
 262                                  (ip, conf_net)), 
 263                                 errors.ECODE_STATE) 
 264   
 265    return (None, None) 
 266   
 267   
 268  def _ComputeIPolicyInstanceSpecViolation( 
 269    ipolicy, instance_spec, disk_template, 
 270    _compute_fn=ComputeIPolicySpecViolation): 
 271    """Compute whether the instance spec meets the specs of the ipolicy. 
 272   
 273    @type ipolicy: dict 
 274    @param ipolicy: The ipolicy to verify against 
 275    @type instance_spec: dict 
 276    @param instance_spec: The instance spec to verify 
 277    @type disk_template: string 
 278    @param disk_template: the disk template of the instance 
 279    @param _compute_fn: The function to verify ipolicy (unittest only) 
 280    @see: L{ComputeIPolicySpecViolation} 
 281   
 282    """ 
 283    mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None) 
 284    cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None) 
 285    disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0) 
 286    disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, []) 
 287    nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0) 
 288    spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None) 
 289   
 290    return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count, 
 291                       disk_sizes, spindle_use, disk_template) 
 292   
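As a rough illustration of what a spec-violation check of this kind produces, a standalone sketch with a made-up policy layout (the real ipolicy structure and the ComputeIPolicySpecViolation signature differ): the result is a list of human-readable violations, empty when the spec fits the policy.

POLICY = {"mem-size": (128, 32768), "cpu-count": (1, 8)}  # hypothetical min/max bounds

def compute_spec_violations(policy, spec):
  violations = []
  for key, value in spec.items():
    lo, hi = policy[key]
    if not lo <= value <= hi:
      violations.append("%s value %s is out of range [%s, %s]" % (key, value, lo, hi))
  return violations

print(compute_spec_violations(POLICY, {"mem-size": 65536, "cpu-count": 4}))
# ['mem-size value 65536 is out of range [128, 32768]']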
 293   
 294  def _CheckOSVariant(os_obj, name): 
 295    """Check whether an OS name conforms to the os variants specification. 
 296   
 297    @type os_obj: L{objects.OS} 
 298    @param os_obj: OS object to check 
 299    @type name: string 
 300    @param name: OS name passed by the user, to check for validity 
 301   
 302    """ 
 303    variant = objects.OS.GetVariant(name) 
 304    if not os_obj.supported_variants: 
 305      if variant: 
 306        raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'" 
 307                                   " passed)" % (os_obj.name, variant), 
 308                                   errors.ECODE_INVAL) 
 309      return 
 310    if not variant: 
 311      raise errors.OpPrereqError("OS name must include a variant", 
 312                                 errors.ECODE_INVAL) 
 313   
 314    if variant not in os_obj.supported_variants: 
 315      raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL) 
 316   
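For illustration, a standalone sketch of the "os+variant" naming convention checked by _CheckOSVariant (helper names and sample data below are made up; objects.OS.GetVariant is the real entry point): an OS that declares supported variants must be given a "+variant" suffix and the suffix must be one of the declared variants, while an OS without variants must not be given one.

def get_variant(os_name):
  # "debootstrap+buster" -> "buster"; "debootstrap" -> ""
  return os_name.split("+", 1)[1] if "+" in os_name else ""

def os_variant_is_valid(supported_variants, os_name):
  variant = get_variant(os_name)
  if not supported_variants:
    return not variant               # a variant passed for a variant-less OS is invalid
  return bool(variant) and variant in supported_variants

assert os_variant_is_valid(["default", "buster"], "debootstrap+buster")
assert not os_variant_is_valid(["default"], "debootstrap")   # variant required
assert not os_variant_is_valid([], "image+foo")              # OS has no variants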
 317   
 318  class LUInstanceCreate(LogicalUnit): 
319 """Create an instance. 320 321 """ 322 HPATH = "instance-add" 323 HTYPE = constants.HTYPE_INSTANCE 324 REQ_BGL = False 325
326 - def _CheckDiskTemplateValid(self):
327 """Checks validity of disk template. 328 329 """ 330 cluster = self.cfg.GetClusterInfo() 331 if self.op.disk_template is None: 332 # FIXME: It would be better to take the default disk template from the 333 # ipolicy, but for the ipolicy we need the primary node, which we get from 334 # the iallocator, which wants the disk template as input. To solve this 335 # chicken-and-egg problem, it should be possible to specify just a node 336 # group from the iallocator and take the ipolicy from that. 337 self.op.disk_template = cluster.enabled_disk_templates[0] 338 CheckDiskTemplateEnabled(cluster, self.op.disk_template)
339
340 - def _CheckDiskArguments(self):
341 """Checks validity of disk-related arguments. 342 343 """ 344 # check that disk's names are unique and valid 345 utils.ValidateDeviceNames("disk", self.op.disks) 346 347 self._CheckDiskTemplateValid() 348 349 # check disks. parameter names and consistent adopt/no-adopt strategy 350 has_adopt = has_no_adopt = False 351 for disk in self.op.disks: 352 if self.op.disk_template != constants.DT_EXT: 353 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES) 354 if constants.IDISK_ADOPT in disk: 355 has_adopt = True 356 else: 357 has_no_adopt = True 358 if has_adopt and has_no_adopt: 359 raise errors.OpPrereqError("Either all disks are adopted or none is", 360 errors.ECODE_INVAL) 361 if has_adopt: 362 if self.op.disk_template not in constants.DTS_MAY_ADOPT: 363 raise errors.OpPrereqError("Disk adoption is not supported for the" 364 " '%s' disk template" % 365 self.op.disk_template, 366 errors.ECODE_INVAL) 367 if self.op.iallocator is not None: 368 raise errors.OpPrereqError("Disk adoption not allowed with an" 369 " iallocator script", errors.ECODE_INVAL) 370 if self.op.mode == constants.INSTANCE_IMPORT: 371 raise errors.OpPrereqError("Disk adoption not allowed for" 372 " instance import", errors.ECODE_INVAL) 373 else: 374 if self.op.disk_template in constants.DTS_MUST_ADOPT: 375 raise errors.OpPrereqError("Disk template %s requires disk adoption," 376 " but no 'adopt' parameter given" % 377 self.op.disk_template, 378 errors.ECODE_INVAL) 379 380 self.adopt_disks = has_adopt
381
382 - def CheckArguments(self):
383 """Check arguments. 384 385 """ 386 # do not require name_check to ease forward/backward compatibility 387 # for tools 388 if self.op.no_install and self.op.start: 389 self.LogInfo("No-installation mode selected, disabling startup") 390 self.op.start = False 391 # validate/normalize the instance name 392 self.op.instance_name = \ 393 netutils.Hostname.GetNormalizedName(self.op.instance_name) 394 395 if self.op.ip_check and not self.op.name_check: 396 # TODO: make the ip check more flexible and not depend on the name check 397 raise errors.OpPrereqError("Cannot do IP address check without a name" 398 " check", errors.ECODE_INVAL) 399 400 # check nics' parameter names 401 for nic in self.op.nics: 402 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES) 403 # check that NIC's parameters names are unique and valid 404 utils.ValidateDeviceNames("NIC", self.op.nics) 405 406 self._CheckDiskArguments() 407 408 # instance name verification 409 if self.op.name_check: 410 self.hostname = _CheckHostnameSane(self, self.op.instance_name) 411 self.op.instance_name = self.hostname.name 412 # used in CheckPrereq for ip ping check 413 self.check_ip = self.hostname.ip 414 else: 415 self.check_ip = None 416 417 # file storage checks 418 if (self.op.file_driver and 419 not self.op.file_driver in constants.FILE_DRIVER): 420 raise errors.OpPrereqError("Invalid file driver name '%s'" % 421 self.op.file_driver, errors.ECODE_INVAL) 422 423 # set default file_driver if unset and required 424 if (not self.op.file_driver and 425 self.op.disk_template in [constants.DT_FILE, 426 constants.DT_SHARED_FILE]): 427 self.op.file_driver = constants.FD_DEFAULT 428 429 ### Node/iallocator related checks 430 CheckIAllocatorOrNode(self, "iallocator", "pnode") 431 432 if self.op.pnode is not None: 433 if self.op.disk_template in constants.DTS_INT_MIRROR: 434 if self.op.snode is None: 435 raise errors.OpPrereqError("The networked disk templates need" 436 " a mirror node", errors.ECODE_INVAL) 437 elif self.op.snode: 438 self.LogWarning("Secondary node will be ignored on non-mirrored disk" 439 " template") 440 self.op.snode = None 441 442 _CheckOpportunisticLocking(self.op) 443 444 self._cds = GetClusterDomainSecret() 445 446 if self.op.mode == constants.INSTANCE_IMPORT: 447 # On import force_variant must be True, because if we forced it at 448 # initial install, our only chance when importing it back is that it 449 # works again! 
450 self.op.force_variant = True 451 452 if self.op.no_install: 453 self.LogInfo("No-installation mode has no effect during import") 454 455 elif self.op.mode == constants.INSTANCE_CREATE: 456 if self.op.os_type is None: 457 raise errors.OpPrereqError("No guest OS specified", 458 errors.ECODE_INVAL) 459 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os: 460 raise errors.OpPrereqError("Guest OS '%s' is not allowed for" 461 " installation" % self.op.os_type, 462 errors.ECODE_STATE) 463 if self.op.disk_template is None: 464 raise errors.OpPrereqError("No disk template specified", 465 errors.ECODE_INVAL) 466 467 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT: 468 # Check handshake to ensure both clusters have the same domain secret 469 src_handshake = self.op.source_handshake 470 if not src_handshake: 471 raise errors.OpPrereqError("Missing source handshake", 472 errors.ECODE_INVAL) 473 474 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds, 475 src_handshake) 476 if errmsg: 477 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg, 478 errors.ECODE_INVAL) 479 480 # Load and check source CA 481 self.source_x509_ca_pem = self.op.source_x509_ca 482 if not self.source_x509_ca_pem: 483 raise errors.OpPrereqError("Missing source X509 CA", 484 errors.ECODE_INVAL) 485 486 try: 487 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem, 488 self._cds) 489 except OpenSSL.crypto.Error, err: 490 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" % 491 (err, ), errors.ECODE_INVAL) 492 493 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None) 494 if errcode is not None: 495 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ), 496 errors.ECODE_INVAL) 497 498 self.source_x509_ca = cert 499 500 src_instance_name = self.op.source_instance_name 501 if not src_instance_name: 502 raise errors.OpPrereqError("Missing source instance name", 503 errors.ECODE_INVAL) 504 505 self.source_instance_name = \ 506 netutils.GetHostname(name=src_instance_name).name 507 508 else: 509 raise errors.OpPrereqError("Invalid instance creation mode %r" % 510 self.op.mode, errors.ECODE_INVAL)
511
512 - def ExpandNames(self):
513 """ExpandNames for CreateInstance. 514 515 Figure out the right locks for instance creation. 516 517 """ 518 self.needed_locks = {} 519 520 # this is just a preventive check, but someone might still add this 521 # instance in the meantime, and creation will fail at lock-add time 522 if self.op.instance_name in\ 523 [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]: 524 raise errors.OpPrereqError("Instance '%s' is already in the cluster" % 525 self.op.instance_name, errors.ECODE_EXISTS) 526 527 self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name 528 529 if self.op.iallocator: 530 # TODO: Find a solution to not lock all nodes in the cluster, e.g. by 531 # specifying a group on instance creation and then selecting nodes from 532 # that group 533 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET 534 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET 535 536 if self.op.opportunistic_locking: 537 self.opportunistic_locks[locking.LEVEL_NODE] = True 538 else: 539 (self.op.pnode_uuid, self.op.pnode) = \ 540 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode) 541 nodelist = [self.op.pnode_uuid] 542 if self.op.snode is not None: 543 (self.op.snode_uuid, self.op.snode) = \ 544 ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode) 545 nodelist.append(self.op.snode_uuid) 546 self.needed_locks[locking.LEVEL_NODE] = nodelist 547 548 # in case of import lock the source node too 549 if self.op.mode == constants.INSTANCE_IMPORT: 550 src_node = self.op.src_node 551 src_path = self.op.src_path 552 553 if src_path is None: 554 self.op.src_path = src_path = self.op.instance_name 555 556 if src_node is None: 557 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET 558 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET 559 self.op.src_node = None 560 if os.path.isabs(src_path): 561 raise errors.OpPrereqError("Importing an instance from a path" 562 " requires a source node option", 563 errors.ECODE_INVAL) 564 else: 565 (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \ 566 ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node) 567 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET: 568 self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid) 569 if not os.path.isabs(src_path): 570 self.op.src_path = \ 571 utils.PathJoin(pathutils.EXPORT_DIR, src_path) 572 573 self.needed_locks[locking.LEVEL_NODE_RES] = \ 574 CopyLockList(self.needed_locks[locking.LEVEL_NODE]) 575 576 # Optimistically acquire shared group locks (we're reading the 577 # configuration). We can't just call GetInstanceNodeGroups, because the 578 # instance doesn't exist yet. Therefore we lock all node groups of all 579 # nodes we have. 580 if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET: 581 # In the case we lock all nodes for opportunistic allocation, we have no 582 # choice than to lock all groups, because they're allocated before nodes. 583 # This is sad, but true. At least we release all those we don't need in 584 # CheckPrereq later. 585 self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET 586 else: 587 self.needed_locks[locking.LEVEL_NODEGROUP] = \ 588 list(self.cfg.GetNodeGroupsFromNodes( 589 self.needed_locks[locking.LEVEL_NODE])) 590 self.share_locks[locking.LEVEL_NODEGROUP] = 1
591
592 - def DeclareLocks(self, level):
593 if level == locking.LEVEL_NODE_RES and \ 594 self.opportunistic_locks[locking.LEVEL_NODE]: 595 # Even when using opportunistic locking, we require the same set of 596 # NODE_RES locks as we got NODE locks 597 self.needed_locks[locking.LEVEL_NODE_RES] = \ 598 self.owned_locks(locking.LEVEL_NODE)
599
600 - def _RunAllocator(self):
601 """Run the allocator based on input opcode. 602 603 """ 604 if self.op.opportunistic_locking: 605 # Only consider nodes for which a lock is held 606 node_name_whitelist = self.cfg.GetNodeNames( 607 self.owned_locks(locking.LEVEL_NODE)) 608 else: 609 node_name_whitelist = None 610 611 #TODO Export network to iallocator so that it chooses a pnode 612 # in a nodegroup that has the desired network connected to 613 req = _CreateInstanceAllocRequest(self.op, self.disks, 614 self.nics, self.be_full, 615 node_name_whitelist) 616 ial = iallocator.IAllocator(self.cfg, self.rpc, req) 617 618 ial.Run(self.op.iallocator) 619 620 if not ial.success: 621 # When opportunistic locks are used only a temporary failure is generated 622 if self.op.opportunistic_locking: 623 ecode = errors.ECODE_TEMP_NORES 624 else: 625 ecode = errors.ECODE_NORES 626 627 raise errors.OpPrereqError("Can't compute nodes using" 628 " iallocator '%s': %s" % 629 (self.op.iallocator, ial.info), 630 ecode) 631 632 (self.op.pnode_uuid, self.op.pnode) = \ 633 ExpandNodeUuidAndName(self.cfg, None, ial.result[0]) 634 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s", 635 self.op.instance_name, self.op.iallocator, 636 utils.CommaJoin(ial.result)) 637 638 assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator" 639 640 if req.RequiredNodes() == 2: 641 (self.op.snode_uuid, self.op.snode) = \ 642 ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
643
644 - def BuildHooksEnv(self):
645 """Build hooks env. 646 647 This runs on master, primary and secondary nodes of the instance. 648 649 """ 650 env = { 651 "ADD_MODE": self.op.mode, 652 } 653 if self.op.mode == constants.INSTANCE_IMPORT: 654 env["SRC_NODE"] = self.op.src_node 655 env["SRC_PATH"] = self.op.src_path 656 env["SRC_IMAGES"] = self.src_images 657 658 env.update(BuildInstanceHookEnv( 659 name=self.op.instance_name, 660 primary_node_name=self.op.pnode, 661 secondary_node_names=self.cfg.GetNodeNames(self.secondaries), 662 status=self.op.start, 663 os_type=self.op.os_type, 664 minmem=self.be_full[constants.BE_MINMEM], 665 maxmem=self.be_full[constants.BE_MAXMEM], 666 vcpus=self.be_full[constants.BE_VCPUS], 667 nics=NICListToTuple(self, self.nics), 668 disk_template=self.op.disk_template, 669 disks=[(d[constants.IDISK_NAME], d.get("uuid", ""), 670 d[constants.IDISK_SIZE], d[constants.IDISK_MODE]) 671 for d in self.disks], 672 bep=self.be_full, 673 hvp=self.hv_full, 674 hypervisor_name=self.op.hypervisor, 675 tags=self.op.tags, 676 )) 677 678 return env
679
680 - def BuildHooksNodes(self):
681 """Build hooks nodes. 682 683 """ 684 nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries 685 return nl, nl
686
687 - def _ReadExportInfo(self):
688 """Reads the export information from disk. 689 690 It will override the opcode source node and path with the actual 691 information, if these two were not specified before. 692 693 @return: the export information 694 695 """ 696 assert self.op.mode == constants.INSTANCE_IMPORT 697 698 if self.op.src_node_uuid is None: 699 locked_nodes = self.owned_locks(locking.LEVEL_NODE) 700 exp_list = self.rpc.call_export_list(locked_nodes) 701 found = False 702 for node_uuid in exp_list: 703 if exp_list[node_uuid].fail_msg: 704 continue 705 if self.op.src_path in exp_list[node_uuid].payload: 706 found = True 707 self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name 708 self.op.src_node_uuid = node_uuid 709 self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR, 710 self.op.src_path) 711 break 712 if not found: 713 raise errors.OpPrereqError("No export found for relative path %s" % 714 self.op.src_path, errors.ECODE_INVAL) 715 716 CheckNodeOnline(self, self.op.src_node_uuid) 717 result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path) 718 result.Raise("No export or invalid export found in dir %s" % 719 self.op.src_path) 720 721 export_info = objects.SerializableConfigParser.Loads(str(result.payload)) 722 if not export_info.has_section(constants.INISECT_EXP): 723 raise errors.ProgrammerError("Corrupted export config", 724 errors.ECODE_ENVIRON) 725 726 ei_version = export_info.get(constants.INISECT_EXP, "version") 727 if int(ei_version) != constants.EXPORT_VERSION: 728 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" % 729 (ei_version, constants.EXPORT_VERSION), 730 errors.ECODE_ENVIRON) 731 return export_info
732
733 - def _ReadExportParams(self, einfo):
734 """Use export parameters as defaults. 735 736 In case the opcode doesn't specify (as in override) some instance 737 parameters, then try to use them from the export information, if 738 that declares them. 739 740 """ 741 self.op.os_type = einfo.get(constants.INISECT_EXP, "os") 742 743 if not self.op.disks: 744 disks = [] 745 # TODO: import the disk iv_name too 746 for idx in range(constants.MAX_DISKS): 747 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx): 748 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx) 749 disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx) 750 disk = { 751 constants.IDISK_SIZE: disk_sz, 752 constants.IDISK_NAME: disk_name 753 } 754 disks.append(disk) 755 self.op.disks = disks 756 if not disks and self.op.disk_template != constants.DT_DISKLESS: 757 raise errors.OpPrereqError("No disk info specified and the export" 758 " is missing the disk information", 759 errors.ECODE_INVAL) 760 761 if not self.op.nics: 762 nics = [] 763 for idx in range(constants.MAX_NICS): 764 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx): 765 ndict = {} 766 for name in [constants.INIC_IP, 767 constants.INIC_MAC, constants.INIC_NAME]: 768 nic_param_name = "nic%d_%s" % (idx, name) 769 if einfo.has_option(constants.INISECT_INS, nic_param_name): 770 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name)) 771 ndict[name] = v 772 network = einfo.get(constants.INISECT_INS, 773 "nic%d_%s" % (idx, constants.INIC_NETWORK)) 774 # in case network is given link and mode are inherited 775 # from nodegroup's netparams and thus should not be passed here 776 if network: 777 ndict[constants.INIC_NETWORK] = network 778 else: 779 for name in list(constants.NICS_PARAMETERS): 780 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name)) 781 ndict[name] = v 782 nics.append(ndict) 783 else: 784 break 785 self.op.nics = nics 786 787 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"): 788 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split() 789 790 if (self.op.hypervisor is None and 791 einfo.has_option(constants.INISECT_INS, "hypervisor")): 792 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor") 793 794 if einfo.has_section(constants.INISECT_HYP): 795 # use the export parameters but do not override the ones 796 # specified by the user 797 for name, value in einfo.items(constants.INISECT_HYP): 798 if name not in self.op.hvparams: 799 self.op.hvparams[name] = value 800 801 if einfo.has_section(constants.INISECT_BEP): 802 # use the parameters, without overriding 803 for name, value in einfo.items(constants.INISECT_BEP): 804 if name not in self.op.beparams: 805 self.op.beparams[name] = value 806 # Compatibility for the old "memory" be param 807 if name == constants.BE_MEMORY: 808 if constants.BE_MAXMEM not in self.op.beparams: 809 self.op.beparams[constants.BE_MAXMEM] = value 810 if constants.BE_MINMEM not in self.op.beparams: 811 self.op.beparams[constants.BE_MINMEM] = value 812 else: 813 # try to read the parameters old style, from the main section 814 for name in constants.BES_PARAMETERS: 815 if (name not in self.op.beparams and 816 einfo.has_option(constants.INISECT_INS, name)): 817 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name) 818 819 if einfo.has_section(constants.INISECT_OSP): 820 # use the parameters, without overriding 821 for name, value in einfo.items(constants.INISECT_OSP): 822 if name not in self.op.osparams: 823 self.op.osparams[name] = value
824
825 - def _RevertToDefaults(self, cluster):
826 """Revert the instance parameters to the default values. 827 828 """ 829 # hvparams 830 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {}) 831 for name in self.op.hvparams.keys(): 832 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]: 833 del self.op.hvparams[name] 834 # beparams 835 be_defs = cluster.SimpleFillBE({}) 836 for name in self.op.beparams.keys(): 837 if name in be_defs and be_defs[name] == self.op.beparams[name]: 838 del self.op.beparams[name] 839 # nic params 840 nic_defs = cluster.SimpleFillNIC({}) 841 for nic in self.op.nics: 842 for name in constants.NICS_PARAMETERS: 843 if name in nic and name in nic_defs and nic[name] == nic_defs[name]: 844 del nic[name] 845 # osparams 846 os_defs = cluster.SimpleFillOS(self.op.os_type, {}) 847 for name in self.op.osparams.keys(): 848 if name in os_defs and os_defs[name] == self.op.osparams[name]: 849 del self.op.osparams[name]
850
851 - def _CalculateFileStorageDir(self):
852 """Calculate final instance file storage dir. 853 854 """ 855 # file storage dir calculation/check 856 self.instance_file_storage_dir = None 857 if self.op.disk_template in constants.DTS_FILEBASED: 858 # build the full file storage dir path 859 joinargs = [] 860 861 if self.op.disk_template == constants.DT_SHARED_FILE: 862 get_fsd_fn = self.cfg.GetSharedFileStorageDir 863 else: 864 get_fsd_fn = self.cfg.GetFileStorageDir 865 866 cfg_storagedir = get_fsd_fn() 867 if not cfg_storagedir: 868 raise errors.OpPrereqError("Cluster file storage dir not defined", 869 errors.ECODE_STATE) 870 joinargs.append(cfg_storagedir) 871 872 if self.op.file_storage_dir is not None: 873 joinargs.append(self.op.file_storage_dir) 874 875 joinargs.append(self.op.instance_name) 876 877 # pylint: disable=W0142 878 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
879
880 - def CheckPrereq(self): # pylint: disable=R0914
881 """Check prerequisites. 882 883 """ 884 # Check that the optimistically acquired groups are correct wrt the 885 # acquired nodes 886 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) 887 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE)) 888 cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes)) 889 if not owned_groups.issuperset(cur_groups): 890 raise errors.OpPrereqError("New instance %s's node groups changed since" 891 " locks were acquired, current groups are" 892 " are '%s', owning groups '%s'; retry the" 893 " operation" % 894 (self.op.instance_name, 895 utils.CommaJoin(cur_groups), 896 utils.CommaJoin(owned_groups)), 897 errors.ECODE_STATE) 898 899 self._CalculateFileStorageDir() 900 901 if self.op.mode == constants.INSTANCE_IMPORT: 902 export_info = self._ReadExportInfo() 903 self._ReadExportParams(export_info) 904 self._old_instance_name = export_info.get(constants.INISECT_INS, "name") 905 else: 906 self._old_instance_name = None 907 908 if (not self.cfg.GetVGName() and 909 self.op.disk_template not in constants.DTS_NOT_LVM): 910 raise errors.OpPrereqError("Cluster does not support lvm-based" 911 " instances", errors.ECODE_STATE) 912 913 if (self.op.hypervisor is None or 914 self.op.hypervisor == constants.VALUE_AUTO): 915 self.op.hypervisor = self.cfg.GetHypervisorType() 916 917 cluster = self.cfg.GetClusterInfo() 918 enabled_hvs = cluster.enabled_hypervisors 919 if self.op.hypervisor not in enabled_hvs: 920 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the" 921 " cluster (%s)" % 922 (self.op.hypervisor, ",".join(enabled_hvs)), 923 errors.ECODE_STATE) 924 925 # Check tag validity 926 for tag in self.op.tags: 927 objects.TaggableObject.ValidateTag(tag) 928 929 # check hypervisor parameter syntax (locally) 930 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES) 931 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, 932 self.op.hvparams) 933 hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor) 934 hv_type.CheckParameterSyntax(filled_hvp) 935 self.hv_full = filled_hvp 936 # check that we don't specify global parameters on an instance 937 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor", 938 "instance", "cluster") 939 940 # fill and remember the beparams dict 941 self.be_full = _ComputeFullBeParams(self.op, cluster) 942 943 # build os parameters 944 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams) 945 946 # now that hvp/bep are in final format, let's reset to defaults, 947 # if told to do so 948 if self.op.identify_defaults: 949 self._RevertToDefaults(cluster) 950 951 # NIC buildup 952 self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg, 953 self.proc.GetECId()) 954 955 # disk checks/pre-build 956 default_vg = self.cfg.GetVGName() 957 self.disks = ComputeDisks(self.op, default_vg) 958 959 if self.op.mode == constants.INSTANCE_IMPORT: 960 disk_images = [] 961 for idx in range(len(self.disks)): 962 option = "disk%d_dump" % idx 963 if export_info.has_option(constants.INISECT_INS, option): 964 # FIXME: are the old os-es, disk sizes, etc. useful? 
965 export_name = export_info.get(constants.INISECT_INS, option) 966 image = utils.PathJoin(self.op.src_path, export_name) 967 disk_images.append(image) 968 else: 969 disk_images.append(False) 970 971 self.src_images = disk_images 972 973 if self.op.instance_name == self._old_instance_name: 974 for idx, nic in enumerate(self.nics): 975 if nic.mac == constants.VALUE_AUTO: 976 nic_mac_ini = "nic%d_mac" % idx 977 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini) 978 979 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT 980 981 # ip ping checks (we use the same ip that was resolved in ExpandNames) 982 if self.op.ip_check: 983 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT): 984 raise errors.OpPrereqError("IP %s of instance %s already in use" % 985 (self.check_ip, self.op.instance_name), 986 errors.ECODE_NOTUNIQUE) 987 988 #### mac address generation 989 # By generating here the mac address both the allocator and the hooks get 990 # the real final mac address rather than the 'auto' or 'generate' value. 991 # There is a race condition between the generation and the instance object 992 # creation, which means that we know the mac is valid now, but we're not 993 # sure it will be when we actually add the instance. If things go bad 994 # adding the instance will abort because of a duplicate mac, and the 995 # creation job will fail. 996 for nic in self.nics: 997 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE): 998 nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId()) 999 1000 #### allocator run 1001 1002 if self.op.iallocator is not None: 1003 self._RunAllocator() 1004 1005 # Release all unneeded node locks 1006 keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid, 1007 self.op.src_node_uuid]) 1008 ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks) 1009 ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks) 1010 ReleaseLocks(self, locking.LEVEL_NODE_ALLOC) 1011 # Release all unneeded group locks 1012 ReleaseLocks(self, locking.LEVEL_NODEGROUP, 1013 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks)) 1014 1015 assert (self.owned_locks(locking.LEVEL_NODE) == 1016 self.owned_locks(locking.LEVEL_NODE_RES)), \ 1017 "Node locks differ from node resource locks" 1018 1019 #### node related checks 1020 1021 # check primary node 1022 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid) 1023 assert self.pnode is not None, \ 1024 "Cannot retrieve locked node %s" % self.op.pnode_uuid 1025 if pnode.offline: 1026 raise errors.OpPrereqError("Cannot use offline primary node '%s'" % 1027 pnode.name, errors.ECODE_STATE) 1028 if pnode.drained: 1029 raise errors.OpPrereqError("Cannot use drained primary node '%s'" % 1030 pnode.name, errors.ECODE_STATE) 1031 if not pnode.vm_capable: 1032 raise errors.OpPrereqError("Cannot use non-vm_capable primary node" 1033 " '%s'" % pnode.name, errors.ECODE_STATE) 1034 1035 self.secondaries = [] 1036 1037 # Fill in any IPs from IP pools. This must happen here, because we need to 1038 # know the nic's primary node, as specified by the iallocator 1039 for idx, nic in enumerate(self.nics): 1040 net_uuid = nic.network 1041 if net_uuid is not None: 1042 nobj = self.cfg.GetNetwork(net_uuid) 1043 netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid) 1044 if netparams is None: 1045 raise errors.OpPrereqError("No netparams found for network" 1046 " %s. 
Propably not connected to" 1047 " node's %s nodegroup" % 1048 (nobj.name, self.pnode.name), 1049 errors.ECODE_INVAL) 1050 self.LogInfo("NIC/%d inherits netparams %s" % 1051 (idx, netparams.values())) 1052 nic.nicparams = dict(netparams) 1053 if nic.ip is not None: 1054 if nic.ip.lower() == constants.NIC_IP_POOL: 1055 try: 1056 nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId()) 1057 except errors.ReservationError: 1058 raise errors.OpPrereqError("Unable to get a free IP for NIC %d" 1059 " from the address pool" % idx, 1060 errors.ECODE_STATE) 1061 self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name) 1062 else: 1063 try: 1064 self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId()) 1065 except errors.ReservationError: 1066 raise errors.OpPrereqError("IP address %s already in use" 1067 " or does not belong to network %s" % 1068 (nic.ip, nobj.name), 1069 errors.ECODE_NOTUNIQUE) 1070 1071 # net is None, ip None or given 1072 elif self.op.conflicts_check: 1073 _CheckForConflictingIp(self, nic.ip, self.pnode.uuid) 1074 1075 # mirror node verification 1076 if self.op.disk_template in constants.DTS_INT_MIRROR: 1077 if self.op.snode_uuid == pnode.uuid: 1078 raise errors.OpPrereqError("The secondary node cannot be the" 1079 " primary node", errors.ECODE_INVAL) 1080 CheckNodeOnline(self, self.op.snode_uuid) 1081 CheckNodeNotDrained(self, self.op.snode_uuid) 1082 CheckNodeVmCapable(self, self.op.snode_uuid) 1083 self.secondaries.append(self.op.snode_uuid) 1084 1085 snode = self.cfg.GetNodeInfo(self.op.snode_uuid) 1086 if pnode.group != snode.group: 1087 self.LogWarning("The primary and secondary nodes are in two" 1088 " different node groups; the disk parameters" 1089 " from the first disk's node group will be" 1090 " used") 1091 1092 nodes = [pnode] 1093 if self.op.disk_template in constants.DTS_INT_MIRROR: 1094 nodes.append(snode) 1095 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n) 1096 excl_stor = compat.any(map(has_es, nodes)) 1097 if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE: 1098 raise errors.OpPrereqError("Disk template %s not supported with" 1099 " exclusive storage" % self.op.disk_template, 1100 errors.ECODE_STATE) 1101 for disk in self.disks: 1102 CheckSpindlesExclusiveStorage(disk, excl_stor, True) 1103 1104 node_uuids = [pnode.uuid] + self.secondaries 1105 1106 if not self.adopt_disks: 1107 if self.op.disk_template == constants.DT_RBD: 1108 # _CheckRADOSFreeSpace() is just a placeholder. 1109 # Any function that checks prerequisites can be placed here. 1110 # Check if there is enough space on the RADOS cluster. 
1111 CheckRADOSFreeSpace() 1112 elif self.op.disk_template == constants.DT_EXT: 1113 # FIXME: Function that checks prereqs if needed 1114 pass 1115 elif self.op.disk_template in utils.GetLvmDiskTemplates(): 1116 # Check lv size requirements, if not adopting 1117 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks) 1118 CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes) 1119 else: 1120 # FIXME: add checks for other, non-adopting, non-lvm disk templates 1121 pass 1122 1123 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data 1124 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG], 1125 disk[constants.IDISK_ADOPT]) 1126 for disk in self.disks]) 1127 if len(all_lvs) != len(self.disks): 1128 raise errors.OpPrereqError("Duplicate volume names given for adoption", 1129 errors.ECODE_INVAL) 1130 for lv_name in all_lvs: 1131 try: 1132 # FIXME: lv_name here is "vg/lv" need to ensure that other calls 1133 # to ReserveLV uses the same syntax 1134 self.cfg.ReserveLV(lv_name, self.proc.GetECId()) 1135 except errors.ReservationError: 1136 raise errors.OpPrereqError("LV named %s used by another instance" % 1137 lv_name, errors.ECODE_NOTUNIQUE) 1138 1139 vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid] 1140 vg_names.Raise("Cannot get VG information from node %s" % pnode.name) 1141 1142 node_lvs = self.rpc.call_lv_list([pnode.uuid], 1143 vg_names.payload.keys())[pnode.uuid] 1144 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name) 1145 node_lvs = node_lvs.payload 1146 1147 delta = all_lvs.difference(node_lvs.keys()) 1148 if delta: 1149 raise errors.OpPrereqError("Missing logical volume(s): %s" % 1150 utils.CommaJoin(delta), 1151 errors.ECODE_INVAL) 1152 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]] 1153 if online_lvs: 1154 raise errors.OpPrereqError("Online logical volumes found, cannot" 1155 " adopt: %s" % utils.CommaJoin(online_lvs), 1156 errors.ECODE_STATE) 1157 # update the size of disk based on what is found 1158 for dsk in self.disks: 1159 dsk[constants.IDISK_SIZE] = \ 1160 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG], 1161 dsk[constants.IDISK_ADOPT])][0])) 1162 1163 elif self.op.disk_template == constants.DT_BLOCK: 1164 # Normalize and de-duplicate device paths 1165 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT]) 1166 for disk in self.disks]) 1167 if len(all_disks) != len(self.disks): 1168 raise errors.OpPrereqError("Duplicate disk names given for adoption", 1169 errors.ECODE_INVAL) 1170 baddisks = [d for d in all_disks 1171 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)] 1172 if baddisks: 1173 raise errors.OpPrereqError("Device node(s) %s lie outside %s and" 1174 " cannot be adopted" % 1175 (utils.CommaJoin(baddisks), 1176 constants.ADOPTABLE_BLOCKDEV_ROOT), 1177 errors.ECODE_INVAL) 1178 1179 node_disks = self.rpc.call_bdev_sizes([pnode.uuid], 1180 list(all_disks))[pnode.uuid] 1181 node_disks.Raise("Cannot get block device information from node %s" % 1182 pnode.name) 1183 node_disks = node_disks.payload 1184 delta = all_disks.difference(node_disks.keys()) 1185 if delta: 1186 raise errors.OpPrereqError("Missing block device(s): %s" % 1187 utils.CommaJoin(delta), 1188 errors.ECODE_INVAL) 1189 for dsk in self.disks: 1190 dsk[constants.IDISK_SIZE] = \ 1191 int(float(node_disks[dsk[constants.IDISK_ADOPT]])) 1192 1193 # Verify instance specs 1194 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None) 1195 ispec = { 1196 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None), 
1197 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None), 1198 constants.ISPEC_DISK_COUNT: len(self.disks), 1199 constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE] 1200 for disk in self.disks], 1201 constants.ISPEC_NIC_COUNT: len(self.nics), 1202 constants.ISPEC_SPINDLE_USE: spindle_use, 1203 } 1204 1205 group_info = self.cfg.GetNodeGroup(pnode.group) 1206 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info) 1207 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec, 1208 self.op.disk_template) 1209 if not self.op.ignore_ipolicy and res: 1210 msg = ("Instance allocation to group %s (%s) violates policy: %s" % 1211 (pnode.group, group_info.name, utils.CommaJoin(res))) 1212 raise errors.OpPrereqError(msg, errors.ECODE_INVAL) 1213 1214 CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams) 1215 1216 CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant) 1217 # check OS parameters (remotely) 1218 CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full) 1219 1220 CheckNicsBridgesExist(self, self.nics, self.pnode.uuid) 1221 1222 #TODO: _CheckExtParams (remotely) 1223 # Check parameters for extstorage 1224 1225 # memory check on primary node 1226 #TODO(dynmem): use MINMEM for checking 1227 if self.op.start: 1228 hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}), 1229 self.op.hvparams) 1230 CheckNodeFreeMemory(self, self.pnode.uuid, 1231 "creating instance %s" % self.op.instance_name, 1232 self.be_full[constants.BE_MAXMEM], 1233 self.op.hypervisor, hvfull) 1234 1235 self.dry_run_result = list(node_uuids)
1236
1237 - def Exec(self, feedback_fn):
1238 """Create and add the instance to the cluster. 1239 1240 """ 1241 assert not (self.owned_locks(locking.LEVEL_NODE_RES) - 1242 self.owned_locks(locking.LEVEL_NODE)), \ 1243 "Node locks differ from node resource locks" 1244 assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC) 1245 1246 ht_kind = self.op.hypervisor 1247 if ht_kind in constants.HTS_REQ_PORT: 1248 network_port = self.cfg.AllocatePort() 1249 else: 1250 network_port = None 1251 1252 instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId()) 1253 1254 # This is ugly but we got a chicken-egg problem here 1255 # We can only take the group disk parameters, as the instance 1256 # has no disks yet (we are generating them right here). 1257 nodegroup = self.cfg.GetNodeGroup(self.pnode.group) 1258 disks = GenerateDiskTemplate(self, 1259 self.op.disk_template, 1260 instance_uuid, self.pnode.uuid, 1261 self.secondaries, 1262 self.disks, 1263 self.instance_file_storage_dir, 1264 self.op.file_driver, 1265 0, 1266 feedback_fn, 1267 self.cfg.GetGroupDiskParams(nodegroup)) 1268 1269 iobj = objects.Instance(name=self.op.instance_name, 1270 uuid=instance_uuid, 1271 os=self.op.os_type, 1272 primary_node=self.pnode.uuid, 1273 nics=self.nics, disks=disks, 1274 disk_template=self.op.disk_template, 1275 disks_active=False, 1276 admin_state=constants.ADMINST_DOWN, 1277 network_port=network_port, 1278 beparams=self.op.beparams, 1279 hvparams=self.op.hvparams, 1280 hypervisor=self.op.hypervisor, 1281 osparams=self.op.osparams, 1282 ) 1283 1284 if self.op.tags: 1285 for tag in self.op.tags: 1286 iobj.AddTag(tag) 1287 1288 if self.adopt_disks: 1289 if self.op.disk_template == constants.DT_PLAIN: 1290 # rename LVs to the newly-generated names; we need to construct 1291 # 'fake' LV disks with the old data, plus the new unique_id 1292 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks] 1293 rename_to = [] 1294 for t_dsk, a_dsk in zip(tmp_disks, self.disks): 1295 rename_to.append(t_dsk.logical_id) 1296 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT]) 1297 self.cfg.SetDiskID(t_dsk, self.pnode.uuid) 1298 result = self.rpc.call_blockdev_rename(self.pnode.uuid, 1299 zip(tmp_disks, rename_to)) 1300 result.Raise("Failed to rename adoped LVs") 1301 else: 1302 feedback_fn("* creating instance disks...") 1303 try: 1304 CreateDisks(self, iobj) 1305 except errors.OpExecError: 1306 self.LogWarning("Device creation failed") 1307 self.cfg.ReleaseDRBDMinors(self.op.instance_name) 1308 raise 1309 1310 feedback_fn("adding instance %s to cluster config" % self.op.instance_name) 1311 1312 self.cfg.AddInstance(iobj, self.proc.GetECId()) 1313 1314 # Declare that we don't want to remove the instance lock anymore, as we've 1315 # added the instance to the config 1316 del self.remove_locks[locking.LEVEL_INSTANCE] 1317 1318 if self.op.mode == constants.INSTANCE_IMPORT: 1319 # Release unused nodes 1320 ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid]) 1321 else: 1322 # Release all nodes 1323 ReleaseLocks(self, locking.LEVEL_NODE) 1324 1325 disk_abort = False 1326 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks: 1327 feedback_fn("* wiping instance disks...") 1328 try: 1329 WipeDisks(self, iobj) 1330 except errors.OpExecError, err: 1331 logging.exception("Wiping disks failed") 1332 self.LogWarning("Wiping instance disks failed (%s)", err) 1333 disk_abort = True 1334 1335 if disk_abort: 1336 # Something is already wrong with the disks, don't do anything else 1337 pass 1338 elif self.op.wait_for_sync: 
1339 disk_abort = not WaitForSync(self, iobj) 1340 elif iobj.disk_template in constants.DTS_INT_MIRROR: 1341 # make sure the disks are not degraded (still sync-ing is ok) 1342 feedback_fn("* checking mirrors status") 1343 disk_abort = not WaitForSync(self, iobj, oneshot=True) 1344 else: 1345 disk_abort = False 1346 1347 if disk_abort: 1348 RemoveDisks(self, iobj) 1349 self.cfg.RemoveInstance(iobj.uuid) 1350 # Make sure the instance lock gets removed 1351 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name 1352 raise errors.OpExecError("There are some degraded disks for" 1353 " this instance") 1354 1355 # instance disks are now active 1356 iobj.disks_active = True 1357 1358 # Release all node resource locks 1359 ReleaseLocks(self, locking.LEVEL_NODE_RES) 1360 1361 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks: 1362 # we need to set the disks ID to the primary node, since the 1363 # preceding code might or might have not done it, depending on 1364 # disk template and other options 1365 for disk in iobj.disks: 1366 self.cfg.SetDiskID(disk, self.pnode.uuid) 1367 if self.op.mode == constants.INSTANCE_CREATE: 1368 if not self.op.no_install: 1369 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and 1370 not self.op.wait_for_sync) 1371 if pause_sync: 1372 feedback_fn("* pausing disk sync to install instance OS") 1373 result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid, 1374 (iobj.disks, 1375 iobj), True) 1376 for idx, success in enumerate(result.payload): 1377 if not success: 1378 logging.warn("pause-sync of instance %s for disk %d failed", 1379 self.op.instance_name, idx) 1380 1381 feedback_fn("* running the instance OS create scripts...") 1382 # FIXME: pass debug option from opcode to backend 1383 os_add_result = \ 1384 self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False, 1385 self.op.debug_level) 1386 if pause_sync: 1387 feedback_fn("* resuming disk sync") 1388 result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid, 1389 (iobj.disks, 1390 iobj), False) 1391 for idx, success in enumerate(result.payload): 1392 if not success: 1393 logging.warn("resume-sync of instance %s for disk %d failed", 1394 self.op.instance_name, idx) 1395 1396 os_add_result.Raise("Could not add os for instance %s" 1397 " on node %s" % (self.op.instance_name, 1398 self.pnode.name)) 1399 1400 else: 1401 if self.op.mode == constants.INSTANCE_IMPORT: 1402 feedback_fn("* running the instance OS import scripts...") 1403 1404 transfers = [] 1405 1406 for idx, image in enumerate(self.src_images): 1407 if not image: 1408 continue 1409 1410 # FIXME: pass debug option from opcode to backend 1411 dt = masterd.instance.DiskTransfer("disk/%s" % idx, 1412 constants.IEIO_FILE, (image, ), 1413 constants.IEIO_SCRIPT, 1414 (iobj.disks[idx], idx), 1415 None) 1416 transfers.append(dt) 1417 1418 import_result = \ 1419 masterd.instance.TransferInstanceData(self, feedback_fn, 1420 self.op.src_node_uuid, 1421 self.pnode.uuid, 1422 self.pnode.secondary_ip, 1423 iobj, transfers) 1424 if not compat.all(import_result): 1425 self.LogWarning("Some disks for instance %s on node %s were not" 1426 " imported successfully" % (self.op.instance_name, 1427 self.pnode.name)) 1428 1429 rename_from = self._old_instance_name 1430 1431 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT: 1432 feedback_fn("* preparing remote import...") 1433 # The source cluster will stop the instance before attempting to make 1434 # a connection. 
In some cases stopping an instance can take a long 1435 # time, hence the shutdown timeout is added to the connection 1436 # timeout. 1437 connect_timeout = (constants.RIE_CONNECT_TIMEOUT + 1438 self.op.source_shutdown_timeout) 1439 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout) 1440 1441 assert iobj.primary_node == self.pnode.uuid 1442 disk_results = \ 1443 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode, 1444 self.source_x509_ca, 1445 self._cds, timeouts) 1446 if not compat.all(disk_results): 1447 # TODO: Should the instance still be started, even if some disks 1448 # failed to import (valid for local imports, too)? 1449 self.LogWarning("Some disks for instance %s on node %s were not" 1450 " imported successfully" % (self.op.instance_name, 1451 self.pnode.name)) 1452 1453 rename_from = self.source_instance_name 1454 1455 else: 1456 # also checked in the prereq part 1457 raise errors.ProgrammerError("Unknown OS initialization mode '%s'" 1458 % self.op.mode) 1459 1460 # Run rename script on newly imported instance 1461 assert iobj.name == self.op.instance_name 1462 feedback_fn("Running rename script for %s" % self.op.instance_name) 1463 result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj, 1464 rename_from, 1465 self.op.debug_level) 1466 result.Warn("Failed to run rename script for %s on node %s" % 1467 (self.op.instance_name, self.pnode.name), self.LogWarning) 1468 1469 assert not self.owned_locks(locking.LEVEL_NODE_RES) 1470 1471 if self.op.start: 1472 iobj.admin_state = constants.ADMINST_UP 1473 self.cfg.Update(iobj, feedback_fn) 1474 logging.info("Starting instance %s on node %s", self.op.instance_name, 1475 self.pnode.name) 1476 feedback_fn("* starting instance...") 1477 result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None), 1478 False, self.op.reason) 1479 result.Raise("Could not start instance") 1480 1481 return self.cfg.GetNodeNames(list(iobj.all_nodes))
1482
1483   
1484  class LUInstanceRename(LogicalUnit): 
1485 """Rename an instance. 1486 1487 """ 1488 HPATH = "instance-rename" 1489 HTYPE = constants.HTYPE_INSTANCE 1490
1491 - def CheckArguments(self):
1492 """Check arguments. 1493 1494 """ 1495 if self.op.ip_check and not self.op.name_check: 1496 # TODO: make the ip check more flexible and not depend on the name check 1497 raise errors.OpPrereqError("IP address check requires a name check", 1498 errors.ECODE_INVAL)
1499
1500 - def BuildHooksEnv(self):
1501 """Build hooks env. 1502 1503 This runs on master, primary and secondary nodes of the instance. 1504 1505 """ 1506 env = BuildInstanceHookEnvByObject(self, self.instance) 1507 env["INSTANCE_NEW_NAME"] = self.op.new_name 1508 return env
1509
1510 - def BuildHooksNodes(self):
1511 """Build hooks nodes. 1512 1513 """ 1514 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) 1515 return (nl, nl)
1516
1517 - def CheckPrereq(self):
1518 """Check prerequisites. 1519 1520 This checks that the instance is in the cluster and is not running. 1521 1522 """ 1523 (self.op.instance_uuid, self.op.instance_name) = \ 1524 ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid, 1525 self.op.instance_name) 1526 instance = self.cfg.GetInstanceInfo(self.op.instance_uuid) 1527 assert instance is not None 1528 1529 # It should actually not happen that an instance is running with a disabled 1530 # disk template, but in case it does, the renaming of file-based instances 1531 # will fail horribly. Thus, we test it before. 1532 if (instance.disk_template in constants.DTS_FILEBASED and 1533 self.op.new_name != instance.name): 1534 CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(), 1535 instance.disk_template) 1536 1537 CheckNodeOnline(self, instance.primary_node) 1538 CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING, 1539 msg="cannot rename") 1540 self.instance = instance 1541 1542 new_name = self.op.new_name 1543 if self.op.name_check: 1544 hostname = _CheckHostnameSane(self, new_name) 1545 new_name = self.op.new_name = hostname.name 1546 if (self.op.ip_check and 1547 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)): 1548 raise errors.OpPrereqError("IP %s of instance %s already in use" % 1549 (hostname.ip, new_name), 1550 errors.ECODE_NOTUNIQUE) 1551 1552 instance_names = [inst.name for 1553 inst in self.cfg.GetAllInstancesInfo().values()] 1554 if new_name in instance_names and new_name != instance.name: 1555 raise errors.OpPrereqError("Instance '%s' is already in the cluster" % 1556 new_name, errors.ECODE_EXISTS)
1557
1558 - def Exec(self, feedback_fn):
1559 """Rename the instance. 1560 1561 """ 1562 old_name = self.instance.name 1563 1564 rename_file_storage = False 1565 if (self.instance.disk_template in constants.DTS_FILEBASED and 1566 self.op.new_name != self.instance.name): 1567 old_file_storage_dir = os.path.dirname( 1568 self.instance.disks[0].logical_id[1]) 1569 rename_file_storage = True 1570 1571 self.cfg.RenameInstance(self.instance.uuid, self.op.new_name) 1572 # Change the instance lock. This is definitely safe while we hold the BGL. 1573 # Otherwise the new lock would have to be added in acquired mode. 1574 assert self.REQ_BGL 1575 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER) 1576 self.glm.remove(locking.LEVEL_INSTANCE, old_name) 1577 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name) 1578 1579 # re-read the instance from the configuration after rename 1580 renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid) 1581 1582 if rename_file_storage: 1583 new_file_storage_dir = os.path.dirname( 1584 renamed_inst.disks[0].logical_id[1]) 1585 result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node, 1586 old_file_storage_dir, 1587 new_file_storage_dir) 1588 result.Raise("Could not rename on node %s directory '%s' to '%s'" 1589 " (but the instance has been renamed in Ganeti)" % 1590 (self.cfg.GetNodeName(renamed_inst.primary_node), 1591 old_file_storage_dir, new_file_storage_dir)) 1592 1593 StartInstanceDisks(self, renamed_inst, None) 1594 # update info on disks 1595 info = GetInstanceInfoText(renamed_inst) 1596 for (idx, disk) in enumerate(renamed_inst.disks): 1597 for node_uuid in renamed_inst.all_nodes: 1598 self.cfg.SetDiskID(disk, node_uuid) 1599 result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info) 1600 result.Warn("Error setting info on node %s for disk %s" % 1601 (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning) 1602 try: 1603 result = self.rpc.call_instance_run_rename(renamed_inst.primary_node, 1604 renamed_inst, old_name, 1605 self.op.debug_level) 1606 result.Warn("Could not run OS rename script for instance %s on node %s" 1607 " (but the instance has been renamed in Ganeti)" % 1608 (renamed_inst.name, 1609 self.cfg.GetNodeName(renamed_inst.primary_node)), 1610 self.LogWarning) 1611 finally: 1612 ShutdownInstanceDisks(self, renamed_inst) 1613 1614 return renamed_inst.name
1615
1616 1617 -class LUInstanceRemove(LogicalUnit):
1618 """Remove an instance. 1619 1620 """ 1621 HPATH = "instance-remove" 1622 HTYPE = constants.HTYPE_INSTANCE 1623 REQ_BGL = False 1624
1625 - def ExpandNames(self):
1626 self._ExpandAndLockInstance() 1627 self.needed_locks[locking.LEVEL_NODE] = [] 1628 self.needed_locks[locking.LEVEL_NODE_RES] = [] 1629 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1630
1631 - def DeclareLocks(self, level):
1632 if level == locking.LEVEL_NODE: 1633 self._LockInstancesNodes() 1634 elif level == locking.LEVEL_NODE_RES: 1635 # Copy node locks 1636 self.needed_locks[locking.LEVEL_NODE_RES] = \ 1637 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1638
1639 - def BuildHooksEnv(self):
1640 """Build hooks env. 1641 1642 This runs on master, primary and secondary nodes of the instance. 1643 1644 """ 1645 env = BuildInstanceHookEnvByObject(self, self.instance) 1646 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout 1647 return env
1648
1649 - def BuildHooksNodes(self):
1650 """Build hooks nodes. 1651 1652 """ 1653 nl = [self.cfg.GetMasterNode()] 1654 nl_post = list(self.instance.all_nodes) + nl 1655 return (nl, nl_post)
1656
1657 - def CheckPrereq(self):
1658 """Check prerequisites. 1659 1660 This checks that the instance is in the cluster. 1661 1662 """ 1663 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid) 1664 assert self.instance is not None, \ 1665 "Cannot retrieve locked instance %s" % self.op.instance_name
1666
1667 - def Exec(self, feedback_fn):
1668 """Remove the instance. 1669 1670 """ 1671 logging.info("Shutting down instance %s on node %s", self.instance.name, 1672 self.cfg.GetNodeName(self.instance.primary_node)) 1673 1674 result = self.rpc.call_instance_shutdown(self.instance.primary_node, 1675 self.instance, 1676 self.op.shutdown_timeout, 1677 self.op.reason) 1678 if self.op.ignore_failures: 1679 result.Warn("Warning: can't shutdown instance", feedback_fn) 1680 else: 1681 result.Raise("Could not shutdown instance %s on node %s" % 1682 (self.instance.name, 1683 self.cfg.GetNodeName(self.instance.primary_node))) 1684 1685 assert (self.owned_locks(locking.LEVEL_NODE) == 1686 self.owned_locks(locking.LEVEL_NODE_RES)) 1687 assert not (set(self.instance.all_nodes) - 1688 self.owned_locks(locking.LEVEL_NODE)), \ 1689 "Not owning correct locks" 1690 1691 RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1692
1693 1694 -class LUInstanceMove(LogicalUnit):
1695 """Move an instance by data-copying. 1696 1697 """ 1698 HPATH = "instance-move" 1699 HTYPE = constants.HTYPE_INSTANCE 1700 REQ_BGL = False 1701
1702 - def ExpandNames(self):
1703 self._ExpandAndLockInstance() 1704 (self.op.target_node_uuid, self.op.target_node) = \ 1705 ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid, 1706 self.op.target_node) 1707 self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid] 1708 self.needed_locks[locking.LEVEL_NODE_RES] = [] 1709 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1710
1711 - def DeclareLocks(self, level):
1712 if level == locking.LEVEL_NODE: 1713 self._LockInstancesNodes(primary_only=True) 1714 elif level == locking.LEVEL_NODE_RES: 1715 # Copy node locks 1716 self.needed_locks[locking.LEVEL_NODE_RES] = \ 1717 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1718
1719 - def BuildHooksEnv(self):
1720 """Build hooks env. 1721 1722 This runs on master, primary and secondary nodes of the instance. 1723 1724 """ 1725 env = { 1726 "TARGET_NODE": self.op.target_node, 1727 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout, 1728 } 1729 env.update(BuildInstanceHookEnvByObject(self, self.instance)) 1730 return env
1731
1732 - def BuildHooksNodes(self):
1733 """Build hooks nodes. 1734 1735 """ 1736 nl = [ 1737 self.cfg.GetMasterNode(), 1738 self.instance.primary_node, 1739 self.op.target_node_uuid, 1740 ] 1741 return (nl, nl)
1742
1743 - def CheckPrereq(self):
1744 """Check prerequisites. 1745 1746 This checks that the instance is in the cluster. 1747 1748 """ 1749 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid) 1750 assert self.instance is not None, \ 1751 "Cannot retrieve locked instance %s" % self.op.instance_name 1752 1753 if self.instance.disk_template not in constants.DTS_COPYABLE: 1754 raise errors.OpPrereqError("Disk template %s not suitable for copying" % 1755 self.instance.disk_template, 1756 errors.ECODE_STATE) 1757 1758 target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid) 1759 assert target_node is not None, \ 1760 "Cannot retrieve locked node %s" % self.op.target_node 1761 1762 self.target_node_uuid = target_node.uuid 1763 if target_node.uuid == self.instance.primary_node: 1764 raise errors.OpPrereqError("Instance %s is already on the node %s" % 1765 (self.instance.name, target_node.name), 1766 errors.ECODE_STATE) 1767 1768 bep = self.cfg.GetClusterInfo().FillBE(self.instance) 1769 1770 for idx, dsk in enumerate(self.instance.disks): 1771 if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE, 1772 constants.DT_SHARED_FILE): 1773 raise errors.OpPrereqError("Instance disk %d has a complex layout," 1774 " cannot copy" % idx, errors.ECODE_STATE) 1775 1776 CheckNodeOnline(self, target_node.uuid) 1777 CheckNodeNotDrained(self, target_node.uuid) 1778 CheckNodeVmCapable(self, target_node.uuid) 1779 cluster = self.cfg.GetClusterInfo() 1780 group_info = self.cfg.GetNodeGroup(target_node.group) 1781 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info) 1782 CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg, 1783 ignore=self.op.ignore_ipolicy) 1784 1785 if self.instance.admin_state == constants.ADMINST_UP: 1786 # check memory requirements on the secondary node 1787 CheckNodeFreeMemory( 1788 self, target_node.uuid, "failing over instance %s" % 1789 self.instance.name, bep[constants.BE_MAXMEM], 1790 self.instance.hypervisor, 1791 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor]) 1792 else: 1793 self.LogInfo("Not checking memory on the secondary node as" 1794 " instance will not be started") 1795 1796 # check bridge existance 1797 CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1798
1799 - def Exec(self, feedback_fn):
1800 """Move an instance. 1801 1802 The move is done by shutting it down on its present node, copying 1803 the data over (slow) and starting it on the new node. 1804 1805 """ 1806 source_node = self.cfg.GetNodeInfo(self.instance.primary_node) 1807 target_node = self.cfg.GetNodeInfo(self.target_node_uuid) 1808 1809 self.LogInfo("Shutting down instance %s on source node %s", 1810 self.instance.name, source_node.name) 1811 1812 assert (self.owned_locks(locking.LEVEL_NODE) == 1813 self.owned_locks(locking.LEVEL_NODE_RES)) 1814 1815 result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance, 1816 self.op.shutdown_timeout, 1817 self.op.reason) 1818 if self.op.ignore_consistency: 1819 result.Warn("Could not shutdown instance %s on node %s. Proceeding" 1820 " anyway. Please make sure node %s is down. Error details" % 1821 (self.instance.name, source_node.name, source_node.name), 1822 self.LogWarning) 1823 else: 1824 result.Raise("Could not shutdown instance %s on node %s" % 1825 (self.instance.name, source_node.name)) 1826 1827 # create the target disks 1828 try: 1829 CreateDisks(self, self.instance, target_node_uuid=target_node.uuid) 1830 except errors.OpExecError: 1831 self.LogWarning("Device creation failed") 1832 self.cfg.ReleaseDRBDMinors(self.instance.uuid) 1833 raise 1834 1835 cluster_name = self.cfg.GetClusterInfo().cluster_name 1836 1837 errs = [] 1838 # activate, get path, copy the data over 1839 for idx, disk in enumerate(self.instance.disks): 1840 self.LogInfo("Copying data for disk %d", idx) 1841 result = self.rpc.call_blockdev_assemble( 1842 target_node.uuid, (disk, self.instance), self.instance.name, 1843 True, idx) 1844 if result.fail_msg: 1845 self.LogWarning("Can't assemble newly created disk %d: %s", 1846 idx, result.fail_msg) 1847 errs.append(result.fail_msg) 1848 break 1849 dev_path = result.payload 1850 result = self.rpc.call_blockdev_export(source_node.uuid, (disk, 1851 self.instance), 1852 target_node.secondary_ip, 1853 dev_path, cluster_name) 1854 if result.fail_msg: 1855 self.LogWarning("Can't copy data over for disk %d: %s", 1856 idx, result.fail_msg) 1857 errs.append(result.fail_msg) 1858 break 1859 1860 if errs: 1861 self.LogWarning("Some disks failed to copy, aborting") 1862 try: 1863 RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid) 1864 finally: 1865 self.cfg.ReleaseDRBDMinors(self.instance.uuid) 1866 raise errors.OpExecError("Errors during disk copy: %s" % 1867 (",".join(errs),)) 1868 1869 self.instance.primary_node = target_node.uuid 1870 self.cfg.Update(self.instance, feedback_fn) 1871 1872 self.LogInfo("Removing the disks on the original node") 1873 RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid) 1874 1875 # Only start the instance if it's marked as up 1876 if self.instance.admin_state == constants.ADMINST_UP: 1877 self.LogInfo("Starting instance %s on node %s", 1878 self.instance.name, target_node.name) 1879 1880 disks_ok, _ = AssembleInstanceDisks(self, self.instance, 1881 ignore_secondaries=True) 1882 if not disks_ok: 1883 ShutdownInstanceDisks(self, self.instance) 1884 raise errors.OpExecError("Can't activate the instance's disks") 1885 1886 result = self.rpc.call_instance_start(target_node.uuid, 1887 (self.instance, None, None), False, 1888 self.op.reason) 1889 msg = result.fail_msg 1890 if msg: 1891 ShutdownInstanceDisks(self, self.instance) 1892 raise errors.OpExecError("Could not start instance %s on node %s: %s" % 1893 (self.instance.name, target_node.name, msg))
1894
1895 1896 -class LUInstanceMultiAlloc(NoHooksLU):
1897 """Allocates multiple instances at the same time. 1898 1899 """ 1900 REQ_BGL = False 1901
1902 - def CheckArguments(self):
1903 """Check arguments. 1904 1905 """ 1906 nodes = [] 1907 for inst in self.op.instances: 1908 if inst.iallocator is not None: 1909 raise errors.OpPrereqError("iallocator are not allowed to be set on" 1910 " instance objects", errors.ECODE_INVAL) 1911 nodes.append(bool(inst.pnode)) 1912 if inst.disk_template in constants.DTS_INT_MIRROR: 1913 nodes.append(bool(inst.snode)) 1914 1915 has_nodes = compat.any(nodes) 1916 if compat.all(nodes) ^ has_nodes: 1917 raise errors.OpPrereqError("There are instance objects providing" 1918 " pnode/snode while others do not", 1919 errors.ECODE_INVAL) 1920 1921 if not has_nodes and self.op.iallocator is None: 1922 default_iallocator = self.cfg.GetDefaultIAllocator() 1923 if default_iallocator: 1924 self.op.iallocator = default_iallocator 1925 else: 1926 raise errors.OpPrereqError("No iallocator or nodes on the instances" 1927 " given and no cluster-wide default" 1928 " iallocator found; please specify either" 1929 " an iallocator or nodes on the instances" 1930 " or set a cluster-wide default iallocator", 1931 errors.ECODE_INVAL) 1932 1933 _CheckOpportunisticLocking(self.op) 1934 1935 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances]) 1936 if dups: 1937 raise errors.OpPrereqError("There are duplicate instance names: %s" % 1938 utils.CommaJoin(dups), errors.ECODE_INVAL)
1939
1940 - def ExpandNames(self):
1941 """Calculate the locks. 1942 1943 """ 1944 self.share_locks = ShareAll() 1945 self.needed_locks = { 1946 # iallocator will select nodes and even if no iallocator is used, 1947 # collisions with LUInstanceCreate should be avoided 1948 locking.LEVEL_NODE_ALLOC: locking.ALL_SET, 1949 } 1950 1951 if self.op.iallocator: 1952 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET 1953 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET 1954 1955 if self.op.opportunistic_locking: 1956 self.opportunistic_locks[locking.LEVEL_NODE] = True 1957 else: 1958 nodeslist = [] 1959 for inst in self.op.instances: 1960 (inst.pnode_uuid, inst.pnode) = \ 1961 ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode) 1962 nodeslist.append(inst.pnode_uuid) 1963 if inst.snode is not None: 1964 (inst.snode_uuid, inst.snode) = \ 1965 ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode) 1966 nodeslist.append(inst.snode_uuid) 1967 1968 self.needed_locks[locking.LEVEL_NODE] = nodeslist 1969 # Lock resources of instance's primary and secondary nodes (copy to 1970 # prevent accidental modification) 1971 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1972
1973 - def DeclareLocks(self, level):
1974 if level == locking.LEVEL_NODE_RES and \ 1975 self.opportunistic_locks[locking.LEVEL_NODE]: 1976 # Even when using opportunistic locking, we require the same set of 1977 # NODE_RES locks as we got NODE locks 1978 self.needed_locks[locking.LEVEL_NODE_RES] = \ 1979 self.owned_locks(locking.LEVEL_NODE)
1980
1981 - def CheckPrereq(self):
1982 """Check prerequisite. 1983 1984 """ 1985 if self.op.iallocator: 1986 cluster = self.cfg.GetClusterInfo() 1987 default_vg = self.cfg.GetVGName() 1988 ec_id = self.proc.GetECId() 1989 1990 if self.op.opportunistic_locking: 1991 # Only consider nodes for which a lock is held 1992 node_whitelist = self.cfg.GetNodeNames( 1993 list(self.owned_locks(locking.LEVEL_NODE))) 1994 else: 1995 node_whitelist = None 1996 1997 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg), 1998 _ComputeNics(op, cluster, None, 1999 self.cfg, ec_id), 2000 _ComputeFullBeParams(op, cluster), 2001 node_whitelist) 2002 for op in self.op.instances] 2003 2004 req = iallocator.IAReqMultiInstanceAlloc(instances=insts) 2005 ial = iallocator.IAllocator(self.cfg, self.rpc, req) 2006 2007 ial.Run(self.op.iallocator) 2008 2009 if not ial.success: 2010 raise errors.OpPrereqError("Can't compute nodes using" 2011 " iallocator '%s': %s" % 2012 (self.op.iallocator, ial.info), 2013 errors.ECODE_NORES) 2014 2015 self.ia_result = ial.result 2016 2017 if self.op.dry_run: 2018 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), { 2019 constants.JOB_IDS_KEY: [], 2020 })
2021
2022 - def _ConstructPartialResult(self):
2023 """Constructs the partial result. 2024 2025 """ 2026 if self.op.iallocator: 2027 (allocatable, failed_insts) = self.ia_result 2028 allocatable_insts = map(compat.fst, allocatable) 2029 else: 2030 allocatable_insts = [op.instance_name for op in self.op.instances] 2031 failed_insts = [] 2032 2033 return { 2034 opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts, 2035 opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts, 2036 }
2037
2038 - def Exec(self, feedback_fn):
2039 """Executes the opcode. 2040 2041 """ 2042 jobs = [] 2043 if self.op.iallocator: 2044 op2inst = dict((op.instance_name, op) for op in self.op.instances) 2045 (allocatable, failed) = self.ia_result 2046 2047 for (name, node_names) in allocatable: 2048 op = op2inst.pop(name) 2049 2050 (op.pnode_uuid, op.pnode) = \ 2051 ExpandNodeUuidAndName(self.cfg, None, node_names[0]) 2052 if len(node_names) > 1: 2053 (op.snode_uuid, op.snode) = \ 2054 ExpandNodeUuidAndName(self.cfg, None, node_names[1]) 2055 2056 jobs.append([op]) 2057 2058 missing = set(op2inst.keys()) - set(failed) 2059 assert not missing, \ 2060 "Iallocator did return incomplete result: %s" % \ 2061 utils.CommaJoin(missing) 2062 else: 2063 jobs.extend([op] for op in self.op.instances) 2064 2065 return ResultWithJobs(jobs, **self._ConstructPartialResult())
2066
2067 2068 -class _InstNicModPrivate:
2069 """Data structure for network interface modifications. 2070 2071 Used by L{LUInstanceSetParams}. 2072 2073 """
2074 - def __init__(self):
2075 self.params = None 2076 self.filled = None
2077
2078 2079 -def _PrepareContainerMods(mods, private_fn):
2080 """Prepares a list of container modifications by adding a private data field. 2081 2082 @type mods: list of tuples; (operation, index, parameters) 2083 @param mods: List of modifications 2084 @type private_fn: callable or None 2085 @param private_fn: Callable for constructing a private data field for a 2086 modification 2087 @rtype: list 2088 2089 """ 2090 if private_fn is None: 2091 fn = lambda: None 2092 else: 2093 fn = private_fn 2094 2095 return [(op, idx, params, fn()) for (op, idx, params) in mods]
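# Illustrative usage sketch, not part of the original module: this is how
# LUInstanceSetParams (further below) prepares NIC modifications before
# applying them; the parameter values here are made up for demonstration.
#
#   mods = [
#     (constants.DDM_ADD, -1, {constants.INIC_MAC: constants.VALUE_AUTO}),
#     (constants.DDM_MODIFY, 0, {constants.INIC_NAME: "eth-front"}),
#     ]
#   nicmod = _PrepareContainerMods(mods, _InstNicModPrivate)
#   # each entry is now a 4-tuple (op, idx, params, private); e.g. the first
#   # one carries a fresh _InstNicModPrivate instance as its private data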
2096
2097 2098 -def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2099 """Checks if nodes have enough physical CPUs 2100 2101 This function checks if all given nodes have the needed number of 2102 physical CPUs. In case any node has less CPUs or we cannot get the 2103 information from the node, this function raises an OpPrereqError 2104 exception. 2105 2106 @type lu: C{LogicalUnit} 2107 @param lu: a logical unit from which we get configuration data 2108 @type node_uuids: C{list} 2109 @param node_uuids: the list of node UUIDs to check 2110 @type requested: C{int} 2111 @param requested: the minimum acceptable number of physical CPUs 2112 @type hypervisor_specs: list of pairs (string, dict of strings) 2113 @param hypervisor_specs: list of hypervisor specifications in 2114 pairs (hypervisor_name, hvparams) 2115 @raise errors.OpPrereqError: if the node doesn't have enough CPUs, 2116 or we cannot check the node 2117 2118 """ 2119 nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs) 2120 for node_uuid in node_uuids: 2121 info = nodeinfo[node_uuid] 2122 node_name = lu.cfg.GetNodeName(node_uuid) 2123 info.Raise("Cannot get current information from node %s" % node_name, 2124 prereq=True, ecode=errors.ECODE_ENVIRON) 2125 (_, _, (hv_info, )) = info.payload 2126 num_cpus = hv_info.get("cpu_total", None) 2127 if not isinstance(num_cpus, int): 2128 raise errors.OpPrereqError("Can't compute the number of physical CPUs" 2129 " on node %s, result was '%s'" % 2130 (node_name, num_cpus), errors.ECODE_ENVIRON) 2131 if requested > num_cpus: 2132 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are " 2133 "required" % (node_name, num_cpus, requested), 2134 errors.ECODE_NORES)
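# Illustrative usage sketch, not part of the original module: this mirrors
# the call made by LUInstanceSetParams.CheckPrereq further below when a new
# CPU mask is given; hypervisor_specs is a list of (hypervisor_name, hvparams)
# pairs.
#
#   hvspecs = [(self.instance.hypervisor,
#               self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])]
#   _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
#                           max_requested_cpu + 1, hvspecs)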
2135
2136 2137 -def GetItemFromContainer(identifier, kind, container):
2138 """Return the item referred to by the identifier. 2139 2140 @type identifier: string 2141 @param identifier: Item index or name or UUID 2142 @type kind: string 2143 @param kind: One-word item description 2144 @type container: list 2145 @param container: Container to get the item from 2146 2147 """ 2148 # Index 2149 try: 2150 idx = int(identifier) 2151 if idx == -1: 2152 # Append 2153 absidx = len(container) - 1 2154 elif idx < 0: 2155 raise IndexError("Not accepting negative indices other than -1") 2156 elif idx > len(container): 2157 raise IndexError("Got %s index %s, but there are only %s" % 2158 (kind, idx, len(container))) 2159 else: 2160 absidx = idx 2161 return (absidx, container[idx]) 2162 except ValueError: 2163 pass 2164 2165 for idx, item in enumerate(container): 2166 if item.uuid == identifier or item.name == identifier: 2167 return (idx, item) 2168 2169 raise errors.OpPrereqError("Cannot find %s with identifier %s" % 2170 (kind, identifier), errors.ECODE_NOENT)
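# Illustrative usage sketch, not part of the original module: looking up an
# item by index, by the special append index -1, or by name/UUID; the
# container contents and the name "eth-front" are assumed for demonstration.
#
#   nics = instance.nics                           # list of objects.NIC
#   GetItemFromContainer("0", "nic", nics)         # -> (0, nics[0])
#   GetItemFromContainer("-1", "nic", nics)        # -> last NIC in the list
#   GetItemFromContainer("eth-front", "nic", nics) # match on name or UUID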
2171
2172 2173 -def _ApplyContainerMods(kind, container, chgdesc, mods, 2174 create_fn, modify_fn, remove_fn):
2175 """Applies descriptions in C{mods} to C{container}. 2176 2177 @type kind: string 2178 @param kind: One-word item description 2179 @type container: list 2180 @param container: Container to modify 2181 @type chgdesc: None or list 2182 @param chgdesc: List of applied changes 2183 @type mods: list 2184 @param mods: Modifications as returned by L{_PrepareContainerMods} 2185 @type create_fn: callable 2186 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD}); 2187 receives absolute item index, parameters and private data object as added 2188 by L{_PrepareContainerMods}, returns tuple containing new item and changes 2189 as list 2190 @type modify_fn: callable 2191 @param modify_fn: Callback for modifying an existing item 2192 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters 2193 and private data object as added by L{_PrepareContainerMods}, returns 2194 changes as list 2195 @type remove_fn: callable 2196 @param remove_fn: Callback on removing item; receives absolute item index, 2197 item and private data object as added by L{_PrepareContainerMods} 2198 2199 """ 2200 for (op, identifier, params, private) in mods: 2201 changes = None 2202 2203 if op == constants.DDM_ADD: 2204 # Calculate where item will be added 2205 # When adding an item, identifier can only be an index 2206 try: 2207 idx = int(identifier) 2208 except ValueError: 2209 raise errors.OpPrereqError("Only possitive integer or -1 is accepted as" 2210 " identifier for %s" % constants.DDM_ADD, 2211 errors.ECODE_INVAL) 2212 if idx == -1: 2213 addidx = len(container) 2214 else: 2215 if idx < 0: 2216 raise IndexError("Not accepting negative indices other than -1") 2217 elif idx > len(container): 2218 raise IndexError("Got %s index %s, but there are only %s" % 2219 (kind, idx, len(container))) 2220 addidx = idx 2221 2222 if create_fn is None: 2223 item = params 2224 else: 2225 (item, changes) = create_fn(addidx, params, private) 2226 2227 if idx == -1: 2228 container.append(item) 2229 else: 2230 assert idx >= 0 2231 assert idx <= len(container) 2232 # list.insert does so before the specified index 2233 container.insert(idx, item) 2234 else: 2235 # Retrieve existing item 2236 (absidx, item) = GetItemFromContainer(identifier, kind, container) 2237 2238 if op == constants.DDM_REMOVE: 2239 assert not params 2240 2241 if remove_fn is not None: 2242 remove_fn(absidx, item, private) 2243 2244 changes = [("%s/%s" % (kind, absidx), "remove")] 2245 2246 assert container[absidx] == item 2247 del container[absidx] 2248 elif op == constants.DDM_MODIFY: 2249 if modify_fn is not None: 2250 changes = modify_fn(absidx, item, params, private) 2251 else: 2252 raise errors.ProgrammerError("Unhandled operation '%s'" % op) 2253 2254 assert _TApplyContModsCbChanges(changes) 2255 2256 if not (chgdesc is None or changes is None): 2257 chgdesc.extend(changes)
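# Illustrative usage sketch, not part of the original module: the two ways
# LUInstanceSetParams.CheckPrereq (further below) drives this function.  Disk
# modifications are dry-run on a deep copy with only a prepare callback, while
# NIC modifications use create/modify callbacks and collect change
# descriptions for the hooks:
#
#   disks = copy.deepcopy(self.instance.disks)
#   _ApplyContainerMods("disk", disks, None, self.diskmod, None,
#                       _PrepareDiskMod, None)
#
#   self._nic_chgdesc = []
#   nics = [nic.Copy() for nic in self.instance.nics]
#   _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
#                       self._CreateNewNic, self._ApplyNicMods, None)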
2258
2259 2260 -def _UpdateIvNames(base_index, disks):
2261 """Updates the C{iv_name} attribute of disks. 2262 2263 @type disks: list of L{objects.Disk} 2264 2265 """ 2266 for (idx, disk) in enumerate(disks): 2267 disk.iv_name = "disk/%s" % (base_index + idx, )
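# Illustrative usage sketch, not part of the original module: renumbering
# disks appended after one existing disk; the added_disks list is assumed.
#
#   _UpdateIvNames(1, added_disks)
#   # added_disks[0].iv_name == "disk/1", added_disks[1].iv_name == "disk/2"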
2268
2269 2270 -class LUInstanceSetParams(LogicalUnit):
2271 """Modifies an instance's parameters. 2272 2273 """ 2274 HPATH = "instance-modify" 2275 HTYPE = constants.HTYPE_INSTANCE 2276 REQ_BGL = False 2277 2278 @staticmethod
2279 - def _UpgradeDiskNicMods(kind, mods, verify_fn):
2280 assert ht.TList(mods) 2281 assert not mods or len(mods[0]) in (2, 3) 2282 2283 if mods and len(mods[0]) == 2: 2284 result = [] 2285 2286 addremove = 0 2287 for op, params in mods: 2288 if op in (constants.DDM_ADD, constants.DDM_REMOVE): 2289 result.append((op, -1, params)) 2290 addremove += 1 2291 2292 if addremove > 1: 2293 raise errors.OpPrereqError("Only one %s add or remove operation is" 2294 " supported at a time" % kind, 2295 errors.ECODE_INVAL) 2296 else: 2297 result.append((constants.DDM_MODIFY, op, params)) 2298 2299 assert verify_fn(result) 2300 else: 2301 result = mods 2302 2303 return result
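  # Illustrative sketch, not part of the original module: how legacy two-item
  # modifications are upgraded to the (op, identifier, params) form; the
  # parameter values are made up for demonstration.
  #
  #   old style: [(constants.DDM_ADD, {...}),
  #               (0, {constants.INIC_NAME: "eth-front"})]
  #   upgraded:  [(constants.DDM_ADD, -1, {...}),
  #               (constants.DDM_MODIFY, 0, {constants.INIC_NAME: "eth-front"})]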
2304 2305 @staticmethod
2306 - def _CheckMods(kind, mods, key_types, item_fn):
2307 """Ensures requested disk/NIC modifications are valid. 2308 2309 """ 2310 for (op, _, params) in mods: 2311 assert ht.TDict(params) 2312 2313 # If 'key_types' is an empty dict, we assume we have an 2314 # 'ext' template and thus do not ForceDictType 2315 if key_types: 2316 utils.ForceDictType(params, key_types) 2317 2318 if op == constants.DDM_REMOVE: 2319 if params: 2320 raise errors.OpPrereqError("No settings should be passed when" 2321 " removing a %s" % kind, 2322 errors.ECODE_INVAL) 2323 elif op in (constants.DDM_ADD, constants.DDM_MODIFY): 2324 item_fn(op, params) 2325 else: 2326 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2327
2328 - def _VerifyDiskModification(self, op, params, excl_stor):
2329 """Verifies a disk modification. 2330 2331 """ 2332 if op == constants.DDM_ADD: 2333 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR) 2334 if mode not in constants.DISK_ACCESS_SET: 2335 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode, 2336 errors.ECODE_INVAL) 2337 2338 size = params.get(constants.IDISK_SIZE, None) 2339 if size is None: 2340 raise errors.OpPrereqError("Required disk parameter '%s' missing" % 2341 constants.IDISK_SIZE, errors.ECODE_INVAL) 2342 2343 try: 2344 size = int(size) 2345 except (TypeError, ValueError), err: 2346 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err, 2347 errors.ECODE_INVAL) 2348 2349 params[constants.IDISK_SIZE] = size 2350 name = params.get(constants.IDISK_NAME, None) 2351 if name is not None and name.lower() == constants.VALUE_NONE: 2352 params[constants.IDISK_NAME] = None 2353 2354 CheckSpindlesExclusiveStorage(params, excl_stor, True) 2355 2356 elif op == constants.DDM_MODIFY: 2357 if constants.IDISK_SIZE in params: 2358 raise errors.OpPrereqError("Disk size change not possible, use" 2359 " grow-disk", errors.ECODE_INVAL) 2360 2361 # Disk modification supports changing only the disk name and mode. 2362 # Changing arbitrary parameters is allowed only for the ext disk template. 2363 if self.instance.disk_template != constants.DT_EXT: 2364 utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES) 2365 2366 name = params.get(constants.IDISK_NAME, None) 2367 if name is not None and name.lower() == constants.VALUE_NONE: 2368 params[constants.IDISK_NAME] = None
2369 2370 @staticmethod
2371 - def _VerifyNicModification(op, params):
2372 """Verifies a network interface modification. 2373 2374 """ 2375 if op in (constants.DDM_ADD, constants.DDM_MODIFY): 2376 ip = params.get(constants.INIC_IP, None) 2377 name = params.get(constants.INIC_NAME, None) 2378 req_net = params.get(constants.INIC_NETWORK, None) 2379 link = params.get(constants.NIC_LINK, None) 2380 mode = params.get(constants.NIC_MODE, None) 2381 if name is not None and name.lower() == constants.VALUE_NONE: 2382 params[constants.INIC_NAME] = None 2383 if req_net is not None: 2384 if req_net.lower() == constants.VALUE_NONE: 2385 params[constants.INIC_NETWORK] = None 2386 req_net = None 2387 elif link is not None or mode is not None: 2388 raise errors.OpPrereqError("If network is given" 2389 " mode or link should not", 2390 errors.ECODE_INVAL) 2391 2392 if op == constants.DDM_ADD: 2393 macaddr = params.get(constants.INIC_MAC, None) 2394 if macaddr is None: 2395 params[constants.INIC_MAC] = constants.VALUE_AUTO 2396 2397 if ip is not None: 2398 if ip.lower() == constants.VALUE_NONE: 2399 params[constants.INIC_IP] = None 2400 else: 2401 if ip.lower() == constants.NIC_IP_POOL: 2402 if op == constants.DDM_ADD and req_net is None: 2403 raise errors.OpPrereqError("If ip=pool, parameter network" 2404 " cannot be none", 2405 errors.ECODE_INVAL) 2406 else: 2407 if not netutils.IPAddress.IsValid(ip): 2408 raise errors.OpPrereqError("Invalid IP address '%s'" % ip, 2409 errors.ECODE_INVAL) 2410 2411 if constants.INIC_MAC in params: 2412 macaddr = params[constants.INIC_MAC] 2413 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE): 2414 macaddr = utils.NormalizeAndValidateMac(macaddr) 2415 2416 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO: 2417 raise errors.OpPrereqError("'auto' is not a valid MAC address when" 2418 " modifying an existing NIC", 2419 errors.ECODE_INVAL)
2420
2421 - def CheckArguments(self):
2422 if not (self.op.nics or self.op.disks or self.op.disk_template or 2423 self.op.hvparams or self.op.beparams or self.op.os_name or 2424 self.op.osparams or self.op.offline is not None or 2425 self.op.runtime_mem or self.op.pnode): 2426 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL) 2427 2428 if self.op.hvparams: 2429 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, 2430 "hypervisor", "instance", "cluster") 2431 2432 self.op.disks = self._UpgradeDiskNicMods( 2433 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications) 2434 self.op.nics = self._UpgradeDiskNicMods( 2435 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications) 2436 2437 if self.op.disks and self.op.disk_template is not None: 2438 raise errors.OpPrereqError("Disk template conversion and other disk" 2439 " changes not supported at the same time", 2440 errors.ECODE_INVAL) 2441 2442 if (self.op.disk_template and 2443 self.op.disk_template in constants.DTS_INT_MIRROR and 2444 self.op.remote_node is None): 2445 raise errors.OpPrereqError("Changing the disk template to a mirrored" 2446 " one requires specifying a secondary node", 2447 errors.ECODE_INVAL) 2448 2449 # Check NIC modifications 2450 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES, 2451 self._VerifyNicModification) 2452 2453 if self.op.pnode: 2454 (self.op.pnode_uuid, self.op.pnode) = \ 2455 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2456
2457 - def ExpandNames(self):
2458 self._ExpandAndLockInstance() 2459 self.needed_locks[locking.LEVEL_NODEGROUP] = [] 2460 # Can't even acquire node locks in shared mode as upcoming changes in 2461 # Ganeti 2.6 will start to modify the node object on disk conversion 2462 self.needed_locks[locking.LEVEL_NODE] = [] 2463 self.needed_locks[locking.LEVEL_NODE_RES] = [] 2464 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE 2465 # Look node group to look up the ipolicy 2466 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2467
2468 - def DeclareLocks(self, level):
2469 if level == locking.LEVEL_NODEGROUP: 2470 assert not self.needed_locks[locking.LEVEL_NODEGROUP] 2471 # Acquire locks for the instance's nodegroups optimistically. Needs 2472 # to be verified in CheckPrereq 2473 self.needed_locks[locking.LEVEL_NODEGROUP] = \ 2474 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid) 2475 elif level == locking.LEVEL_NODE: 2476 self._LockInstancesNodes() 2477 if self.op.disk_template and self.op.remote_node: 2478 (self.op.remote_node_uuid, self.op.remote_node) = \ 2479 ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid, 2480 self.op.remote_node) 2481 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid) 2482 elif level == locking.LEVEL_NODE_RES and self.op.disk_template: 2483 # Copy node locks 2484 self.needed_locks[locking.LEVEL_NODE_RES] = \ 2485 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2486
2487 - def BuildHooksEnv(self):
2488 """Build hooks env. 2489 2490 This runs on the master, primary and secondaries. 2491 2492 """ 2493 args = {} 2494 if constants.BE_MINMEM in self.be_new: 2495 args["minmem"] = self.be_new[constants.BE_MINMEM] 2496 if constants.BE_MAXMEM in self.be_new: 2497 args["maxmem"] = self.be_new[constants.BE_MAXMEM] 2498 if constants.BE_VCPUS in self.be_new: 2499 args["vcpus"] = self.be_new[constants.BE_VCPUS] 2500 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk 2501 # information at all. 2502 2503 if self._new_nics is not None: 2504 nics = [] 2505 2506 for nic in self._new_nics: 2507 n = copy.deepcopy(nic) 2508 nicparams = self.cluster.SimpleFillNIC(n.nicparams) 2509 n.nicparams = nicparams 2510 nics.append(NICToTuple(self, n)) 2511 2512 args["nics"] = nics 2513 2514 env = BuildInstanceHookEnvByObject(self, self.instance, override=args) 2515 if self.op.disk_template: 2516 env["NEW_DISK_TEMPLATE"] = self.op.disk_template 2517 if self.op.runtime_mem: 2518 env["RUNTIME_MEMORY"] = self.op.runtime_mem 2519 2520 return env
2521
2522 - def BuildHooksNodes(self):
2523 """Build hooks nodes. 2524 2525 """ 2526 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) 2527 return (nl, nl)
2528
2529 - def _PrepareNicModification(self, params, private, old_ip, old_net_uuid, 2530 old_params, cluster, pnode_uuid):
2531 2532 update_params_dict = dict([(key, params[key]) 2533 for key in constants.NICS_PARAMETERS 2534 if key in params]) 2535 2536 req_link = update_params_dict.get(constants.NIC_LINK, None) 2537 req_mode = update_params_dict.get(constants.NIC_MODE, None) 2538 2539 new_net_uuid = None 2540 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid) 2541 if new_net_uuid_or_name: 2542 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name) 2543 new_net_obj = self.cfg.GetNetwork(new_net_uuid) 2544 2545 if old_net_uuid: 2546 old_net_obj = self.cfg.GetNetwork(old_net_uuid) 2547 2548 if new_net_uuid: 2549 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid) 2550 if not netparams: 2551 raise errors.OpPrereqError("No netparams found for the network" 2552 " %s, probably not connected" % 2553 new_net_obj.name, errors.ECODE_INVAL) 2554 new_params = dict(netparams) 2555 else: 2556 new_params = GetUpdatedParams(old_params, update_params_dict) 2557 2558 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES) 2559 2560 new_filled_params = cluster.SimpleFillNIC(new_params) 2561 objects.NIC.CheckParameterSyntax(new_filled_params) 2562 2563 new_mode = new_filled_params[constants.NIC_MODE] 2564 if new_mode == constants.NIC_MODE_BRIDGED: 2565 bridge = new_filled_params[constants.NIC_LINK] 2566 msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg 2567 if msg: 2568 msg = "Error checking bridges on node '%s': %s" % \ 2569 (self.cfg.GetNodeName(pnode_uuid), msg) 2570 if self.op.force: 2571 self.warn.append(msg) 2572 else: 2573 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON) 2574 2575 elif new_mode == constants.NIC_MODE_ROUTED: 2576 ip = params.get(constants.INIC_IP, old_ip) 2577 if ip is None: 2578 raise errors.OpPrereqError("Cannot set the NIC IP address to None" 2579 " on a routed NIC", errors.ECODE_INVAL) 2580 2581 elif new_mode == constants.NIC_MODE_OVS: 2582 # TODO: check OVS link 2583 self.LogInfo("OVS links are currently not checked for correctness") 2584 2585 if constants.INIC_MAC in params: 2586 mac = params[constants.INIC_MAC] 2587 if mac is None: 2588 raise errors.OpPrereqError("Cannot unset the NIC MAC address", 2589 errors.ECODE_INVAL) 2590 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE): 2591 # otherwise generate the MAC address 2592 params[constants.INIC_MAC] = \ 2593 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId()) 2594 else: 2595 # or validate/reserve the current one 2596 try: 2597 self.cfg.ReserveMAC(mac, self.proc.GetECId()) 2598 except errors.ReservationError: 2599 raise errors.OpPrereqError("MAC address '%s' already in use" 2600 " in cluster" % mac, 2601 errors.ECODE_NOTUNIQUE) 2602 elif new_net_uuid != old_net_uuid: 2603 2604 def get_net_prefix(net_uuid): 2605 mac_prefix = None 2606 if net_uuid: 2607 nobj = self.cfg.GetNetwork(net_uuid) 2608 mac_prefix = nobj.mac_prefix 2609 2610 return mac_prefix
2611 2612 new_prefix = get_net_prefix(new_net_uuid) 2613 old_prefix = get_net_prefix(old_net_uuid) 2614 if old_prefix != new_prefix: 2615 params[constants.INIC_MAC] = \ 2616 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId()) 2617 2618 # if there is a change in (ip, network) tuple 2619 new_ip = params.get(constants.INIC_IP, old_ip) 2620 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid): 2621 if new_ip: 2622 # if IP is pool then require a network and generate one IP 2623 if new_ip.lower() == constants.NIC_IP_POOL: 2624 if new_net_uuid: 2625 try: 2626 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId()) 2627 except errors.ReservationError: 2628 raise errors.OpPrereqError("Unable to get a free IP" 2629 " from the address pool", 2630 errors.ECODE_STATE) 2631 self.LogInfo("Chose IP %s from network %s", 2632 new_ip, 2633 new_net_obj.name) 2634 params[constants.INIC_IP] = new_ip 2635 else: 2636 raise errors.OpPrereqError("ip=pool, but no network found", 2637 errors.ECODE_INVAL) 2638 # Reserve new IP if in the new network if any 2639 elif new_net_uuid: 2640 try: 2641 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId()) 2642 self.LogInfo("Reserving IP %s in network %s", 2643 new_ip, new_net_obj.name) 2644 except errors.ReservationError: 2645 raise errors.OpPrereqError("IP %s not available in network %s" % 2646 (new_ip, new_net_obj.name), 2647 errors.ECODE_NOTUNIQUE) 2648 # new network is None so check if new IP is a conflicting IP 2649 elif self.op.conflicts_check: 2650 _CheckForConflictingIp(self, new_ip, pnode_uuid) 2651 2652 # release old IP if old network is not None 2653 if old_ip and old_net_uuid: 2654 try: 2655 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId()) 2656 except errors.AddressPoolError: 2657 logging.warning("Release IP %s not contained in network %s", 2658 old_ip, old_net_obj.name) 2659 2660 # there are no changes in (ip, network) tuple and old network is not None 2661 elif (old_net_uuid is not None and 2662 (req_link is not None or req_mode is not None)): 2663 raise errors.OpPrereqError("Not allowed to change link or mode of" 2664 " a NIC that is connected to a network", 2665 errors.ECODE_INVAL) 2666 2667 private.params = new_params 2668 private.filled = new_filled_params
2669
2670 - def _PreCheckDiskTemplate(self, pnode_info):
2671 """CheckPrereq checks related to a new disk template.""" 2672 # Arguments are passed to avoid configuration lookups 2673 pnode_uuid = self.instance.primary_node 2674 if self.instance.disk_template == self.op.disk_template: 2675 raise errors.OpPrereqError("Instance already has disk template %s" % 2676 self.instance.disk_template, 2677 errors.ECODE_INVAL) 2678 2679 if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template): 2680 raise errors.OpPrereqError("Disk template '%s' is not enabled for this" 2681 " cluster." % self.op.disk_template) 2682 2683 if (self.instance.disk_template, 2684 self.op.disk_template) not in self._DISK_CONVERSIONS: 2685 raise errors.OpPrereqError("Unsupported disk template conversion from" 2686 " %s to %s" % (self.instance.disk_template, 2687 self.op.disk_template), 2688 errors.ECODE_INVAL) 2689 CheckInstanceState(self, self.instance, INSTANCE_DOWN, 2690 msg="cannot change disk template") 2691 if self.op.disk_template in constants.DTS_INT_MIRROR: 2692 if self.op.remote_node_uuid == pnode_uuid: 2693 raise errors.OpPrereqError("Given new secondary node %s is the same" 2694 " as the primary node of the instance" % 2695 self.op.remote_node, errors.ECODE_STATE) 2696 CheckNodeOnline(self, self.op.remote_node_uuid) 2697 CheckNodeNotDrained(self, self.op.remote_node_uuid) 2698 # FIXME: here we assume that the old instance type is DT_PLAIN 2699 assert self.instance.disk_template == constants.DT_PLAIN 2700 disks = [{constants.IDISK_SIZE: d.size, 2701 constants.IDISK_VG: d.logical_id[0]} 2702 for d in self.instance.disks] 2703 required = ComputeDiskSizePerVG(self.op.disk_template, disks) 2704 CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required) 2705 2706 snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid) 2707 snode_group = self.cfg.GetNodeGroup(snode_info.group) 2708 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster, 2709 snode_group) 2710 CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg, 2711 ignore=self.op.ignore_ipolicy) 2712 if pnode_info.group != snode_info.group: 2713 self.LogWarning("The primary and secondary nodes are in two" 2714 " different node groups; the disk parameters" 2715 " from the first disk's node group will be" 2716 " used") 2717 2718 if not self.op.disk_template in constants.DTS_EXCL_STORAGE: 2719 # Make sure none of the nodes require exclusive storage 2720 nodes = [pnode_info] 2721 if self.op.disk_template in constants.DTS_INT_MIRROR: 2722 assert snode_info 2723 nodes.append(snode_info) 2724 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n) 2725 if compat.any(map(has_es, nodes)): 2726 errmsg = ("Cannot convert disk template from %s to %s when exclusive" 2727 " storage is enabled" % (self.instance.disk_template, 2728 self.op.disk_template)) 2729 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2730
2731 - def _PreCheckDisks(self, ispec):
2732 """CheckPrereq checks related to disk changes. 2733 2734 @type ispec: dict 2735 @param ispec: instance specs to be updated with the new disks 2736 2737 """ 2738 self.diskparams = self.cfg.GetInstanceDiskParams(self.instance) 2739 2740 excl_stor = compat.any( 2741 rpc.GetExclusiveStorageForNodes(self.cfg, 2742 self.instance.all_nodes).values() 2743 ) 2744 2745 # Check disk modifications. This is done here and not in CheckArguments 2746 # (as with NICs), because we need to know the instance's disk template 2747 ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor) 2748 if self.instance.disk_template == constants.DT_EXT: 2749 self._CheckMods("disk", self.op.disks, {}, ver_fn) 2750 else: 2751 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES, 2752 ver_fn) 2753 2754 self.diskmod = _PrepareContainerMods(self.op.disks, None) 2755 2756 # Check the validity of the `provider' parameter 2757 if self.instance.disk_template in constants.DT_EXT: 2758 for mod in self.diskmod: 2759 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None) 2760 if mod[0] == constants.DDM_ADD: 2761 if ext_provider is None: 2762 raise errors.OpPrereqError("Instance template is '%s' and parameter" 2763 " '%s' missing, during disk add" % 2764 (constants.DT_EXT, 2765 constants.IDISK_PROVIDER), 2766 errors.ECODE_NOENT) 2767 elif mod[0] == constants.DDM_MODIFY: 2768 if ext_provider: 2769 raise errors.OpPrereqError("Parameter '%s' is invalid during disk" 2770 " modification" % 2771 constants.IDISK_PROVIDER, 2772 errors.ECODE_INVAL) 2773 else: 2774 for mod in self.diskmod: 2775 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None) 2776 if ext_provider is not None: 2777 raise errors.OpPrereqError("Parameter '%s' is only valid for" 2778 " instances of type '%s'" % 2779 (constants.IDISK_PROVIDER, 2780 constants.DT_EXT), 2781 errors.ECODE_INVAL) 2782 2783 if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS: 2784 raise errors.OpPrereqError("Disk operations not supported for" 2785 " diskless instances", errors.ECODE_INVAL) 2786 2787 def _PrepareDiskMod(_, disk, params, __): 2788 disk.name = params.get(constants.IDISK_NAME, None)
2789 2790 # Verify disk changes (operating on a copy) 2791 disks = copy.deepcopy(self.instance.disks) 2792 _ApplyContainerMods("disk", disks, None, self.diskmod, None, 2793 _PrepareDiskMod, None) 2794 utils.ValidateDeviceNames("disk", disks) 2795 if len(disks) > constants.MAX_DISKS: 2796 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add" 2797 " more" % constants.MAX_DISKS, 2798 errors.ECODE_STATE) 2799 disk_sizes = [disk.size for disk in self.instance.disks] 2800 disk_sizes.extend(params["size"] for (op, idx, params, private) in 2801 self.diskmod if op == constants.DDM_ADD) 2802 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes) 2803 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes 2804 2805 if self.op.offline is not None and self.op.offline: 2806 CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE, 2807 msg="can't change to offline") 2808
2809 - def CheckPrereq(self):
2810 """Check prerequisites. 2811 2812 This only checks the instance list against the existing names. 2813 2814 """ 2815 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE) 2816 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid) 2817 self.cluster = self.cfg.GetClusterInfo() 2818 2819 assert self.instance is not None, \ 2820 "Cannot retrieve locked instance %s" % self.op.instance_name 2821 2822 pnode_uuid = self.instance.primary_node 2823 2824 self.warn = [] 2825 2826 if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and 2827 not self.op.force): 2828 # verify that the instance is not up 2829 instance_info = self.rpc.call_instance_info( 2830 pnode_uuid, self.instance.name, self.instance.hypervisor, 2831 self.instance.hvparams) 2832 if instance_info.fail_msg: 2833 self.warn.append("Can't get instance runtime information: %s" % 2834 instance_info.fail_msg) 2835 elif instance_info.payload: 2836 raise errors.OpPrereqError("Instance is still running on %s" % 2837 self.cfg.GetNodeName(pnode_uuid), 2838 errors.ECODE_STATE) 2839 2840 assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE) 2841 node_uuids = list(self.instance.all_nodes) 2842 pnode_info = self.cfg.GetNodeInfo(pnode_uuid) 2843 2844 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups) 2845 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP) 2846 group_info = self.cfg.GetNodeGroup(pnode_info.group) 2847 2848 # dictionary with instance information after the modification 2849 ispec = {} 2850 2851 # Prepare NIC modifications 2852 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate) 2853 2854 # OS change 2855 if self.op.os_name and not self.op.force: 2856 CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name, 2857 self.op.force_variant) 2858 instance_os = self.op.os_name 2859 else: 2860 instance_os = self.instance.os 2861 2862 assert not (self.op.disk_template and self.op.disks), \ 2863 "Can't modify disk template and apply disk changes at the same time" 2864 2865 if self.op.disk_template: 2866 self._PreCheckDiskTemplate(pnode_info) 2867 2868 self._PreCheckDisks(ispec) 2869 2870 # hvparams processing 2871 if self.op.hvparams: 2872 hv_type = self.instance.hypervisor 2873 i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams) 2874 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES) 2875 hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict) 2876 2877 # local check 2878 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new) 2879 CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new) 2880 self.hv_proposed = self.hv_new = hv_new # the new actual values 2881 self.hv_inst = i_hvdict # the new dict (without defaults) 2882 else: 2883 self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor, 2884 self.instance.os, 2885 self.instance.hvparams) 2886 self.hv_new = self.hv_inst = {} 2887 2888 # beparams processing 2889 if self.op.beparams: 2890 i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams, 2891 use_none=True) 2892 objects.UpgradeBeParams(i_bedict) 2893 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES) 2894 be_new = self.cluster.SimpleFillBE(i_bedict) 2895 self.be_proposed = self.be_new = be_new # the new actual values 2896 self.be_inst = i_bedict # the new dict (without defaults) 2897 else: 2898 self.be_new = self.be_inst = {} 2899 self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams) 2900 be_old = 
self.cluster.FillBE(self.instance) 2901 2902 # CPU param validation -- checking every time a parameter is 2903 # changed to cover all cases where either CPU mask or vcpus have 2904 # changed 2905 if (constants.BE_VCPUS in self.be_proposed and 2906 constants.HV_CPU_MASK in self.hv_proposed): 2907 cpu_list = \ 2908 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK]) 2909 # Verify mask is consistent with number of vCPUs. Can skip this 2910 # test if only 1 entry in the CPU mask, which means same mask 2911 # is applied to all vCPUs. 2912 if (len(cpu_list) > 1 and 2913 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]): 2914 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the" 2915 " CPU mask [%s]" % 2916 (self.be_proposed[constants.BE_VCPUS], 2917 self.hv_proposed[constants.HV_CPU_MASK]), 2918 errors.ECODE_INVAL) 2919 2920 # Only perform this test if a new CPU mask is given 2921 if constants.HV_CPU_MASK in self.hv_new: 2922 # Calculate the largest CPU number requested 2923 max_requested_cpu = max(map(max, cpu_list)) 2924 # Check that all of the instance's nodes have enough physical CPUs to 2925 # satisfy the requested CPU mask 2926 hvspecs = [(self.instance.hypervisor, 2927 self.cfg.GetClusterInfo() 2928 .hvparams[self.instance.hypervisor])] 2929 _CheckNodesPhysicalCPUs(self, self.instance.all_nodes, 2930 max_requested_cpu + 1, 2931 hvspecs) 2932 2933 # osparams processing 2934 if self.op.osparams: 2935 i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams) 2936 CheckOSParams(self, True, node_uuids, instance_os, i_osdict) 2937 self.os_inst = i_osdict # the new dict (without defaults) 2938 else: 2939 self.os_inst = {} 2940 2941 #TODO(dynmem): do the appropriate check involving MINMEM 2942 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and 2943 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]): 2944 mem_check_list = [pnode_uuid] 2945 if be_new[constants.BE_AUTO_BALANCE]: 2946 # either we changed auto_balance to yes or it was from before 2947 mem_check_list.extend(self.instance.secondary_nodes) 2948 instance_info = self.rpc.call_instance_info( 2949 pnode_uuid, self.instance.name, self.instance.hypervisor, 2950 self.instance.hvparams) 2951 hvspecs = [(self.instance.hypervisor, 2952 self.cluster.hvparams[self.instance.hypervisor])] 2953 nodeinfo = self.rpc.call_node_info(mem_check_list, None, 2954 hvspecs) 2955 pninfo = nodeinfo[pnode_uuid] 2956 msg = pninfo.fail_msg 2957 if msg: 2958 # Assume the primary node is unreachable and go ahead 2959 self.warn.append("Can't get info from primary node %s: %s" % 2960 (self.cfg.GetNodeName(pnode_uuid), msg)) 2961 else: 2962 (_, _, (pnhvinfo, )) = pninfo.payload 2963 if not isinstance(pnhvinfo.get("memory_free", None), int): 2964 self.warn.append("Node data from primary node %s doesn't contain" 2965 " free memory information" % 2966 self.cfg.GetNodeName(pnode_uuid)) 2967 elif instance_info.fail_msg: 2968 self.warn.append("Can't get instance runtime information: %s" % 2969 instance_info.fail_msg) 2970 else: 2971 if instance_info.payload: 2972 current_mem = int(instance_info.payload["memory"]) 2973 else: 2974 # Assume instance not running 2975 # (there is a slight race condition here, but it's not very 2976 # probable, and we have no other way to check) 2977 # TODO: Describe race condition 2978 current_mem = 0 2979 #TODO(dynmem): do the appropriate check involving MINMEM 2980 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem - 2981 pnhvinfo["memory_free"]) 2982 if miss_mem > 0: 2983 
raise errors.OpPrereqError("This change will prevent the instance" 2984 " from starting, due to %d MB of memory" 2985 " missing on its primary node" % 2986 miss_mem, errors.ECODE_NORES) 2987 2988 if be_new[constants.BE_AUTO_BALANCE]: 2989 for node_uuid, nres in nodeinfo.items(): 2990 if node_uuid not in self.instance.secondary_nodes: 2991 continue 2992 nres.Raise("Can't get info from secondary node %s" % 2993 self.cfg.GetNodeName(node_uuid), prereq=True, 2994 ecode=errors.ECODE_STATE) 2995 (_, _, (nhvinfo, )) = nres.payload 2996 if not isinstance(nhvinfo.get("memory_free", None), int): 2997 raise errors.OpPrereqError("Secondary node %s didn't return free" 2998 " memory information" % 2999 self.cfg.GetNodeName(node_uuid), 3000 errors.ECODE_STATE) 3001 #TODO(dynmem): do the appropriate check involving MINMEM 3002 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]: 3003 raise errors.OpPrereqError("This change will prevent the instance" 3004 " from failover to its secondary node" 3005 " %s, due to not enough memory" % 3006 self.cfg.GetNodeName(node_uuid), 3007 errors.ECODE_STATE) 3008 3009 if self.op.runtime_mem: 3010 remote_info = self.rpc.call_instance_info( 3011 self.instance.primary_node, self.instance.name, 3012 self.instance.hypervisor, 3013 self.cluster.hvparams[self.instance.hypervisor]) 3014 remote_info.Raise("Error checking node %s" % 3015 self.cfg.GetNodeName(self.instance.primary_node)) 3016 if not remote_info.payload: # not running already 3017 raise errors.OpPrereqError("Instance %s is not running" % 3018 self.instance.name, errors.ECODE_STATE) 3019 3020 current_memory = remote_info.payload["memory"] 3021 if (not self.op.force and 3022 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or 3023 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])): 3024 raise errors.OpPrereqError("Instance %s must have memory between %d" 3025 " and %d MB of memory unless --force is" 3026 " given" % 3027 (self.instance.name, 3028 self.be_proposed[constants.BE_MINMEM], 3029 self.be_proposed[constants.BE_MAXMEM]), 3030 errors.ECODE_INVAL) 3031 3032 delta = self.op.runtime_mem - current_memory 3033 if delta > 0: 3034 CheckNodeFreeMemory( 3035 self, self.instance.primary_node, 3036 "ballooning memory for instance %s" % self.instance.name, delta, 3037 self.instance.hypervisor, 3038 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor]) 3039 3040 # make self.cluster visible in the functions below 3041 cluster = self.cluster 3042 3043 def _PrepareNicCreate(_, params, private): 3044 self._PrepareNicModification(params, private, None, None, 3045 {}, cluster, pnode_uuid) 3046 return (None, None)
3047 3048 def _PrepareNicMod(_, nic, params, private): 3049 self._PrepareNicModification(params, private, nic.ip, nic.network, 3050 nic.nicparams, cluster, pnode_uuid) 3051 return None 3052 3053 def _PrepareNicRemove(_, params, __): 3054 ip = params.ip 3055 net = params.network 3056 if net is not None and ip is not None: 3057 self.cfg.ReleaseIp(net, ip, self.proc.GetECId()) 3058 3059 # Verify NIC changes (operating on copy) 3060 nics = self.instance.nics[:] 3061 _ApplyContainerMods("NIC", nics, None, self.nicmod, 3062 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove) 3063 if len(nics) > constants.MAX_NICS: 3064 raise errors.OpPrereqError("Instance has too many network interfaces" 3065 " (%d), cannot add more" % constants.MAX_NICS, 3066 errors.ECODE_STATE) 3067 3068 # Pre-compute NIC changes (necessary to use result in hooks) 3069 self._nic_chgdesc = [] 3070 if self.nicmod: 3071 # Operate on copies as this is still in prereq 3072 nics = [nic.Copy() for nic in self.instance.nics] 3073 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod, 3074 self._CreateNewNic, self._ApplyNicMods, None) 3075 # Verify that NIC names are unique and valid 3076 utils.ValidateDeviceNames("NIC", nics) 3077 self._new_nics = nics 3078 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics) 3079 else: 3080 self._new_nics = None 3081 ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics) 3082 3083 if not self.op.ignore_ipolicy: 3084 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster, 3085 group_info) 3086 3087 # Fill ispec with backend parameters 3088 ispec[constants.ISPEC_SPINDLE_USE] = \ 3089 self.be_new.get(constants.BE_SPINDLE_USE, None) 3090 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS, 3091 None) 3092 3093 # Copy ispec to verify parameters with min/max values separately 3094 if self.op.disk_template: 3095 new_disk_template = self.op.disk_template 3096 else: 3097 new_disk_template = self.instance.disk_template 3098 ispec_max = ispec.copy() 3099 ispec_max[constants.ISPEC_MEM_SIZE] = \ 3100 self.be_new.get(constants.BE_MAXMEM, None) 3101 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max, 3102 new_disk_template) 3103 ispec_min = ispec.copy() 3104 ispec_min[constants.ISPEC_MEM_SIZE] = \ 3105 self.be_new.get(constants.BE_MINMEM, None) 3106 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min, 3107 new_disk_template) 3108 3109 if (res_max or res_min): 3110 # FIXME: Improve error message by including information about whether 3111 # the upper or lower limit of the parameter fails the ipolicy. 3112 msg = ("Instance allocation to group %s (%s) violates policy: %s" % 3113 (group_info, group_info.name, 3114 utils.CommaJoin(set(res_max + res_min)))) 3115 raise errors.OpPrereqError(msg, errors.ECODE_INVAL) 3116
3117   def _ConvertPlainToDrbd(self, feedback_fn):
3118 """Converts an instance from plain to drbd. 3119 3120 """ 3121 feedback_fn("Converting template to drbd") 3122 pnode_uuid = self.instance.primary_node 3123 snode_uuid = self.op.remote_node_uuid 3124 3125 assert self.instance.disk_template == constants.DT_PLAIN 3126 3127 # create a fake disk info for _GenerateDiskTemplate 3128 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode, 3129 constants.IDISK_VG: d.logical_id[0], 3130 constants.IDISK_NAME: d.name} 3131 for d in self.instance.disks] 3132 new_disks = GenerateDiskTemplate(self, self.op.disk_template, 3133 self.instance.uuid, pnode_uuid, 3134 [snode_uuid], disk_info, None, None, 0, 3135 feedback_fn, self.diskparams) 3136 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks, 3137 self.diskparams) 3138 p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid) 3139 s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid) 3140 info = GetInstanceInfoText(self.instance) 3141 feedback_fn("Creating additional volumes...") 3142 # first, create the missing data and meta devices 3143 for disk in anno_disks: 3144 # unfortunately this is... not too nice 3145 CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1], 3146 info, True, p_excl_stor) 3147 for child in disk.children: 3148 CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True, 3149 s_excl_stor) 3150 # at this stage, all new LVs have been created, we can rename the 3151 # old ones 3152 feedback_fn("Renaming original volumes...") 3153 rename_list = [(o, n.children[0].logical_id) 3154 for (o, n) in zip(self.instance.disks, new_disks)] 3155 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list) 3156 result.Raise("Failed to rename original LVs") 3157 3158 feedback_fn("Initializing DRBD devices...") 3159 # all child devices are in place, we can now create the DRBD devices 3160 try: 3161 for disk in anno_disks: 3162 for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor), 3163 (snode_uuid, s_excl_stor)]: 3164 f_create = node_uuid == pnode_uuid 3165 CreateSingleBlockDev(self, node_uuid, self.instance, disk, info, 3166 f_create, excl_stor) 3167 except errors.GenericError, e: 3168 feedback_fn("Initializing of DRBD devices failed;" 3169 " renaming back original volumes...") 3170 for disk in new_disks: 3171 self.cfg.SetDiskID(disk, pnode_uuid) 3172 rename_back_list = [(n.children[0], o.logical_id) 3173 for (n, o) in zip(new_disks, self.instance.disks)] 3174 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list) 3175 result.Raise("Failed to rename LVs back after error %s" % str(e)) 3176 raise 3177 3178 # at this point, the instance has been modified 3179 self.instance.disk_template = constants.DT_DRBD8 3180 self.instance.disks = new_disks 3181 self.cfg.Update(self.instance, feedback_fn) 3182 3183 # Release node locks while waiting for sync 3184 ReleaseLocks(self, locking.LEVEL_NODE) 3185 3186 # disks are created, waiting for sync 3187 disk_abort = not WaitForSync(self, self.instance, 3188 oneshot=not self.op.wait_for_sync) 3189 if disk_abort: 3190 raise errors.OpExecError("There are some degraded disks for" 3191 " this instance, please cleanup manually")
3192 3193 # Node resource locks will be released by caller 3194
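The rename step in _ConvertPlainToDrbd relies on GenerateDiskTemplate returning the new DRBD disks in the same order as the existing plain disks, so that zip() pairs every old data LV with the data child (children[0]) of its replacement. A toy illustration of that pairing, not part of this module, using minimal stand-in objects and invented LV names:

class _StubDisk(object):
  def __init__(self, logical_id=None, children=None):
    self.logical_id = logical_id
    self.children = children or []

old_disks = [_StubDisk(("xenvg", "inst1-data-0")),
             _StubDisk(("xenvg", "inst1-data-1"))]
new_disks = [_StubDisk(children=[_StubDisk(("xenvg", "new-data-0")),
                                 _StubDisk(("xenvg", "new-meta-0"))]),
             _StubDisk(children=[_StubDisk(("xenvg", "new-data-1")),
                                 _StubDisk(("xenvg", "new-meta-1"))])]

# (old disk, logical_id of the freshly created data child): the shape of the
# list handed to call_blockdev_rename above.
rename_list = [(o, n.children[0].logical_id)
               for (o, n) in zip(old_disks, new_disks)]
assert rename_list[0][1] == ("xenvg", "new-data-0")
assert rename_list[1][1] == ("xenvg", "new-data-1")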
3195   def _ConvertDrbdToPlain(self, feedback_fn):
3196 """Converts an instance from drbd to plain. 3197 3198 """ 3199 assert len(self.instance.secondary_nodes) == 1 3200 assert self.instance.disk_template == constants.DT_DRBD8 3201 3202 pnode_uuid = self.instance.primary_node 3203 snode_uuid = self.instance.secondary_nodes[0] 3204 feedback_fn("Converting template to plain") 3205 3206 old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg) 3207 new_disks = [d.children[0] for d in self.instance.disks] 3208 3209 # copy over size, mode and name 3210 for parent, child in zip(old_disks, new_disks): 3211 child.size = parent.size 3212 child.mode = parent.mode 3213 child.name = parent.name 3214 3215 # this is a DRBD disk, return its port to the pool 3216 # NOTE: this must be done right before the call to cfg.Update! 3217 for disk in old_disks: 3218 tcp_port = disk.logical_id[2] 3219 self.cfg.AddTcpUdpPort(tcp_port) 3220 3221 # update instance structure 3222 self.instance.disks = new_disks 3223 self.instance.disk_template = constants.DT_PLAIN 3224 _UpdateIvNames(0, self.instance.disks) 3225 self.cfg.Update(self.instance, feedback_fn) 3226 3227 # Release locks in case removing disks takes a while 3228 ReleaseLocks(self, locking.LEVEL_NODE) 3229 3230 feedback_fn("Removing volumes on the secondary node...") 3231 for disk in old_disks: 3232 self.cfg.SetDiskID(disk, snode_uuid) 3233 msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg 3234 if msg: 3235 self.LogWarning("Could not remove block device %s on node %s," 3236 " continuing anyway: %s", disk.iv_name, 3237 self.cfg.GetNodeName(snode_uuid), msg) 3238 3239 feedback_fn("Removing unneeded volumes on the primary node...") 3240 for idx, disk in enumerate(old_disks): 3241 meta = disk.children[1] 3242 self.cfg.SetDiskID(meta, pnode_uuid) 3243 msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg 3244 if msg: 3245 self.LogWarning("Could not remove metadata for disk %d on node %s," 3246 " continuing anyway: %s", idx, 3247 self.cfg.GetNodeName(pnode_uuid), msg)
3248
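Going the other way, _ConvertDrbdToPlain keeps only each DRBD disk's data child (children[0]), discards the metadata child, and returns the TCP port stored in the DRBD logical_id to the cluster pool. A minimal stand-alone sketch of that reshaping, not part of this module, with invented values:

class _StubDisk(object):
  def __init__(self, size=None, mode=None, name=None,
               logical_id=None, children=None):
    self.size, self.mode, self.name = size, mode, name
    self.logical_id = logical_id
    self.children = children or []

# A DRBD8 logical_id carries (node_a, node_b, tcp_port, minor_a, minor_b, key)
drbd = _StubDisk(size=10240, mode="rw", name="disk0",
                 logical_id=("node-a", "node-b", 11000, 0, 1, "secret"),
                 children=[_StubDisk(), _StubDisk()])

plain = drbd.children[0]        # the data LV becomes the whole disk
plain.size = drbd.size          # size/mode/name are copied from the parent
plain.mode = drbd.mode
plain.name = drbd.name

freed_port = drbd.logical_id[2]   # handed back via cfg.AddTcpUdpPort above
assert (plain.size, plain.mode, freed_port) == (10240, "rw", 11000)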
3249   def _CreateNewDisk(self, idx, params, _):
3250 """Creates a new disk. 3251 3252 """ 3253 # add a new disk 3254 if self.instance.disk_template in constants.DTS_FILEBASED: 3255 (file_driver, file_path) = self.instance.disks[0].logical_id 3256 file_path = os.path.dirname(file_path) 3257 else: 3258 file_driver = file_path = None 3259 3260 disk = \ 3261 GenerateDiskTemplate(self, self.instance.disk_template, 3262 self.instance.uuid, self.instance.primary_node, 3263 self.instance.secondary_nodes, [params], file_path, 3264 file_driver, idx, self.Log, self.diskparams)[0] 3265 3266 new_disks = CreateDisks(self, self.instance, disks=[disk]) 3267 3268 if self.cluster.prealloc_wipe_disks: 3269 # Wipe new disk 3270 WipeOrCleanupDisks(self, self.instance, 3271 disks=[(idx, disk, 0)], 3272 cleanup=new_disks) 3273 3274 return (disk, [ 3275 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)), 3276 ])
3277
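In _CreateNewDisk, a file-based disk's logical_id is a (driver, path) pair, and new disks are placed in the directory of the instance's first disk, which is what the dirname() call extracts. A tiny illustration with a made-up storage path:

import os

# Both elements come from disks[0].logical_id; the driver is reused as-is.
file_driver, file_path = ("loop", "/srv/ganeti/file-storage/inst1/disk0")
new_disk_dir = os.path.dirname(file_path)
assert new_disk_dir == "/srv/ganeti/file-storage/inst1"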
3278   def _ModifyDisk(self, idx, disk, params, _):
3279 """Modifies a disk. 3280 3281 """ 3282 changes = [] 3283 if constants.IDISK_MODE in params: 3284 disk.mode = params.get(constants.IDISK_MODE) 3285 changes.append(("disk.mode/%d" % idx, disk.mode)) 3286 3287 if constants.IDISK_NAME in params: 3288 disk.name = params.get(constants.IDISK_NAME) 3289 changes.append(("disk.name/%d" % idx, disk.name)) 3290 3291 # Modify arbitrary params in case instance template is ext 3292 for key, value in params.iteritems(): 3293 if (key not in constants.MODIFIABLE_IDISK_PARAMS and 3294 self.instance.disk_template == constants.DT_EXT): 3295 # stolen from GetUpdatedParams: default means reset/delete 3296 if value.lower() == constants.VALUE_DEFAULT: 3297 try: 3298 del disk.params[key] 3299 except KeyError: 3300 pass 3301 else: 3302 disk.params[key] = value 3303 changes.append(("disk.params:%s/%d" % (key, idx), value)) 3304 3305 return changes
3306
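Only for the ext disk template does _ModifyDisk accept keys outside MODIFIABLE_IDISK_PARAMS, and giving such a key the literal value "default" deletes it, the same reset convention GetUpdatedParams uses. The rule in isolation, not part of this module, with hypothetical parameter names:

def apply_ext_params(disk_params, updates):
  # "default" resets (removes) a key; any other value overrides it.
  changes = []
  for key, value in updates.items():
    if value.lower() == "default":
      disk_params.pop(key, None)
    else:
      disk_params[key] = value
      changes.append((key, value))
  return changes

params = {"provider": "sample-provider", "iops": "500"}
apply_ext_params(params, {"iops": "default", "cache": "writeback"})
assert params == {"provider": "sample-provider", "cache": "writeback"}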
3307   def _RemoveDisk(self, idx, root, _):
3308 """Removes a disk. 3309 3310 """ 3311 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg) 3312 for node_uuid, disk in anno_disk.ComputeNodeTree( 3313 self.instance.primary_node): 3314 self.cfg.SetDiskID(disk, node_uuid) 3315 msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg 3316 if msg: 3317 self.LogWarning("Could not remove disk/%d on node '%s': %s," 3318 " continuing anyway", idx, 3319 self.cfg.GetNodeName(node_uuid), msg) 3320 3321 # if this is a DRBD disk, return its port to the pool 3322 if root.dev_type in constants.DTS_DRBD: 3323 self.cfg.AddTcpUdpPort(root.logical_id[2])
3324
3325   def _CreateNewNic(self, idx, params, private):
3326 """Creates data structure for a new network interface. 3327 3328 """ 3329 mac = params[constants.INIC_MAC] 3330 ip = params.get(constants.INIC_IP, None) 3331 net = params.get(constants.INIC_NETWORK, None) 3332 name = params.get(constants.INIC_NAME, None) 3333 net_uuid = self.cfg.LookupNetwork(net) 3334 #TODO: not private.filled?? can a nic have no nicparams?? 3335 nicparams = private.filled 3336 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name, 3337 nicparams=nicparams) 3338 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId()) 3339 3340 return (nobj, [ 3341 ("nic.%d" % idx, 3342 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" % 3343 (mac, ip, private.filled[constants.NIC_MODE], 3344 private.filled[constants.NIC_LINK], 3345 net)), 3346 ])
3347
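Whatever a callback changes, it reports it as (field, value) pairs; for a new NIC the value is the "add:..." summary built from the filled nicparams, and these pairs later end up in the job result and hooks. A rough stand-alone sketch of building one such entry, not part of this module, with made-up addresses:

filled = {"mode": "bridged", "link": "br0"}   # stands in for private.filled
mac, ip, net = ("aa:00:00:35:2f:01", "198.51.100.10", "net-frontend")

idx = 2
change = ("nic.%d" % idx,
          "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
          (mac, ip, filled["mode"], filled["link"], net))
assert change[0] == "nic.2"
assert change[1].startswith("add:mac=aa:00:00:35:2f:01,ip=198.51.100.10")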
3348   def _ApplyNicMods(self, idx, nic, params, private):
3349 """Modifies a network interface. 3350 3351 """ 3352 changes = [] 3353 3354 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]: 3355 if key in params: 3356 changes.append(("nic.%s/%d" % (key, idx), params[key])) 3357 setattr(nic, key, params[key]) 3358 3359 new_net = params.get(constants.INIC_NETWORK, nic.network) 3360 new_net_uuid = self.cfg.LookupNetwork(new_net) 3361 if new_net_uuid != nic.network: 3362 changes.append(("nic.network/%d" % idx, new_net)) 3363 nic.network = new_net_uuid 3364 3365 if private.filled: 3366 nic.nicparams = private.filled 3367 3368 for (key, val) in nic.nicparams.items(): 3369 changes.append(("nic.%s/%d" % (key, idx), val)) 3370 3371 return changes
3372
3373   def Exec(self, feedback_fn):
3374 """Modifies an instance. 3375 3376 All parameters take effect only at the next restart of the instance. 3377 3378 """ 3379 # Process here the warnings from CheckPrereq, as we don't have a 3380 # feedback_fn there. 3381 # TODO: Replace with self.LogWarning 3382 for warn in self.warn: 3383 feedback_fn("WARNING: %s" % warn) 3384 3385 assert ((self.op.disk_template is None) ^ 3386 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \ 3387 "Not owning any node resource locks" 3388 3389 result = [] 3390 3391 # New primary node 3392 if self.op.pnode_uuid: 3393 self.instance.primary_node = self.op.pnode_uuid 3394 3395 # runtime memory 3396 if self.op.runtime_mem: 3397 rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node, 3398 self.instance, 3399 self.op.runtime_mem) 3400 rpcres.Raise("Cannot modify instance runtime memory") 3401 result.append(("runtime_memory", self.op.runtime_mem)) 3402 3403 # Apply disk changes 3404 _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod, 3405 self._CreateNewDisk, self._ModifyDisk, 3406 self._RemoveDisk) 3407 _UpdateIvNames(0, self.instance.disks) 3408 3409 if self.op.disk_template: 3410 if __debug__: 3411 check_nodes = set(self.instance.all_nodes) 3412 if self.op.remote_node_uuid: 3413 check_nodes.add(self.op.remote_node_uuid) 3414 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]: 3415 owned = self.owned_locks(level) 3416 assert not (check_nodes - owned), \ 3417 ("Not owning the correct locks, owning %r, expected at least %r" % 3418 (owned, check_nodes)) 3419 3420 r_shut = ShutdownInstanceDisks(self, self.instance) 3421 if not r_shut: 3422 raise errors.OpExecError("Cannot shutdown instance disks, unable to" 3423 " proceed with disk template conversion") 3424 mode = (self.instance.disk_template, self.op.disk_template) 3425 try: 3426 self._DISK_CONVERSIONS[mode](self, feedback_fn) 3427 except: 3428 self.cfg.ReleaseDRBDMinors(self.instance.uuid) 3429 raise 3430 result.append(("disk_template", self.op.disk_template)) 3431 3432 assert self.instance.disk_template == self.op.disk_template, \ 3433 ("Expected disk template '%s', found '%s'" % 3434 (self.op.disk_template, self.instance.disk_template)) 3435 3436 # Release node and resource locks if there are any (they might already have 3437 # been released during disk conversion) 3438 ReleaseLocks(self, locking.LEVEL_NODE) 3439 ReleaseLocks(self, locking.LEVEL_NODE_RES) 3440 3441 # Apply NIC changes 3442 if self._new_nics is not None: 3443 self.instance.nics = self._new_nics 3444 result.extend(self._nic_chgdesc) 3445 3446 # hvparams changes 3447 if self.op.hvparams: 3448 self.instance.hvparams = self.hv_inst 3449 for key, val in self.op.hvparams.iteritems(): 3450 result.append(("hv/%s" % key, val)) 3451 3452 # beparams changes 3453 if self.op.beparams: 3454 self.instance.beparams = self.be_inst 3455 for key, val in self.op.beparams.iteritems(): 3456 result.append(("be/%s" % key, val)) 3457 3458 # OS change 3459 if self.op.os_name: 3460 self.instance.os = self.op.os_name 3461 3462 # osparams changes 3463 if self.op.osparams: 3464 self.instance.osparams = self.os_inst 3465 for key, val in self.op.osparams.iteritems(): 3466 result.append(("os/%s" % key, val)) 3467 3468 if self.op.offline is None: 3469 # Ignore 3470 pass 3471 elif self.op.offline: 3472 # Mark instance as offline 3473 self.cfg.MarkInstanceOffline(self.instance.uuid) 3474 result.append(("admin_state", constants.ADMINST_OFFLINE)) 3475 else: 3476 # Mark instance as online, but stopped 3477 
self.cfg.MarkInstanceDown(self.instance.uuid) 3478 result.append(("admin_state", constants.ADMINST_DOWN)) 3479 3480 self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId()) 3481 3482 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or 3483 self.owned_locks(locking.LEVEL_NODE)), \ 3484 "All node locks should have been released by now" 3485 3486 return result
3487 3488 _DISK_CONVERSIONS = { 3489 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd, 3490 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain, 3491 } 3492
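Exec selects the conversion routine by the (current template, requested template) pair; a pair missing from this table would simply fail the lookup. A stripped-down, stand-alone version of the same dispatch using plain strings instead of the DT_* constants:

def _plain_to_drbd(feedback_fn):
  feedback_fn("Converting template to drbd")

def _drbd_to_plain(feedback_fn):
  feedback_fn("Converting template to plain")

_CONVERSIONS = {
  ("plain", "drbd8"): _plain_to_drbd,
  ("drbd8", "plain"): _drbd_to_plain,
}

def _feedback(msg):
  print(msg)

mode = ("plain", "drbd8")   # (instance.disk_template, op.disk_template)
if mode not in _CONVERSIONS:
  raise ValueError("unsupported disk template conversion: %s -> %s" % mode)
_CONVERSIONS[mode](_feedback)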
3493 3494 class LUInstanceChangeGroup(LogicalUnit):
3495 HPATH = "instance-change-group" 3496 HTYPE = constants.HTYPE_INSTANCE 3497 REQ_BGL = False 3498
3499   def ExpandNames(self):
3500 self.share_locks = ShareAll() 3501 3502 self.needed_locks = { 3503 locking.LEVEL_NODEGROUP: [], 3504 locking.LEVEL_NODE: [], 3505 locking.LEVEL_NODE_ALLOC: locking.ALL_SET, 3506 } 3507 3508 self._ExpandAndLockInstance() 3509 3510 if self.op.target_groups: 3511 self.req_target_uuids = map(self.cfg.LookupNodeGroup, 3512 self.op.target_groups) 3513 else: 3514 self.req_target_uuids = None 3515 3516 self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3517
3518   def DeclareLocks(self, level):
3519 if level == locking.LEVEL_NODEGROUP: 3520 assert not self.needed_locks[locking.LEVEL_NODEGROUP] 3521 3522 if self.req_target_uuids: 3523 lock_groups = set(self.req_target_uuids) 3524 3525 # Lock all groups used by instance optimistically; this requires going 3526 # via the node before it's locked, requiring verification later on 3527 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid) 3528 lock_groups.update(instance_groups) 3529 else: 3530 # No target groups, need to lock all of them 3531 lock_groups = locking.ALL_SET 3532 3533 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups 3534 3535 elif level == locking.LEVEL_NODE: 3536 if self.req_target_uuids: 3537 # Lock all nodes used by instances 3538 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND 3539 self._LockInstancesNodes() 3540 3541 # Lock all nodes in all potential target groups 3542 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) - 3543 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)) 3544 member_nodes = [node_uuid 3545 for group in lock_groups 3546 for node_uuid in self.cfg.GetNodeGroup(group).members] 3547 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes) 3548 else: 3549 # Lock all nodes as all groups are potential targets 3550 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3551
3552   def CheckPrereq(self):
3553 owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE)) 3554 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) 3555 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE)) 3556 3557 assert (self.req_target_uuids is None or 3558 owned_groups.issuperset(self.req_target_uuids)) 3559 assert owned_instance_names == set([self.op.instance_name]) 3560 3561 # Get instance information 3562 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid) 3563 3564 # Check if node groups for locked instance are still correct 3565 assert owned_nodes.issuperset(self.instance.all_nodes), \ 3566 ("Instance %s's nodes changed while we kept the lock" % 3567 self.op.instance_name) 3568 3569 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid, 3570 owned_groups) 3571 3572 if self.req_target_uuids: 3573 # User requested specific target groups 3574 self.target_uuids = frozenset(self.req_target_uuids) 3575 else: 3576 # All groups except those used by the instance are potential targets 3577 self.target_uuids = owned_groups - inst_groups 3578 3579 conflicting_groups = self.target_uuids & inst_groups 3580 if conflicting_groups: 3581 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are" 3582 " used by the instance '%s'" % 3583 (utils.CommaJoin(conflicting_groups), 3584 self.op.instance_name), 3585 errors.ECODE_INVAL) 3586 3587 if not self.target_uuids: 3588 raise errors.OpPrereqError("There are no possible target groups", 3589 errors.ECODE_INVAL)
3590
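The group selection in CheckPrereq boils down to two branches: explicitly requested target groups are rejected if the instance already lives in any of them, and without an explicit request every other owned group becomes a candidate; in both cases an empty result is an error. The same rules as a small stand-alone function with invented group names:

def _pick_target_groups(owned_groups, inst_groups, requested=None):
  if requested:
    targets = frozenset(requested)
    conflicting = targets & inst_groups
    if conflicting:
      raise ValueError("can't use group(s) %s, already used by the instance" %
                       ", ".join(sorted(conflicting)))
  else:
    targets = owned_groups - inst_groups
  if not targets:
    raise ValueError("there are no possible target groups")
  return targets

owned = frozenset(["group-a", "group-b", "group-c"])
used = frozenset(["group-a"])
assert _pick_target_groups(owned, used) == frozenset(["group-b", "group-c"])
assert _pick_target_groups(owned, used, ["group-b"]) == frozenset(["group-b"])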
3591   def BuildHooksEnv(self):
3592 """Build hooks env. 3593 3594 """ 3595 assert self.target_uuids 3596 3597 env = { 3598 "TARGET_GROUPS": " ".join(self.target_uuids), 3599 } 3600 3601 env.update(BuildInstanceHookEnvByObject(self, self.instance)) 3602 3603 return env
3604
3605   def BuildHooksNodes(self):
3606 """Build hooks nodes. 3607 3608 """ 3609 mn = self.cfg.GetMasterNode() 3610 return ([mn], [mn])
3611
3612   def Exec(self, feedback_fn):
3613 instances = list(self.owned_locks(locking.LEVEL_INSTANCE)) 3614 3615 assert instances == [self.op.instance_name], "Instance not locked" 3616 3617 req = iallocator.IAReqGroupChange(instances=instances, 3618 target_groups=list(self.target_uuids)) 3619 ial = iallocator.IAllocator(self.cfg, self.rpc, req) 3620 3621 ial.Run(self.op.iallocator) 3622 3623 if not ial.success: 3624 raise errors.OpPrereqError("Can't compute solution for changing group of" 3625 " instance '%s' using iallocator '%s': %s" % 3626 (self.op.instance_name, self.op.iallocator, 3627 ial.info), errors.ECODE_NORES) 3628 3629 jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False) 3630 3631 self.LogInfo("Iallocator returned %s job(s) for changing group of" 3632 " instance '%s'", len(jobs), self.op.instance_name) 3633 3634 return ResultWithJobs(jobs)
3635