
Source Code for Module ganeti.cmdlib.instance

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30   
  31  """Logical units dealing with instances.""" 
  32   
  33  import OpenSSL 
  34  import copy 
  35  import logging 
  36  import os 
  37   
  38  from ganeti import compat 
  39  from ganeti import constants 
  40  from ganeti import errors 
  41  from ganeti import ht 
  42  from ganeti import hypervisor 
  43  from ganeti import locking 
  44  from ganeti.masterd import iallocator 
  45  from ganeti import masterd 
  46  from ganeti import netutils 
  47  from ganeti import objects 
  48  from ganeti import pathutils 
  49  from ganeti import rpc 
  50  from ganeti import utils 
  51   
  52  from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs 
  53   
  54  from ganeti.cmdlib.common import INSTANCE_DOWN, \ 
  55    INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \ 
  56    ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \ 
  57    LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \ 
  58    IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \ 
  59    AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \ 
  60    ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \ 
  61    CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination 
  62  from ganeti.cmdlib.instance_storage import CreateDisks, \ 
  63    CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \ 
  64    IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \ 
  65    CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \ 
  66    StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \ 
  67    CheckSpindlesExclusiveStorage 
  68  from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \ 
  69    GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \ 
  70    NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \ 
  71    ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \ 
  72    GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \ 
  73    CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS 
  74   
  75  import ganeti.masterd.instance 
  76   
  77   
  78  #: Type description for changes as returned by L{_ApplyContainerMods}'s 
  79  #: callbacks 
  80  _TApplyContModsCbChanges = \ 
  81    ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([ 
  82      ht.TNonEmptyString, 
  83      ht.TAny, 
  84      ]))) 
  85   
  86   
  87  def _CheckHostnameSane(lu, name): 
  88    """Ensures that a given hostname resolves to a 'sane' name. 
  89   
  90    The given name is required to be a prefix of the resolved hostname, 
  91    to prevent accidental mismatches. 
  92   
  93    @param lu: the logical unit on behalf of which we're checking 
  94    @param name: the name we should resolve and check 
  95    @return: the resolved hostname object 
  96   
  97    """ 
  98    hostname = netutils.GetHostname(name=name) 
  99    if hostname.name != name: 
 100      lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name) 
 101    if not utils.MatchNameComponent(name, [hostname.name]): 
 102      raise errors.OpPrereqError(("Resolved hostname '%s' does not look the" 
 103                                  " same as given hostname '%s'") % 
 104                                 (hostname.name, name), errors.ECODE_INVAL) 
 105    return hostname 
106
 107   
 108  def _CheckOpportunisticLocking(op): 
 109    """Generate error if opportunistic locking is not possible. 
 110   
 111    """ 
 112    if op.opportunistic_locking and not op.iallocator: 
 113      raise errors.OpPrereqError("Opportunistic locking is only available in" 
 114                                 " combination with an instance allocator", 
 115                                 errors.ECODE_INVAL) 
116
 117   
 118  def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist): 
 119    """Wrapper around IAReqInstanceAlloc. 
 120   
 121    @param op: The instance opcode 
 122    @param disks: The computed disks 
 123    @param nics: The computed nics 
 124    @param beparams: The fully filled beparams 
 125    @param node_name_whitelist: List of nodes which should appear as online to the 
 126      allocator (unless the node is already marked offline) 
 127   
 128    @returns: A filled L{iallocator.IAReqInstanceAlloc} 
 129   
 130    """ 
 131    spindle_use = beparams[constants.BE_SPINDLE_USE] 
 132    return iallocator.IAReqInstanceAlloc(name=op.instance_name, 
 133                                         disk_template=op.disk_template, 
 134                                         tags=op.tags, 
 135                                         os=op.os_type, 
 136                                         vcpus=beparams[constants.BE_VCPUS], 
 137                                         memory=beparams[constants.BE_MAXMEM], 
 138                                         spindle_use=spindle_use, 
 139                                         disks=disks, 
 140                                         nics=[n.ToDict() for n in nics], 
 141                                         hypervisor=op.hypervisor, 
 142                                         node_whitelist=node_name_whitelist) 
143
 144   
 145  def _ComputeFullBeParams(op, cluster): 
 146    """Computes the full beparams. 
 147   
 148    @param op: The instance opcode 
 149    @param cluster: The cluster config object 
 150   
 151    @return: The fully filled beparams 
 152   
 153    """ 
 154    default_beparams = cluster.beparams[constants.PP_DEFAULT] 
 155    for param, value in op.beparams.iteritems(): 
 156      if value == constants.VALUE_AUTO: 
 157        op.beparams[param] = default_beparams[param] 
 158    objects.UpgradeBeParams(op.beparams) 
 159    utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES) 
 160    return cluster.SimpleFillBE(op.beparams) 
161
 162   
 163  def _ComputeNics(op, cluster, default_ip, cfg, ec_id): 
 164    """Computes the nics. 
 165   
 166    @param op: The instance opcode 
 167    @param cluster: Cluster configuration object 
 168    @param default_ip: The default ip to assign 
 169    @param cfg: An instance of the configuration object 
 170    @param ec_id: Execution context ID 
 171   
 172    @returns: The built up nics 
 173   
 174    """ 
 175    nics = [] 
 176    for nic in op.nics: 
 177      nic_mode_req = nic.get(constants.INIC_MODE, None) 
 178      nic_mode = nic_mode_req 
 179      if nic_mode is None or nic_mode == constants.VALUE_AUTO: 
 180        nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE] 
 181   
 182      net = nic.get(constants.INIC_NETWORK, None) 
 183      link = nic.get(constants.NIC_LINK, None) 
 184      ip = nic.get(constants.INIC_IP, None) 
 185      vlan = nic.get(constants.INIC_VLAN, None) 
 186   
 187      if net is None or net.lower() == constants.VALUE_NONE: 
 188        net = None 
 189      else: 
 190        if nic_mode_req is not None or link is not None: 
 191          raise errors.OpPrereqError("If network is given, no mode or link" 
 192                                     " is allowed to be passed", 
 193                                     errors.ECODE_INVAL) 
 194   
 195      # ip validity checks 
 196      if ip is None or ip.lower() == constants.VALUE_NONE: 
 197        nic_ip = None 
 198      elif ip.lower() == constants.VALUE_AUTO: 
 199        if not op.name_check: 
 200          raise errors.OpPrereqError("IP address set to auto but name checks" 
 201                                     " have been skipped", 
 202                                     errors.ECODE_INVAL) 
 203        nic_ip = default_ip 
 204      else: 
 205        # We defer pool operations until later, so that the iallocator has 
 206        # filled in the instance's node(s) 
 207        if ip.lower() == constants.NIC_IP_POOL: 
 208          if net is None: 
 209            raise errors.OpPrereqError("if ip=pool, parameter network" 
 210                                       " must be passed too", 
 211                                       errors.ECODE_INVAL) 
 212   
 213        elif not netutils.IPAddress.IsValid(ip): 
 214          raise errors.OpPrereqError("Invalid IP address '%s'" % ip, 
 215                                     errors.ECODE_INVAL) 
 216   
 217        nic_ip = ip 
 218   
 219      # TODO: check the ip address for uniqueness 
 220      if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip: 
 221        raise errors.OpPrereqError("Routed nic mode requires an ip address", 
 222                                   errors.ECODE_INVAL) 
 223   
 224      # MAC address verification 
 225      mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO) 
 226      if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE): 
 227        mac = utils.NormalizeAndValidateMac(mac) 
 228   
 229        try: 
 230          # TODO: We need to factor this out 
 231          cfg.ReserveMAC(mac, ec_id) 
 232        except errors.ReservationError: 
 233          raise errors.OpPrereqError("MAC address %s already in use" 
 234                                     " in cluster" % mac, 
 235                                     errors.ECODE_NOTUNIQUE) 
 236   
 237      # Build nic parameters 
 238      nicparams = {} 
 239      if nic_mode_req: 
 240        nicparams[constants.NIC_MODE] = nic_mode 
 241      if link: 
 242        nicparams[constants.NIC_LINK] = link 
 243      if vlan: 
 244        nicparams[constants.NIC_VLAN] = vlan 
 245   
 246      check_params = cluster.SimpleFillNIC(nicparams) 
 247      objects.NIC.CheckParameterSyntax(check_params) 
 248      net_uuid = cfg.LookupNetwork(net) 
 249      name = nic.get(constants.INIC_NAME, None) 
 250      if name is not None and name.lower() == constants.VALUE_NONE: 
 251        name = None 
 252      nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name, 
 253                            network=net_uuid, nicparams=nicparams) 
 254      nic_obj.uuid = cfg.GenerateUniqueID(ec_id) 
 255      nics.append(nic_obj) 
 256   
 257    return nics 
258
 259   
 260  def _CheckForConflictingIp(lu, ip, node_uuid): 
 261    """In case of conflicting IP address raise error. 
 262   
 263    @type ip: string 
 264    @param ip: IP address 
 265    @type node_uuid: string 
 266    @param node_uuid: node UUID 
 267   
 268    """ 
 269    (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid) 
 270    if conf_net is not None: 
 271      raise errors.OpPrereqError(("The requested IP address (%s) belongs to" 
 272                                  " network %s, but the target NIC does not." % 
 273                                  (ip, conf_net)), 
 274                                 errors.ECODE_STATE) 
 275   
 276    return (None, None) 
277
 278   
 279  def _ComputeIPolicyInstanceSpecViolation( 
 280    ipolicy, instance_spec, disk_template, 
 281    _compute_fn=ComputeIPolicySpecViolation): 
 282    """Compute if the instance spec meets the specs of the ipolicy. 
 283   
 284    @type ipolicy: dict 
 285    @param ipolicy: The ipolicy to verify against 
 286    @type instance_spec: dict 
 287    @param instance_spec: The instance spec to verify 
 288    @type disk_template: string 
 289    @param disk_template: the disk template of the instance 
 290    @param _compute_fn: The function to verify ipolicy (unittest only) 
 291    @see: L{ComputeIPolicySpecViolation} 
 292   
 293    """ 
 294    mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None) 
 295    cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None) 
 296    disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0) 
 297    disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, []) 
 298    nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0) 
 299    spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None) 
 300   
 301    return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count, 
 302                       disk_sizes, spindle_use, disk_template) 
303
 304   
 305  def _CheckOSVariant(os_obj, name): 
 306    """Check whether an OS name conforms to the os variants specification. 
 307   
 308    @type os_obj: L{objects.OS} 
 309    @param os_obj: OS object to check 
 310    @type name: string 
 311    @param name: OS name passed by the user, to check for validity 
 312   
 313    """ 
 314    variant = objects.OS.GetVariant(name) 
 315    if not os_obj.supported_variants: 
 316      if variant: 
 317        raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'" 
 318                                   " passed)" % (os_obj.name, variant), 
 319                                   errors.ECODE_INVAL) 
 320      return 
 321    if not variant: 
 322      raise errors.OpPrereqError("OS name must include a variant", 
 323                                 errors.ECODE_INVAL) 
 324   
 325    if variant not in os_obj.supported_variants: 
 326      raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL) 
327
 328   
 329  class LUInstanceCreate(LogicalUnit): 
 330    """Create an instance. 
 331   
 332    """ 
 333    HPATH = "instance-add" 
 334    HTYPE = constants.HTYPE_INSTANCE 
 335    REQ_BGL = False 
 336   
 337    def _CheckDiskTemplateValid(self): 
 338      """Checks validity of disk template. 
 339   
 340      """ 
 341      cluster = self.cfg.GetClusterInfo() 
 342      if self.op.disk_template is None: 
 343        # FIXME: It would be better to take the default disk template from the 
 344        # ipolicy, but for the ipolicy we need the primary node, which we get from 
 345        # the iallocator, which wants the disk template as input. To solve this 
 346        # chicken-and-egg problem, it should be possible to specify just a node 
 347        # group from the iallocator and take the ipolicy from that. 
 348        self.op.disk_template = cluster.enabled_disk_templates[0] 
 349      CheckDiskTemplateEnabled(cluster, self.op.disk_template) 
350
 351    def _CheckDiskArguments(self): 
 352      """Checks validity of disk-related arguments. 
 353   
 354      """ 
 355      # check that disk's names are unique and valid 
 356      utils.ValidateDeviceNames("disk", self.op.disks) 
 357   
 358      self._CheckDiskTemplateValid() 
 359   
 360      # check disks' parameter names and consistent adopt/no-adopt strategy 
 361      has_adopt = has_no_adopt = False 
 362      for disk in self.op.disks: 
 363        if self.op.disk_template != constants.DT_EXT: 
 364          utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES) 
 365        if constants.IDISK_ADOPT in disk: 
 366          has_adopt = True 
 367        else: 
 368          has_no_adopt = True 
 369      if has_adopt and has_no_adopt: 
 370        raise errors.OpPrereqError("Either all disks are adopted or none is", 
 371                                   errors.ECODE_INVAL) 
 372      if has_adopt: 
 373        if self.op.disk_template not in constants.DTS_MAY_ADOPT: 
 374          raise errors.OpPrereqError("Disk adoption is not supported for the" 
 375                                     " '%s' disk template" % 
 376                                     self.op.disk_template, 
 377                                     errors.ECODE_INVAL) 
 378        if self.op.iallocator is not None: 
 379          raise errors.OpPrereqError("Disk adoption not allowed with an" 
 380                                     " iallocator script", errors.ECODE_INVAL) 
 381        if self.op.mode == constants.INSTANCE_IMPORT: 
 382          raise errors.OpPrereqError("Disk adoption not allowed for" 
 383                                     " instance import", errors.ECODE_INVAL) 
 384      else: 
 385        if self.op.disk_template in constants.DTS_MUST_ADOPT: 
 386          raise errors.OpPrereqError("Disk template %s requires disk adoption," 
 387                                     " but no 'adopt' parameter given" % 
 388                                     self.op.disk_template, 
 389                                     errors.ECODE_INVAL) 
 390   
 391      self.adopt_disks = has_adopt 
392
 393    def _CheckVLANArguments(self): 
 394      """Check validity of VLANs if given. 
 395   
 396      """ 
 397      for nic in self.op.nics: 
 398        vlan = nic.get(constants.INIC_VLAN, None) 
 399        if vlan: 
 400          if vlan[0] == ".": 
 401            # vlan starting with dot means single untagged vlan, 
 402            # might be followed by trunk (:) 
 403            if not vlan[1:].isdigit(): 
 404              vlanlist = vlan[1:].split(':') 
 405              for vl in vlanlist: 
 406                if not vl.isdigit(): 
 407                  raise errors.OpPrereqError("Specified VLAN parameter is " 
 408                                             "invalid : %s" % vlan, 
 409                                             errors.ECODE_INVAL) 
 410          elif vlan[0] == ":": 
 411            # Trunk - tagged only 
 412            vlanlist = vlan[1:].split(':') 
 413            for vl in vlanlist: 
 414              if not vl.isdigit(): 
 415                raise errors.OpPrereqError("Specified VLAN parameter is invalid" 
 416                                           " : %s" % vlan, errors.ECODE_INVAL) 
 417          elif vlan.isdigit(): 
 418            # This is the simplest case. No dots, only single digit 
 419            # -> Create untagged access port, dot needs to be added 
 420            nic[constants.INIC_VLAN] = "." + vlan 
 421          else: 
 422            raise errors.OpPrereqError("Specified VLAN parameter is invalid" 
 423                                       " : %s" % vlan, errors.ECODE_INVAL) 
424
425 - def CheckArguments(self):
426 """Check arguments. 427 428 """ 429 # do not require name_check to ease forward/backward compatibility 430 # for tools 431 if self.op.no_install and self.op.start: 432 self.LogInfo("No-installation mode selected, disabling startup") 433 self.op.start = False 434 # validate/normalize the instance name 435 self.op.instance_name = \ 436 netutils.Hostname.GetNormalizedName(self.op.instance_name) 437 438 if self.op.ip_check and not self.op.name_check: 439 # TODO: make the ip check more flexible and not depend on the name check 440 raise errors.OpPrereqError("Cannot do IP address check without a name" 441 " check", errors.ECODE_INVAL) 442 443 # check nics' parameter names 444 for nic in self.op.nics: 445 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES) 446 # check that NIC's parameters names are unique and valid 447 utils.ValidateDeviceNames("NIC", self.op.nics) 448 449 self._CheckVLANArguments() 450 451 self._CheckDiskArguments() 452 assert self.op.disk_template is not None 453 454 # instance name verification 455 if self.op.name_check: 456 self.hostname = _CheckHostnameSane(self, self.op.instance_name) 457 self.op.instance_name = self.hostname.name 458 # used in CheckPrereq for ip ping check 459 self.check_ip = self.hostname.ip 460 else: 461 self.check_ip = None 462 463 # file storage checks 464 if (self.op.file_driver and 465 not self.op.file_driver in constants.FILE_DRIVER): 466 raise errors.OpPrereqError("Invalid file driver name '%s'" % 467 self.op.file_driver, errors.ECODE_INVAL) 468 469 # set default file_driver if unset and required 470 if (not self.op.file_driver and 471 self.op.disk_template in [constants.DT_FILE, 472 constants.DT_SHARED_FILE]): 473 self.op.file_driver = constants.FD_DEFAULT 474 475 ### Node/iallocator related checks 476 CheckIAllocatorOrNode(self, "iallocator", "pnode") 477 478 if self.op.pnode is not None: 479 if self.op.disk_template in constants.DTS_INT_MIRROR: 480 if self.op.snode is None: 481 raise errors.OpPrereqError("The networked disk templates need" 482 " a mirror node", errors.ECODE_INVAL) 483 elif self.op.snode: 484 self.LogWarning("Secondary node will be ignored on non-mirrored disk" 485 " template") 486 self.op.snode = None 487 488 _CheckOpportunisticLocking(self.op) 489 490 if self.op.mode == constants.INSTANCE_IMPORT: 491 # On import force_variant must be True, because if we forced it at 492 # initial install, our only chance when importing it back is that it 493 # works again! 
494 self.op.force_variant = True 495 496 if self.op.no_install: 497 self.LogInfo("No-installation mode has no effect during import") 498 499 elif self.op.mode == constants.INSTANCE_CREATE: 500 if self.op.os_type is None: 501 raise errors.OpPrereqError("No guest OS specified", 502 errors.ECODE_INVAL) 503 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os: 504 raise errors.OpPrereqError("Guest OS '%s' is not allowed for" 505 " installation" % self.op.os_type, 506 errors.ECODE_STATE) 507 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT: 508 self._cds = GetClusterDomainSecret() 509 510 # Check handshake to ensure both clusters have the same domain secret 511 src_handshake = self.op.source_handshake 512 if not src_handshake: 513 raise errors.OpPrereqError("Missing source handshake", 514 errors.ECODE_INVAL) 515 516 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds, 517 src_handshake) 518 if errmsg: 519 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg, 520 errors.ECODE_INVAL) 521 522 # Load and check source CA 523 self.source_x509_ca_pem = self.op.source_x509_ca 524 if not self.source_x509_ca_pem: 525 raise errors.OpPrereqError("Missing source X509 CA", 526 errors.ECODE_INVAL) 527 528 try: 529 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem, 530 self._cds) 531 except OpenSSL.crypto.Error, err: 532 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" % 533 (err, ), errors.ECODE_INVAL) 534 535 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None) 536 if errcode is not None: 537 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ), 538 errors.ECODE_INVAL) 539 540 self.source_x509_ca = cert 541 542 src_instance_name = self.op.source_instance_name 543 if not src_instance_name: 544 raise errors.OpPrereqError("Missing source instance name", 545 errors.ECODE_INVAL) 546 547 self.source_instance_name = \ 548 netutils.GetHostname(name=src_instance_name).name 549 550 else: 551 raise errors.OpPrereqError("Invalid instance creation mode %r" % 552 self.op.mode, errors.ECODE_INVAL)
553
554 - def ExpandNames(self):
555 """ExpandNames for CreateInstance. 556 557 Figure out the right locks for instance creation. 558 559 """ 560 self.needed_locks = {} 561 562 # this is just a preventive check, but someone might still add this 563 # instance in the meantime, and creation will fail at lock-add time 564 if self.op.instance_name in\ 565 [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]: 566 raise errors.OpPrereqError("Instance '%s' is already in the cluster" % 567 self.op.instance_name, errors.ECODE_EXISTS) 568 569 self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name 570 571 if self.op.iallocator: 572 # TODO: Find a solution to not lock all nodes in the cluster, e.g. by 573 # specifying a group on instance creation and then selecting nodes from 574 # that group 575 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET 576 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET 577 578 if self.op.opportunistic_locking: 579 self.opportunistic_locks[locking.LEVEL_NODE] = True 580 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True 581 else: 582 (self.op.pnode_uuid, self.op.pnode) = \ 583 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode) 584 nodelist = [self.op.pnode_uuid] 585 if self.op.snode is not None: 586 (self.op.snode_uuid, self.op.snode) = \ 587 ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode) 588 nodelist.append(self.op.snode_uuid) 589 self.needed_locks[locking.LEVEL_NODE] = nodelist 590 591 # in case of import lock the source node too 592 if self.op.mode == constants.INSTANCE_IMPORT: 593 src_node = self.op.src_node 594 src_path = self.op.src_path 595 596 if src_path is None: 597 self.op.src_path = src_path = self.op.instance_name 598 599 if src_node is None: 600 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET 601 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET 602 self.op.src_node = None 603 if os.path.isabs(src_path): 604 raise errors.OpPrereqError("Importing an instance from a path" 605 " requires a source node option", 606 errors.ECODE_INVAL) 607 else: 608 (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \ 609 ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node) 610 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET: 611 self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid) 612 if not os.path.isabs(src_path): 613 self.op.src_path = \ 614 utils.PathJoin(pathutils.EXPORT_DIR, src_path) 615 616 self.needed_locks[locking.LEVEL_NODE_RES] = \ 617 CopyLockList(self.needed_locks[locking.LEVEL_NODE]) 618 619 # Optimistically acquire shared group locks (we're reading the 620 # configuration). We can't just call GetInstanceNodeGroups, because the 621 # instance doesn't exist yet. Therefore we lock all node groups of all 622 # nodes we have. 623 if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET: 624 # In the case we lock all nodes for opportunistic allocation, we have no 625 # choice than to lock all groups, because they're allocated before nodes. 626 # This is sad, but true. At least we release all those we don't need in 627 # CheckPrereq later. 628 self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET 629 else: 630 self.needed_locks[locking.LEVEL_NODEGROUP] = \ 631 list(self.cfg.GetNodeGroupsFromNodes( 632 self.needed_locks[locking.LEVEL_NODE])) 633 self.share_locks[locking.LEVEL_NODEGROUP] = 1
634
635 - def _RunAllocator(self):
636 """Run the allocator based on input opcode. 637 638 """ 639 if self.op.opportunistic_locking: 640 # Only consider nodes for which a lock is held 641 node_name_whitelist = self.cfg.GetNodeNames( 642 set(self.owned_locks(locking.LEVEL_NODE)) & 643 set(self.owned_locks(locking.LEVEL_NODE_RES))) 644 else: 645 node_name_whitelist = None 646 647 req = _CreateInstanceAllocRequest(self.op, self.disks, 648 self.nics, self.be_full, 649 node_name_whitelist) 650 ial = iallocator.IAllocator(self.cfg, self.rpc, req) 651 652 ial.Run(self.op.iallocator) 653 654 if not ial.success: 655 # When opportunistic locks are used only a temporary failure is generated 656 if self.op.opportunistic_locking: 657 ecode = errors.ECODE_TEMP_NORES 658 else: 659 ecode = errors.ECODE_NORES 660 661 raise errors.OpPrereqError("Can't compute nodes using" 662 " iallocator '%s': %s" % 663 (self.op.iallocator, ial.info), 664 ecode) 665 666 (self.op.pnode_uuid, self.op.pnode) = \ 667 ExpandNodeUuidAndName(self.cfg, None, ial.result[0]) 668 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s", 669 self.op.instance_name, self.op.iallocator, 670 utils.CommaJoin(ial.result)) 671 672 assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator" 673 674 if req.RequiredNodes() == 2: 675 (self.op.snode_uuid, self.op.snode) = \ 676 ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
677
678 - def BuildHooksEnv(self):
679 """Build hooks env. 680 681 This runs on master, primary and secondary nodes of the instance. 682 683 """ 684 env = { 685 "ADD_MODE": self.op.mode, 686 } 687 if self.op.mode == constants.INSTANCE_IMPORT: 688 env["SRC_NODE"] = self.op.src_node 689 env["SRC_PATH"] = self.op.src_path 690 env["SRC_IMAGES"] = self.src_images 691 692 env.update(BuildInstanceHookEnv( 693 name=self.op.instance_name, 694 primary_node_name=self.op.pnode, 695 secondary_node_names=self.cfg.GetNodeNames(self.secondaries), 696 status=self.op.start, 697 os_type=self.op.os_type, 698 minmem=self.be_full[constants.BE_MINMEM], 699 maxmem=self.be_full[constants.BE_MAXMEM], 700 vcpus=self.be_full[constants.BE_VCPUS], 701 nics=NICListToTuple(self, self.nics), 702 disk_template=self.op.disk_template, 703 disks=[(d[constants.IDISK_NAME], d.get("uuid", ""), 704 d[constants.IDISK_SIZE], d[constants.IDISK_MODE]) 705 for d in self.disks], 706 bep=self.be_full, 707 hvp=self.hv_full, 708 hypervisor_name=self.op.hypervisor, 709 tags=self.op.tags, 710 )) 711 712 return env
713
 714    def BuildHooksNodes(self): 
 715      """Build hooks nodes. 
 716   
 717      """ 
 718      nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries 
 719      return nl, nl 
720
721 - def _ReadExportInfo(self):
722 """Reads the export information from disk. 723 724 It will override the opcode source node and path with the actual 725 information, if these two were not specified before. 726 727 @return: the export information 728 729 """ 730 assert self.op.mode == constants.INSTANCE_IMPORT 731 732 if self.op.src_node_uuid is None: 733 locked_nodes = self.owned_locks(locking.LEVEL_NODE) 734 exp_list = self.rpc.call_export_list(locked_nodes) 735 found = False 736 for node_uuid in exp_list: 737 if exp_list[node_uuid].fail_msg: 738 continue 739 if self.op.src_path in exp_list[node_uuid].payload: 740 found = True 741 self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name 742 self.op.src_node_uuid = node_uuid 743 self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR, 744 self.op.src_path) 745 break 746 if not found: 747 raise errors.OpPrereqError("No export found for relative path %s" % 748 self.op.src_path, errors.ECODE_INVAL) 749 750 CheckNodeOnline(self, self.op.src_node_uuid) 751 result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path) 752 result.Raise("No export or invalid export found in dir %s" % 753 self.op.src_path) 754 755 export_info = objects.SerializableConfigParser.Loads(str(result.payload)) 756 if not export_info.has_section(constants.INISECT_EXP): 757 raise errors.ProgrammerError("Corrupted export config", 758 errors.ECODE_ENVIRON) 759 760 ei_version = export_info.get(constants.INISECT_EXP, "version") 761 if int(ei_version) != constants.EXPORT_VERSION: 762 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" % 763 (ei_version, constants.EXPORT_VERSION), 764 errors.ECODE_ENVIRON) 765 return export_info
766
767 - def _ReadExportParams(self, einfo):
768 """Use export parameters as defaults. 769 770 In case the opcode doesn't specify (as in override) some instance 771 parameters, then try to use them from the export information, if 772 that declares them. 773 774 """ 775 self.op.os_type = einfo.get(constants.INISECT_EXP, "os") 776 777 if not self.op.disks: 778 disks = [] 779 # TODO: import the disk iv_name too 780 for idx in range(constants.MAX_DISKS): 781 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx): 782 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx) 783 disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx) 784 disk = { 785 constants.IDISK_SIZE: disk_sz, 786 constants.IDISK_NAME: disk_name 787 } 788 disks.append(disk) 789 self.op.disks = disks 790 if not disks and self.op.disk_template != constants.DT_DISKLESS: 791 raise errors.OpPrereqError("No disk info specified and the export" 792 " is missing the disk information", 793 errors.ECODE_INVAL) 794 795 if not self.op.nics: 796 nics = [] 797 for idx in range(constants.MAX_NICS): 798 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx): 799 ndict = {} 800 for name in [constants.INIC_IP, 801 constants.INIC_MAC, constants.INIC_NAME]: 802 nic_param_name = "nic%d_%s" % (idx, name) 803 if einfo.has_option(constants.INISECT_INS, nic_param_name): 804 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name)) 805 ndict[name] = v 806 network = einfo.get(constants.INISECT_INS, 807 "nic%d_%s" % (idx, constants.INIC_NETWORK)) 808 # in case network is given link and mode are inherited 809 # from nodegroup's netparams and thus should not be passed here 810 if network: 811 ndict[constants.INIC_NETWORK] = network 812 else: 813 for name in list(constants.NICS_PARAMETERS): 814 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name)) 815 ndict[name] = v 816 nics.append(ndict) 817 else: 818 break 819 self.op.nics = nics 820 821 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"): 822 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split() 823 824 if (self.op.hypervisor is None and 825 einfo.has_option(constants.INISECT_INS, "hypervisor")): 826 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor") 827 828 if einfo.has_section(constants.INISECT_HYP): 829 # use the export parameters but do not override the ones 830 # specified by the user 831 for name, value in einfo.items(constants.INISECT_HYP): 832 if name not in self.op.hvparams: 833 self.op.hvparams[name] = value 834 835 if einfo.has_section(constants.INISECT_BEP): 836 # use the parameters, without overriding 837 for name, value in einfo.items(constants.INISECT_BEP): 838 if name not in self.op.beparams: 839 self.op.beparams[name] = value 840 # Compatibility for the old "memory" be param 841 if name == constants.BE_MEMORY: 842 if constants.BE_MAXMEM not in self.op.beparams: 843 self.op.beparams[constants.BE_MAXMEM] = value 844 if constants.BE_MINMEM not in self.op.beparams: 845 self.op.beparams[constants.BE_MINMEM] = value 846 else: 847 # try to read the parameters old style, from the main section 848 for name in constants.BES_PARAMETERS: 849 if (name not in self.op.beparams and 850 einfo.has_option(constants.INISECT_INS, name)): 851 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name) 852 853 if einfo.has_section(constants.INISECT_OSP): 854 # use the parameters, without overriding 855 for name, value in einfo.items(constants.INISECT_OSP): 856 if name not in self.op.osparams: 857 self.op.osparams[name] = value
858
 859    def _RevertToDefaults(self, cluster): 
 860      """Revert the instance parameters to the default values. 
 861   
 862      """ 
 863      # hvparams 
 864      hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {}) 
 865      for name in self.op.hvparams.keys(): 
 866        if name in hv_defs and hv_defs[name] == self.op.hvparams[name]: 
 867          del self.op.hvparams[name] 
 868      # beparams 
 869      be_defs = cluster.SimpleFillBE({}) 
 870      for name in self.op.beparams.keys(): 
 871        if name in be_defs and be_defs[name] == self.op.beparams[name]: 
 872          del self.op.beparams[name] 
 873      # nic params 
 874      nic_defs = cluster.SimpleFillNIC({}) 
 875      for nic in self.op.nics: 
 876        for name in constants.NICS_PARAMETERS: 
 877          if name in nic and name in nic_defs and nic[name] == nic_defs[name]: 
 878            del nic[name] 
 879      # osparams 
 880      os_defs = cluster.SimpleFillOS(self.op.os_type, {}) 
 881      for name in self.op.osparams.keys(): 
 882        if name in os_defs and os_defs[name] == self.op.osparams[name]: 
 883          del self.op.osparams[name] 
884
 885    def _CalculateFileStorageDir(self): 
 886      """Calculate final instance file storage dir. 
 887   
 888      """ 
 889      # file storage dir calculation/check 
 890      self.instance_file_storage_dir = None 
 891      if self.op.disk_template in constants.DTS_FILEBASED: 
 892        # build the full file storage dir path 
 893        joinargs = [] 
 894   
 895        if self.op.disk_template == constants.DT_SHARED_FILE: 
 896          get_fsd_fn = self.cfg.GetSharedFileStorageDir 
 897        else: 
 898          get_fsd_fn = self.cfg.GetFileStorageDir 
 899   
 900        cfg_storagedir = get_fsd_fn() 
 901        if not cfg_storagedir: 
 902          raise errors.OpPrereqError("Cluster file storage dir not defined", 
 903                                     errors.ECODE_STATE) 
 904        joinargs.append(cfg_storagedir) 
 905   
 906        if self.op.file_storage_dir is not None: 
 907          joinargs.append(self.op.file_storage_dir) 
 908   
 909        joinargs.append(self.op.instance_name) 
 910   
 911        # pylint: disable=W0142 
 912        self.instance_file_storage_dir = utils.PathJoin(*joinargs) 
913
914 - def CheckPrereq(self): # pylint: disable=R0914
915 """Check prerequisites. 916 917 """ 918 # Check that the optimistically acquired groups are correct wrt the 919 # acquired nodes 920 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) 921 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE)) 922 cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes)) 923 if not owned_groups.issuperset(cur_groups): 924 raise errors.OpPrereqError("New instance %s's node groups changed since" 925 " locks were acquired, current groups are" 926 " are '%s', owning groups '%s'; retry the" 927 " operation" % 928 (self.op.instance_name, 929 utils.CommaJoin(cur_groups), 930 utils.CommaJoin(owned_groups)), 931 errors.ECODE_STATE) 932 933 self._CalculateFileStorageDir() 934 935 if self.op.mode == constants.INSTANCE_IMPORT: 936 export_info = self._ReadExportInfo() 937 self._ReadExportParams(export_info) 938 self._old_instance_name = export_info.get(constants.INISECT_INS, "name") 939 else: 940 self._old_instance_name = None 941 942 if (not self.cfg.GetVGName() and 943 self.op.disk_template not in constants.DTS_NOT_LVM): 944 raise errors.OpPrereqError("Cluster does not support lvm-based" 945 " instances", errors.ECODE_STATE) 946 947 if (self.op.hypervisor is None or 948 self.op.hypervisor == constants.VALUE_AUTO): 949 self.op.hypervisor = self.cfg.GetHypervisorType() 950 951 cluster = self.cfg.GetClusterInfo() 952 enabled_hvs = cluster.enabled_hypervisors 953 if self.op.hypervisor not in enabled_hvs: 954 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the" 955 " cluster (%s)" % 956 (self.op.hypervisor, ",".join(enabled_hvs)), 957 errors.ECODE_STATE) 958 959 # Check tag validity 960 for tag in self.op.tags: 961 objects.TaggableObject.ValidateTag(tag) 962 963 # check hypervisor parameter syntax (locally) 964 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES) 965 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, 966 self.op.hvparams) 967 hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor) 968 hv_type.CheckParameterSyntax(filled_hvp) 969 self.hv_full = filled_hvp 970 # check that we don't specify global parameters on an instance 971 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor", 972 "instance", "cluster") 973 974 # fill and remember the beparams dict 975 self.be_full = _ComputeFullBeParams(self.op, cluster) 976 977 # build os parameters 978 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams) 979 980 # now that hvp/bep are in final format, let's reset to defaults, 981 # if told to do so 982 if self.op.identify_defaults: 983 self._RevertToDefaults(cluster) 984 985 # NIC buildup 986 self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg, 987 self.proc.GetECId()) 988 989 # disk checks/pre-build 990 default_vg = self.cfg.GetVGName() 991 self.disks = ComputeDisks(self.op, default_vg) 992 993 if self.op.mode == constants.INSTANCE_IMPORT: 994 disk_images = [] 995 for idx in range(len(self.disks)): 996 option = "disk%d_dump" % idx 997 if export_info.has_option(constants.INISECT_INS, option): 998 # FIXME: are the old os-es, disk sizes, etc. useful? 
999 export_name = export_info.get(constants.INISECT_INS, option) 1000 image = utils.PathJoin(self.op.src_path, export_name) 1001 disk_images.append(image) 1002 else: 1003 disk_images.append(False) 1004 1005 self.src_images = disk_images 1006 1007 if self.op.instance_name == self._old_instance_name: 1008 for idx, nic in enumerate(self.nics): 1009 if nic.mac == constants.VALUE_AUTO: 1010 nic_mac_ini = "nic%d_mac" % idx 1011 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini) 1012 1013 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT 1014 1015 # ip ping checks (we use the same ip that was resolved in ExpandNames) 1016 if self.op.ip_check: 1017 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT): 1018 raise errors.OpPrereqError("IP %s of instance %s already in use" % 1019 (self.check_ip, self.op.instance_name), 1020 errors.ECODE_NOTUNIQUE) 1021 1022 #### mac address generation 1023 # By generating here the mac address both the allocator and the hooks get 1024 # the real final mac address rather than the 'auto' or 'generate' value. 1025 # There is a race condition between the generation and the instance object 1026 # creation, which means that we know the mac is valid now, but we're not 1027 # sure it will be when we actually add the instance. If things go bad 1028 # adding the instance will abort because of a duplicate mac, and the 1029 # creation job will fail. 1030 for nic in self.nics: 1031 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE): 1032 nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId()) 1033 1034 #### allocator run 1035 1036 if self.op.iallocator is not None: 1037 self._RunAllocator() 1038 1039 # Release all unneeded node locks 1040 keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid, 1041 self.op.src_node_uuid]) 1042 ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks) 1043 ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks) 1044 ReleaseLocks(self, locking.LEVEL_NODE_ALLOC) 1045 # Release all unneeded group locks 1046 ReleaseLocks(self, locking.LEVEL_NODEGROUP, 1047 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks)) 1048 1049 assert (self.owned_locks(locking.LEVEL_NODE) == 1050 self.owned_locks(locking.LEVEL_NODE_RES)), \ 1051 "Node locks differ from node resource locks" 1052 1053 #### node related checks 1054 1055 # check primary node 1056 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid) 1057 assert self.pnode is not None, \ 1058 "Cannot retrieve locked node %s" % self.op.pnode_uuid 1059 if pnode.offline: 1060 raise errors.OpPrereqError("Cannot use offline primary node '%s'" % 1061 pnode.name, errors.ECODE_STATE) 1062 if pnode.drained: 1063 raise errors.OpPrereqError("Cannot use drained primary node '%s'" % 1064 pnode.name, errors.ECODE_STATE) 1065 if not pnode.vm_capable: 1066 raise errors.OpPrereqError("Cannot use non-vm_capable primary node" 1067 " '%s'" % pnode.name, errors.ECODE_STATE) 1068 1069 self.secondaries = [] 1070 1071 # Fill in any IPs from IP pools. This must happen here, because we need to 1072 # know the nic's primary node, as specified by the iallocator 1073 for idx, nic in enumerate(self.nics): 1074 net_uuid = nic.network 1075 if net_uuid is not None: 1076 nobj = self.cfg.GetNetwork(net_uuid) 1077 netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid) 1078 if netparams is None: 1079 raise errors.OpPrereqError("No netparams found for network" 1080 " %s. 
Probably not connected to" 1081 " node's %s nodegroup" % 1082 (nobj.name, self.pnode.name), 1083 errors.ECODE_INVAL) 1084 self.LogInfo("NIC/%d inherits netparams %s" % 1085 (idx, netparams.values())) 1086 nic.nicparams = dict(netparams) 1087 if nic.ip is not None: 1088 if nic.ip.lower() == constants.NIC_IP_POOL: 1089 try: 1090 nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId()) 1091 except errors.ReservationError: 1092 raise errors.OpPrereqError("Unable to get a free IP for NIC %d" 1093 " from the address pool" % idx, 1094 errors.ECODE_STATE) 1095 self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name) 1096 else: 1097 try: 1098 self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(), 1099 check=self.op.conflicts_check) 1100 except errors.ReservationError: 1101 raise errors.OpPrereqError("IP address %s already in use" 1102 " or does not belong to network %s" % 1103 (nic.ip, nobj.name), 1104 errors.ECODE_NOTUNIQUE) 1105 1106 # net is None, ip None or given 1107 elif self.op.conflicts_check: 1108 _CheckForConflictingIp(self, nic.ip, self.pnode.uuid) 1109 1110 # mirror node verification 1111 if self.op.disk_template in constants.DTS_INT_MIRROR: 1112 if self.op.snode_uuid == pnode.uuid: 1113 raise errors.OpPrereqError("The secondary node cannot be the" 1114 " primary node", errors.ECODE_INVAL) 1115 CheckNodeOnline(self, self.op.snode_uuid) 1116 CheckNodeNotDrained(self, self.op.snode_uuid) 1117 CheckNodeVmCapable(self, self.op.snode_uuid) 1118 self.secondaries.append(self.op.snode_uuid) 1119 1120 snode = self.cfg.GetNodeInfo(self.op.snode_uuid) 1121 if pnode.group != snode.group: 1122 self.LogWarning("The primary and secondary nodes are in two" 1123 " different node groups; the disk parameters" 1124 " from the first disk's node group will be" 1125 " used") 1126 1127 nodes = [pnode] 1128 if self.op.disk_template in constants.DTS_INT_MIRROR: 1129 nodes.append(snode) 1130 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n) 1131 excl_stor = compat.any(map(has_es, nodes)) 1132 if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE: 1133 raise errors.OpPrereqError("Disk template %s not supported with" 1134 " exclusive storage" % self.op.disk_template, 1135 errors.ECODE_STATE) 1136 for disk in self.disks: 1137 CheckSpindlesExclusiveStorage(disk, excl_stor, True) 1138 1139 node_uuids = [pnode.uuid] + self.secondaries 1140 1141 if not self.adopt_disks: 1142 if self.op.disk_template == constants.DT_RBD: 1143 # _CheckRADOSFreeSpace() is just a placeholder. 1144 # Any function that checks prerequisites can be placed here. 1145 # Check if there is enough space on the RADOS cluster. 
1146 CheckRADOSFreeSpace() 1147 elif self.op.disk_template == constants.DT_EXT: 1148 # FIXME: Function that checks prereqs if needed 1149 pass 1150 elif self.op.disk_template in constants.DTS_LVM: 1151 # Check lv size requirements, if not adopting 1152 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks) 1153 CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes) 1154 else: 1155 # FIXME: add checks for other, non-adopting, non-lvm disk templates 1156 pass 1157 1158 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data 1159 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG], 1160 disk[constants.IDISK_ADOPT]) 1161 for disk in self.disks]) 1162 if len(all_lvs) != len(self.disks): 1163 raise errors.OpPrereqError("Duplicate volume names given for adoption", 1164 errors.ECODE_INVAL) 1165 for lv_name in all_lvs: 1166 try: 1167 # FIXME: lv_name here is "vg/lv" need to ensure that other calls 1168 # to ReserveLV uses the same syntax 1169 self.cfg.ReserveLV(lv_name, self.proc.GetECId()) 1170 except errors.ReservationError: 1171 raise errors.OpPrereqError("LV named %s used by another instance" % 1172 lv_name, errors.ECODE_NOTUNIQUE) 1173 1174 vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid] 1175 vg_names.Raise("Cannot get VG information from node %s" % pnode.name, 1176 prereq=True) 1177 1178 node_lvs = self.rpc.call_lv_list([pnode.uuid], 1179 vg_names.payload.keys())[pnode.uuid] 1180 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name, 1181 prereq=True) 1182 node_lvs = node_lvs.payload 1183 1184 delta = all_lvs.difference(node_lvs.keys()) 1185 if delta: 1186 raise errors.OpPrereqError("Missing logical volume(s): %s" % 1187 utils.CommaJoin(delta), 1188 errors.ECODE_INVAL) 1189 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]] 1190 if online_lvs: 1191 raise errors.OpPrereqError("Online logical volumes found, cannot" 1192 " adopt: %s" % utils.CommaJoin(online_lvs), 1193 errors.ECODE_STATE) 1194 # update the size of disk based on what is found 1195 for dsk in self.disks: 1196 dsk[constants.IDISK_SIZE] = \ 1197 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG], 1198 dsk[constants.IDISK_ADOPT])][0])) 1199 1200 elif self.op.disk_template == constants.DT_BLOCK: 1201 # Normalize and de-duplicate device paths 1202 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT]) 1203 for disk in self.disks]) 1204 if len(all_disks) != len(self.disks): 1205 raise errors.OpPrereqError("Duplicate disk names given for adoption", 1206 errors.ECODE_INVAL) 1207 baddisks = [d for d in all_disks 1208 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)] 1209 if baddisks: 1210 raise errors.OpPrereqError("Device node(s) %s lie outside %s and" 1211 " cannot be adopted" % 1212 (utils.CommaJoin(baddisks), 1213 constants.ADOPTABLE_BLOCKDEV_ROOT), 1214 errors.ECODE_INVAL) 1215 1216 node_disks = self.rpc.call_bdev_sizes([pnode.uuid], 1217 list(all_disks))[pnode.uuid] 1218 node_disks.Raise("Cannot get block device information from node %s" % 1219 pnode.name, prereq=True) 1220 node_disks = node_disks.payload 1221 delta = all_disks.difference(node_disks.keys()) 1222 if delta: 1223 raise errors.OpPrereqError("Missing block device(s): %s" % 1224 utils.CommaJoin(delta), 1225 errors.ECODE_INVAL) 1226 for dsk in self.disks: 1227 dsk[constants.IDISK_SIZE] = \ 1228 int(float(node_disks[dsk[constants.IDISK_ADOPT]])) 1229 1230 # Check disk access param to be compatible with specified hypervisor 1231 node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid) 1232 node_group = 
self.cfg.GetNodeGroup(node_info.group) 1233 disk_params = self.cfg.GetGroupDiskParams(node_group) 1234 access_type = disk_params[self.op.disk_template].get( 1235 constants.RBD_ACCESS, constants.DISK_KERNELSPACE 1236 ) 1237 1238 if not IsValidDiskAccessModeCombination(self.op.hypervisor, 1239 self.op.disk_template, 1240 access_type): 1241 raise errors.OpPrereqError("Selected hypervisor (%s) cannot be" 1242 " used with %s disk access param" % 1243 (self.op.hypervisor, access_type), 1244 errors.ECODE_STATE) 1245 1246 # Verify instance specs 1247 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None) 1248 ispec = { 1249 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None), 1250 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None), 1251 constants.ISPEC_DISK_COUNT: len(self.disks), 1252 constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE] 1253 for disk in self.disks], 1254 constants.ISPEC_NIC_COUNT: len(self.nics), 1255 constants.ISPEC_SPINDLE_USE: spindle_use, 1256 } 1257 1258 group_info = self.cfg.GetNodeGroup(pnode.group) 1259 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info) 1260 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec, 1261 self.op.disk_template) 1262 if not self.op.ignore_ipolicy and res: 1263 msg = ("Instance allocation to group %s (%s) violates policy: %s" % 1264 (pnode.group, group_info.name, utils.CommaJoin(res))) 1265 raise errors.OpPrereqError(msg, errors.ECODE_INVAL) 1266 1267 CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams) 1268 1269 CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant) 1270 # check OS parameters (remotely) 1271 CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full) 1272 1273 CheckNicsBridgesExist(self, self.nics, self.pnode.uuid) 1274 1275 #TODO: _CheckExtParams (remotely) 1276 # Check parameters for extstorage 1277 1278 # memory check on primary node 1279 #TODO(dynmem): use MINMEM for checking 1280 if self.op.start: 1281 hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}), 1282 self.op.hvparams) 1283 CheckNodeFreeMemory(self, self.pnode.uuid, 1284 "creating instance %s" % self.op.instance_name, 1285 self.be_full[constants.BE_MAXMEM], 1286 self.op.hypervisor, hvfull) 1287 1288 self.dry_run_result = list(node_uuids)
1289
1290 - def Exec(self, feedback_fn):
1291 """Create and add the instance to the cluster. 1292 1293 """ 1294 assert not (self.owned_locks(locking.LEVEL_NODE_RES) - 1295 self.owned_locks(locking.LEVEL_NODE)), \ 1296 "Node locks differ from node resource locks" 1297 assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC) 1298 1299 ht_kind = self.op.hypervisor 1300 if ht_kind in constants.HTS_REQ_PORT: 1301 network_port = self.cfg.AllocatePort() 1302 else: 1303 network_port = None 1304 1305 instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId()) 1306 1307 # This is ugly but we got a chicken-egg problem here 1308 # We can only take the group disk parameters, as the instance 1309 # has no disks yet (we are generating them right here). 1310 nodegroup = self.cfg.GetNodeGroup(self.pnode.group) 1311 disks = GenerateDiskTemplate(self, 1312 self.op.disk_template, 1313 instance_uuid, self.pnode.uuid, 1314 self.secondaries, 1315 self.disks, 1316 self.instance_file_storage_dir, 1317 self.op.file_driver, 1318 0, 1319 feedback_fn, 1320 self.cfg.GetGroupDiskParams(nodegroup)) 1321 1322 iobj = objects.Instance(name=self.op.instance_name, 1323 uuid=instance_uuid, 1324 os=self.op.os_type, 1325 primary_node=self.pnode.uuid, 1326 nics=self.nics, disks=disks, 1327 disk_template=self.op.disk_template, 1328 disks_active=False, 1329 admin_state=constants.ADMINST_DOWN, 1330 network_port=network_port, 1331 beparams=self.op.beparams, 1332 hvparams=self.op.hvparams, 1333 hypervisor=self.op.hypervisor, 1334 osparams=self.op.osparams, 1335 ) 1336 1337 if self.op.tags: 1338 for tag in self.op.tags: 1339 iobj.AddTag(tag) 1340 1341 if self.adopt_disks: 1342 if self.op.disk_template == constants.DT_PLAIN: 1343 # rename LVs to the newly-generated names; we need to construct 1344 # 'fake' LV disks with the old data, plus the new unique_id 1345 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks] 1346 rename_to = [] 1347 for t_dsk, a_dsk in zip(tmp_disks, self.disks): 1348 rename_to.append(t_dsk.logical_id) 1349 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT]) 1350 result = self.rpc.call_blockdev_rename(self.pnode.uuid, 1351 zip(tmp_disks, rename_to)) 1352 result.Raise("Failed to rename adoped LVs") 1353 else: 1354 feedback_fn("* creating instance disks...") 1355 try: 1356 CreateDisks(self, iobj) 1357 except errors.OpExecError: 1358 self.LogWarning("Device creation failed") 1359 self.cfg.ReleaseDRBDMinors(instance_uuid) 1360 raise 1361 1362 feedback_fn("adding instance %s to cluster config" % self.op.instance_name) 1363 1364 self.cfg.AddInstance(iobj, self.proc.GetECId()) 1365 1366 # Declare that we don't want to remove the instance lock anymore, as we've 1367 # added the instance to the config 1368 del self.remove_locks[locking.LEVEL_INSTANCE] 1369 1370 if self.op.mode == constants.INSTANCE_IMPORT: 1371 # Release unused nodes 1372 ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid]) 1373 else: 1374 # Release all nodes 1375 ReleaseLocks(self, locking.LEVEL_NODE) 1376 1377 disk_abort = False 1378 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks: 1379 feedback_fn("* wiping instance disks...") 1380 try: 1381 WipeDisks(self, iobj) 1382 except errors.OpExecError, err: 1383 logging.exception("Wiping disks failed") 1384 self.LogWarning("Wiping instance disks failed (%s)", err) 1385 disk_abort = True 1386 1387 if disk_abort: 1388 # Something is already wrong with the disks, don't do anything else 1389 pass 1390 elif self.op.wait_for_sync: 1391 disk_abort = not WaitForSync(self, iobj) 1392 elif 
iobj.disk_template in constants.DTS_INT_MIRROR: 1393 # make sure the disks are not degraded (still sync-ing is ok) 1394 feedback_fn("* checking mirrors status") 1395 disk_abort = not WaitForSync(self, iobj, oneshot=True) 1396 else: 1397 disk_abort = False 1398 1399 if disk_abort: 1400 RemoveDisks(self, iobj) 1401 self.cfg.RemoveInstance(iobj.uuid) 1402 # Make sure the instance lock gets removed 1403 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name 1404 raise errors.OpExecError("There are some degraded disks for" 1405 " this instance") 1406 1407 # instance disks are now active 1408 iobj.disks_active = True 1409 1410 # Release all node resource locks 1411 ReleaseLocks(self, locking.LEVEL_NODE_RES) 1412 1413 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks: 1414 if self.op.mode == constants.INSTANCE_CREATE: 1415 if not self.op.no_install: 1416 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and 1417 not self.op.wait_for_sync) 1418 if pause_sync: 1419 feedback_fn("* pausing disk sync to install instance OS") 1420 result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid, 1421 (iobj.disks, 1422 iobj), True) 1423 for idx, success in enumerate(result.payload): 1424 if not success: 1425 logging.warn("pause-sync of instance %s for disk %d failed", 1426 self.op.instance_name, idx) 1427 1428 feedback_fn("* running the instance OS create scripts...") 1429 # FIXME: pass debug option from opcode to backend 1430 os_add_result = \ 1431 self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False, 1432 self.op.debug_level) 1433 if pause_sync: 1434 feedback_fn("* resuming disk sync") 1435 result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid, 1436 (iobj.disks, 1437 iobj), False) 1438 for idx, success in enumerate(result.payload): 1439 if not success: 1440 logging.warn("resume-sync of instance %s for disk %d failed", 1441 self.op.instance_name, idx) 1442 1443 os_add_result.Raise("Could not add os for instance %s" 1444 " on node %s" % (self.op.instance_name, 1445 self.pnode.name)) 1446 1447 else: 1448 if self.op.mode == constants.INSTANCE_IMPORT: 1449 feedback_fn("* running the instance OS import scripts...") 1450 1451 transfers = [] 1452 1453 for idx, image in enumerate(self.src_images): 1454 if not image: 1455 continue 1456 1457 # FIXME: pass debug option from opcode to backend 1458 dt = masterd.instance.DiskTransfer("disk/%s" % idx, 1459 constants.IEIO_FILE, (image, ), 1460 constants.IEIO_SCRIPT, 1461 ((iobj.disks[idx], iobj), idx), 1462 None) 1463 transfers.append(dt) 1464 1465 import_result = \ 1466 masterd.instance.TransferInstanceData(self, feedback_fn, 1467 self.op.src_node_uuid, 1468 self.pnode.uuid, 1469 self.pnode.secondary_ip, 1470 iobj, transfers) 1471 if not compat.all(import_result): 1472 self.LogWarning("Some disks for instance %s on node %s were not" 1473 " imported successfully" % (self.op.instance_name, 1474 self.pnode.name)) 1475 1476 rename_from = self._old_instance_name 1477 1478 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT: 1479 feedback_fn("* preparing remote import...") 1480 # The source cluster will stop the instance before attempting to make 1481 # a connection. In some cases stopping an instance can take a long 1482 # time, hence the shutdown timeout is added to the connection 1483 # timeout. 
1484 connect_timeout = (constants.RIE_CONNECT_TIMEOUT + 1485 self.op.source_shutdown_timeout) 1486 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout) 1487 1488 assert iobj.primary_node == self.pnode.uuid 1489 disk_results = \ 1490 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode, 1491 self.source_x509_ca, 1492 self._cds, timeouts) 1493 if not compat.all(disk_results): 1494 # TODO: Should the instance still be started, even if some disks 1495 # failed to import (valid for local imports, too)? 1496 self.LogWarning("Some disks for instance %s on node %s were not" 1497 " imported successfully" % (self.op.instance_name, 1498 self.pnode.name)) 1499 1500 rename_from = self.source_instance_name 1501 1502 else: 1503 # also checked in the prereq part 1504 raise errors.ProgrammerError("Unknown OS initialization mode '%s'" 1505 % self.op.mode) 1506 1507 # Run rename script on newly imported instance 1508 assert iobj.name == self.op.instance_name 1509 feedback_fn("Running rename script for %s" % self.op.instance_name) 1510 result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj, 1511 rename_from, 1512 self.op.debug_level) 1513 result.Warn("Failed to run rename script for %s on node %s" % 1514 (self.op.instance_name, self.pnode.name), self.LogWarning) 1515 1516 assert not self.owned_locks(locking.LEVEL_NODE_RES) 1517 1518 if self.op.start: 1519 iobj.admin_state = constants.ADMINST_UP 1520 self.cfg.Update(iobj, feedback_fn) 1521 logging.info("Starting instance %s on node %s", self.op.instance_name, 1522 self.pnode.name) 1523 feedback_fn("* starting instance...") 1524 result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None), 1525 False, self.op.reason) 1526 result.Raise("Could not start instance") 1527 1528 return self.cfg.GetNodeNames(list(iobj.all_nodes))
1529
1530 1531 -class LUInstanceRename(LogicalUnit):
1532 """Rename an instance. 1533 1534 """ 1535 HPATH = "instance-rename" 1536 HTYPE = constants.HTYPE_INSTANCE 1537
1538 - def CheckArguments(self):
1539 """Check arguments. 1540 1541 """ 1542 if self.op.ip_check and not self.op.name_check: 1543 # TODO: make the ip check more flexible and not depend on the name check 1544 raise errors.OpPrereqError("IP address check requires a name check", 1545 errors.ECODE_INVAL)
1546
1547 - def BuildHooksEnv(self):
1548 """Build hooks env. 1549 1550 This runs on master, primary and secondary nodes of the instance. 1551 1552 """ 1553 env = BuildInstanceHookEnvByObject(self, self.instance) 1554 env["INSTANCE_NEW_NAME"] = self.op.new_name 1555 return env
1556
1557 - def BuildHooksNodes(self):
1558 """Build hooks nodes. 1559 1560 """ 1561 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) 1562 return (nl, nl)
1563
1564 - def CheckPrereq(self):
1565 """Check prerequisites. 1566 1567 This checks that the instance is in the cluster and is not running. 1568 1569 """ 1570 (self.op.instance_uuid, self.op.instance_name) = \ 1571 ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid, 1572 self.op.instance_name) 1573 instance = self.cfg.GetInstanceInfo(self.op.instance_uuid) 1574 assert instance is not None 1575 1576 # It should actually not happen that an instance is running with a disabled 1577 # disk template, but in case it does, the renaming of file-based instances 1578 # will fail horribly. Thus, we test it before. 1579 if (instance.disk_template in constants.DTS_FILEBASED and 1580 self.op.new_name != instance.name): 1581 CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(), 1582 instance.disk_template) 1583 1584 CheckNodeOnline(self, instance.primary_node) 1585 CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING, 1586 msg="cannot rename") 1587 self.instance = instance 1588 1589 new_name = self.op.new_name 1590 if self.op.name_check: 1591 hostname = _CheckHostnameSane(self, new_name) 1592 new_name = self.op.new_name = hostname.name 1593 if (self.op.ip_check and 1594 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)): 1595 raise errors.OpPrereqError("IP %s of instance %s already in use" % 1596 (hostname.ip, new_name), 1597 errors.ECODE_NOTUNIQUE) 1598 1599 instance_names = [inst.name for 1600 inst in self.cfg.GetAllInstancesInfo().values()] 1601 if new_name in instance_names and new_name != instance.name: 1602 raise errors.OpPrereqError("Instance '%s' is already in the cluster" % 1603 new_name, errors.ECODE_EXISTS)
1604
1605 - def Exec(self, feedback_fn):
1606 """Rename the instance. 1607 1608 """ 1609 old_name = self.instance.name 1610 1611 rename_file_storage = False 1612 if (self.instance.disk_template in constants.DTS_FILEBASED and 1613 self.op.new_name != self.instance.name): 1614 old_file_storage_dir = os.path.dirname( 1615 self.instance.disks[0].logical_id[1]) 1616 rename_file_storage = True 1617 1618 self.cfg.RenameInstance(self.instance.uuid, self.op.new_name) 1619 # Change the instance lock. This is definitely safe while we hold the BGL. 1620 # Otherwise the new lock would have to be added in acquired mode. 1621 assert self.REQ_BGL 1622 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER) 1623 self.glm.remove(locking.LEVEL_INSTANCE, old_name) 1624 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name) 1625 1626 # re-read the instance from the configuration after rename 1627 renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid) 1628 1629 if rename_file_storage: 1630 new_file_storage_dir = os.path.dirname( 1631 renamed_inst.disks[0].logical_id[1]) 1632 result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node, 1633 old_file_storage_dir, 1634 new_file_storage_dir) 1635 result.Raise("Could not rename on node %s directory '%s' to '%s'" 1636 " (but the instance has been renamed in Ganeti)" % 1637 (self.cfg.GetNodeName(renamed_inst.primary_node), 1638 old_file_storage_dir, new_file_storage_dir)) 1639 1640 StartInstanceDisks(self, renamed_inst, None) 1641 # update info on disks 1642 info = GetInstanceInfoText(renamed_inst) 1643 for (idx, disk) in enumerate(renamed_inst.disks): 1644 for node_uuid in renamed_inst.all_nodes: 1645 result = self.rpc.call_blockdev_setinfo(node_uuid, 1646 (disk, renamed_inst), info) 1647 result.Warn("Error setting info on node %s for disk %s" % 1648 (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning) 1649 try: 1650 result = self.rpc.call_instance_run_rename(renamed_inst.primary_node, 1651 renamed_inst, old_name, 1652 self.op.debug_level) 1653 result.Warn("Could not run OS rename script for instance %s on node %s" 1654 " (but the instance has been renamed in Ganeti)" % 1655 (renamed_inst.name, 1656 self.cfg.GetNodeName(renamed_inst.primary_node)), 1657 self.LogWarning) 1658 finally: 1659 ShutdownInstanceDisks(self, renamed_inst) 1660 1661 return renamed_inst.name
1662
1663 1664 -class LUInstanceRemove(LogicalUnit):
1665 """Remove an instance. 1666 1667 """ 1668 HPATH = "instance-remove" 1669 HTYPE = constants.HTYPE_INSTANCE 1670 REQ_BGL = False 1671
1672 - def ExpandNames(self):
1673 self._ExpandAndLockInstance() 1674 self.needed_locks[locking.LEVEL_NODE] = [] 1675 self.needed_locks[locking.LEVEL_NODE_RES] = [] 1676 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1677
1678 - def DeclareLocks(self, level):
1679 if level == locking.LEVEL_NODE: 1680 self._LockInstancesNodes() 1681 elif level == locking.LEVEL_NODE_RES: 1682 # Copy node locks 1683 self.needed_locks[locking.LEVEL_NODE_RES] = \ 1684 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1685
1686 - def BuildHooksEnv(self):
1687 """Build hooks env. 1688 1689 This runs on master, primary and secondary nodes of the instance. 1690 1691 """ 1692 env = BuildInstanceHookEnvByObject(self, self.instance) 1693 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout 1694 return env
1695
1696 - def BuildHooksNodes(self):
1697 """Build hooks nodes. 1698 1699 """ 1700 nl = [self.cfg.GetMasterNode()] 1701 nl_post = list(self.instance.all_nodes) + nl 1702 return (nl, nl_post)
1703
1704 - def CheckPrereq(self):
1705 """Check prerequisites. 1706 1707 This checks that the instance is in the cluster. 1708 1709 """ 1710 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid) 1711 assert self.instance is not None, \ 1712 "Cannot retrieve locked instance %s" % self.op.instance_name
1713
1714 - def Exec(self, feedback_fn):
1715 """Remove the instance. 1716 1717 """ 1718 logging.info("Shutting down instance %s on node %s", self.instance.name, 1719 self.cfg.GetNodeName(self.instance.primary_node)) 1720 1721 result = self.rpc.call_instance_shutdown(self.instance.primary_node, 1722 self.instance, 1723 self.op.shutdown_timeout, 1724 self.op.reason) 1725 if self.op.ignore_failures: 1726 result.Warn("Warning: can't shutdown instance", feedback_fn) 1727 else: 1728 result.Raise("Could not shutdown instance %s on node %s" % 1729 (self.instance.name, 1730 self.cfg.GetNodeName(self.instance.primary_node))) 1731 1732 assert (self.owned_locks(locking.LEVEL_NODE) == 1733 self.owned_locks(locking.LEVEL_NODE_RES)) 1734 assert not (set(self.instance.all_nodes) - 1735 self.owned_locks(locking.LEVEL_NODE)), \ 1736 "Not owning correct locks" 1737 1738 RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1739
1740 1741 -class LUInstanceMove(LogicalUnit):
1742 """Move an instance by data-copying. 1743 1744 """ 1745 HPATH = "instance-move" 1746 HTYPE = constants.HTYPE_INSTANCE 1747 REQ_BGL = False 1748
1749 - def ExpandNames(self):
1750 self._ExpandAndLockInstance() 1751 (self.op.target_node_uuid, self.op.target_node) = \ 1752 ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid, 1753 self.op.target_node) 1754 self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid] 1755 self.needed_locks[locking.LEVEL_NODE_RES] = [] 1756 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1757
1758 - def DeclareLocks(self, level):
1759 if level == locking.LEVEL_NODE: 1760 self._LockInstancesNodes(primary_only=True) 1761 elif level == locking.LEVEL_NODE_RES: 1762 # Copy node locks 1763 self.needed_locks[locking.LEVEL_NODE_RES] = \ 1764 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1765
1766 - def BuildHooksEnv(self):
1767 """Build hooks env. 1768 1769 This runs on master, primary and secondary nodes of the instance. 1770 1771 """ 1772 env = { 1773 "TARGET_NODE": self.op.target_node, 1774 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout, 1775 } 1776 env.update(BuildInstanceHookEnvByObject(self, self.instance)) 1777 return env
1778
1779 - def BuildHooksNodes(self):
1780 """Build hooks nodes. 1781 1782 """ 1783 nl = [ 1784 self.cfg.GetMasterNode(), 1785 self.instance.primary_node, 1786 self.op.target_node_uuid, 1787 ] 1788 return (nl, nl)
1789
1790 - def CheckPrereq(self):
1791 """Check prerequisites. 1792 1793 This checks that the instance is in the cluster. 1794 1795 """ 1796 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid) 1797 assert self.instance is not None, \ 1798 "Cannot retrieve locked instance %s" % self.op.instance_name 1799 1800 if self.instance.disk_template not in constants.DTS_COPYABLE: 1801 raise errors.OpPrereqError("Disk template %s not suitable for copying" % 1802 self.instance.disk_template, 1803 errors.ECODE_STATE) 1804 1805 target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid) 1806 assert target_node is not None, \ 1807 "Cannot retrieve locked node %s" % self.op.target_node 1808 1809 self.target_node_uuid = target_node.uuid 1810 if target_node.uuid == self.instance.primary_node: 1811 raise errors.OpPrereqError("Instance %s is already on the node %s" % 1812 (self.instance.name, target_node.name), 1813 errors.ECODE_STATE) 1814 1815 bep = self.cfg.GetClusterInfo().FillBE(self.instance) 1816 1817 for idx, dsk in enumerate(self.instance.disks): 1818 if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE, 1819 constants.DT_SHARED_FILE): 1820 raise errors.OpPrereqError("Instance disk %d has a complex layout," 1821 " cannot copy" % idx, errors.ECODE_STATE) 1822 1823 CheckNodeOnline(self, target_node.uuid) 1824 CheckNodeNotDrained(self, target_node.uuid) 1825 CheckNodeVmCapable(self, target_node.uuid) 1826 cluster = self.cfg.GetClusterInfo() 1827 group_info = self.cfg.GetNodeGroup(target_node.group) 1828 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info) 1829 CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg, 1830 ignore=self.op.ignore_ipolicy) 1831 1832 if self.instance.admin_state == constants.ADMINST_UP: 1833 # check memory requirements on the secondary node 1834 CheckNodeFreeMemory( 1835 self, target_node.uuid, "failing over instance %s" % 1836 self.instance.name, bep[constants.BE_MAXMEM], 1837 self.instance.hypervisor, 1838 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor]) 1839 else: 1840 self.LogInfo("Not checking memory on the secondary node as" 1841 " instance will not be started") 1842 1843 # check bridge existance 1844 CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1845
1846 - def Exec(self, feedback_fn):
1847 """Move an instance. 1848 1849 The move is done by shutting it down on its present node, copying 1850 the data over (slow) and starting it on the new node. 1851 1852 """ 1853 source_node = self.cfg.GetNodeInfo(self.instance.primary_node) 1854 target_node = self.cfg.GetNodeInfo(self.target_node_uuid) 1855 1856 self.LogInfo("Shutting down instance %s on source node %s", 1857 self.instance.name, source_node.name) 1858 1859 assert (self.owned_locks(locking.LEVEL_NODE) == 1860 self.owned_locks(locking.LEVEL_NODE_RES)) 1861 1862 result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance, 1863 self.op.shutdown_timeout, 1864 self.op.reason) 1865 if self.op.ignore_consistency: 1866 result.Warn("Could not shutdown instance %s on node %s. Proceeding" 1867 " anyway. Please make sure node %s is down. Error details" % 1868 (self.instance.name, source_node.name, source_node.name), 1869 self.LogWarning) 1870 else: 1871 result.Raise("Could not shutdown instance %s on node %s" % 1872 (self.instance.name, source_node.name)) 1873 1874 # create the target disks 1875 try: 1876 CreateDisks(self, self.instance, target_node_uuid=target_node.uuid) 1877 except errors.OpExecError: 1878 self.LogWarning("Device creation failed") 1879 self.cfg.ReleaseDRBDMinors(self.instance.uuid) 1880 raise 1881 1882 cluster_name = self.cfg.GetClusterInfo().cluster_name 1883 1884 errs = [] 1885 # activate, get path, copy the data over 1886 for idx, disk in enumerate(self.instance.disks): 1887 self.LogInfo("Copying data for disk %d", idx) 1888 result = self.rpc.call_blockdev_assemble( 1889 target_node.uuid, (disk, self.instance), self.instance.name, 1890 True, idx) 1891 if result.fail_msg: 1892 self.LogWarning("Can't assemble newly created disk %d: %s", 1893 idx, result.fail_msg) 1894 errs.append(result.fail_msg) 1895 break 1896 dev_path, _, __ = result.payload 1897 result = self.rpc.call_blockdev_export(source_node.uuid, (disk, 1898 self.instance), 1899 target_node.secondary_ip, 1900 dev_path, cluster_name) 1901 if result.fail_msg: 1902 self.LogWarning("Can't copy data over for disk %d: %s", 1903 idx, result.fail_msg) 1904 errs.append(result.fail_msg) 1905 break 1906 1907 if errs: 1908 self.LogWarning("Some disks failed to copy, aborting") 1909 try: 1910 RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid) 1911 finally: 1912 self.cfg.ReleaseDRBDMinors(self.instance.uuid) 1913 raise errors.OpExecError("Errors during disk copy: %s" % 1914 (",".join(errs),)) 1915 1916 self.instance.primary_node = target_node.uuid 1917 self.cfg.Update(self.instance, feedback_fn) 1918 1919 self.LogInfo("Removing the disks on the original node") 1920 RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid) 1921 1922 # Only start the instance if it's marked as up 1923 if self.instance.admin_state == constants.ADMINST_UP: 1924 self.LogInfo("Starting instance %s on node %s", 1925 self.instance.name, target_node.name) 1926 1927 disks_ok, _ = AssembleInstanceDisks(self, self.instance, 1928 ignore_secondaries=True) 1929 if not disks_ok: 1930 ShutdownInstanceDisks(self, self.instance) 1931 raise errors.OpExecError("Can't activate the instance's disks") 1932 1933 result = self.rpc.call_instance_start(target_node.uuid, 1934 (self.instance, None, None), False, 1935 self.op.reason) 1936 msg = result.fail_msg 1937 if msg: 1938 ShutdownInstanceDisks(self, self.instance) 1939 raise errors.OpExecError("Could not start instance %s on node %s: %s" % 1940 (self.instance.name, target_node.name, msg))
1941
1942 1943 -class LUInstanceMultiAlloc(NoHooksLU):
1944 """Allocates multiple instances at the same time. 1945 1946 """ 1947 REQ_BGL = False 1948
1949 - def CheckArguments(self):
1950 """Check arguments. 1951 1952 """ 1953 nodes = [] 1954 for inst in self.op.instances: 1955 if inst.iallocator is not None: 1956 raise errors.OpPrereqError("an iallocator is not allowed to be set on" 1957 " instance objects", errors.ECODE_INVAL) 1958 nodes.append(bool(inst.pnode)) 1959 if inst.disk_template in constants.DTS_INT_MIRROR: 1960 nodes.append(bool(inst.snode)) 1961 1962 has_nodes = compat.any(nodes) 1963 if compat.all(nodes) ^ has_nodes: 1964 raise errors.OpPrereqError("There are instance objects providing" 1965 " pnode/snode while others do not", 1966 errors.ECODE_INVAL) 1967 1968 if not has_nodes and self.op.iallocator is None: 1969 default_iallocator = self.cfg.GetDefaultIAllocator() 1970 if default_iallocator: 1971 self.op.iallocator = default_iallocator 1972 else: 1973 raise errors.OpPrereqError("No iallocator or nodes on the instances" 1974 " given and no cluster-wide default" 1975 " iallocator found; please specify either" 1976 " an iallocator or nodes on the instances" 1977 " or set a cluster-wide default iallocator", 1978 errors.ECODE_INVAL) 1979 1980 _CheckOpportunisticLocking(self.op) 1981 1982 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances]) 1983 if dups: 1984 raise errors.OpPrereqError("There are duplicate instance names: %s" % 1985 utils.CommaJoin(dups), errors.ECODE_INVAL)
1986
1987 - def ExpandNames(self):
1988 """Calculate the locks. 1989 1990 """ 1991 self.share_locks = ShareAll() 1992 self.needed_locks = { 1993 # iallocator will select nodes and even if no iallocator is used, 1994 # collisions with LUInstanceCreate should be avoided 1995 locking.LEVEL_NODE_ALLOC: locking.ALL_SET, 1996 } 1997 1998 if self.op.iallocator: 1999 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET 2000 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET 2001 2002 if self.op.opportunistic_locking: 2003 self.opportunistic_locks[locking.LEVEL_NODE] = True 2004 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True 2005 else: 2006 nodeslist = [] 2007 for inst in self.op.instances: 2008 (inst.pnode_uuid, inst.pnode) = \ 2009 ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode) 2010 nodeslist.append(inst.pnode_uuid) 2011 if inst.snode is not None: 2012 (inst.snode_uuid, inst.snode) = \ 2013 ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode) 2014 nodeslist.append(inst.snode_uuid) 2015 2016 self.needed_locks[locking.LEVEL_NODE] = nodeslist 2017 # Lock resources of instance's primary and secondary nodes (copy to 2018 # prevent accidental modification) 2019 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
2020
2021 - def CheckPrereq(self):
2022 """Check prerequisite. 2023 2024 """ 2025 if self.op.iallocator: 2026 cluster = self.cfg.GetClusterInfo() 2027 default_vg = self.cfg.GetVGName() 2028 ec_id = self.proc.GetECId() 2029 2030 if self.op.opportunistic_locking: 2031 # Only consider nodes for which a lock is held 2032 node_whitelist = self.cfg.GetNodeNames( 2033 set(self.owned_locks(locking.LEVEL_NODE)) & 2034 set(self.owned_locks(locking.LEVEL_NODE_RES))) 2035 else: 2036 node_whitelist = None 2037 2038 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg), 2039 _ComputeNics(op, cluster, None, 2040 self.cfg, ec_id), 2041 _ComputeFullBeParams(op, cluster), 2042 node_whitelist) 2043 for op in self.op.instances] 2044 2045 req = iallocator.IAReqMultiInstanceAlloc(instances=insts) 2046 ial = iallocator.IAllocator(self.cfg, self.rpc, req) 2047 2048 ial.Run(self.op.iallocator) 2049 2050 if not ial.success: 2051 raise errors.OpPrereqError("Can't compute nodes using" 2052 " iallocator '%s': %s" % 2053 (self.op.iallocator, ial.info), 2054 errors.ECODE_NORES) 2055 2056 self.ia_result = ial.result 2057 2058 if self.op.dry_run: 2059 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), { 2060 constants.JOB_IDS_KEY: [], 2061 })
2062
2063 - def _ConstructPartialResult(self):
2064 """Constructs the partial result. 2065 2066 """ 2067 if self.op.iallocator: 2068 (allocatable, failed_insts) = self.ia_result 2069 allocatable_insts = map(compat.fst, allocatable) 2070 else: 2071 allocatable_insts = [op.instance_name for op in self.op.instances] 2072 failed_insts = [] 2073 2074 return { 2075 constants.ALLOCATABLE_KEY: allocatable_insts, 2076 constants.FAILED_KEY: failed_insts, 2077 }
2078
2079 - def Exec(self, feedback_fn):
2080 """Executes the opcode. 2081 2082 """ 2083 jobs = [] 2084 if self.op.iallocator: 2085 op2inst = dict((op.instance_name, op) for op in self.op.instances) 2086 (allocatable, failed) = self.ia_result 2087 2088 for (name, node_names) in allocatable: 2089 op = op2inst.pop(name) 2090 2091 (op.pnode_uuid, op.pnode) = \ 2092 ExpandNodeUuidAndName(self.cfg, None, node_names[0]) 2093 if len(node_names) > 1: 2094 (op.snode_uuid, op.snode) = \ 2095 ExpandNodeUuidAndName(self.cfg, None, node_names[1]) 2096 2097 jobs.append([op]) 2098 2099 missing = set(op2inst.keys()) - set(failed) 2100 assert not missing, \ 2101 "Iallocator did return incomplete result: %s" % \ 2102 utils.CommaJoin(missing) 2103 else: 2104 jobs.extend([op] for op in self.op.instances) 2105 2106 return ResultWithJobs(jobs, **self._ConstructPartialResult())
2107
2108 2109 -class _InstNicModPrivate(object):
2110 """Data structure for network interface modifications. 2111 2112 Used by L{LUInstanceSetParams}. 2113 2114 """
2115 - def __init__(self):
2116 self.params = None 2117 self.filled = None
2118
2119 2120 -def _PrepareContainerMods(mods, private_fn):
2121 """Prepares a list of container modifications by adding a private data field. 2122 2123 @type mods: list of tuples; (operation, index, parameters) 2124 @param mods: List of modifications 2125 @type private_fn: callable or None 2126 @param private_fn: Callable for constructing a private data field for a 2127 modification 2128 @rtype: list 2129 2130 """ 2131 if private_fn is None: 2132 fn = lambda: None 2133 else: 2134 fn = private_fn 2135 2136 return [(op, idx, params, fn()) for (op, idx, params) in mods]
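# Illustrative sketch, not part of the upstream module: with private_fn=None
# each (op, index, params) tuple simply gains a trailing None placeholder.
#
#   mods = [(constants.DDM_ADD, -1, {"size": 1024}),
#           (constants.DDM_REMOVE, 0, {})]
#   prepared = _PrepareContainerMods(mods, None)
#   # prepared == [(constants.DDM_ADD, -1, {"size": 1024}, None),
#   #              (constants.DDM_REMOVE, 0, {}, None)]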
2137
2138 2139 -def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2140 """Checks if nodes have enough physical CPUs 2141 2142 This function checks if all given nodes have the needed number of 2143 physical CPUs. In case any node has less CPUs or we cannot get the 2144 information from the node, this function raises an OpPrereqError 2145 exception. 2146 2147 @type lu: C{LogicalUnit} 2148 @param lu: a logical unit from which we get configuration data 2149 @type node_uuids: C{list} 2150 @param node_uuids: the list of node UUIDs to check 2151 @type requested: C{int} 2152 @param requested: the minimum acceptable number of physical CPUs 2153 @type hypervisor_specs: list of pairs (string, dict of strings) 2154 @param hypervisor_specs: list of hypervisor specifications in 2155 pairs (hypervisor_name, hvparams) 2156 @raise errors.OpPrereqError: if the node doesn't have enough CPUs, 2157 or we cannot check the node 2158 2159 """ 2160 nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs) 2161 for node_uuid in node_uuids: 2162 info = nodeinfo[node_uuid] 2163 node_name = lu.cfg.GetNodeName(node_uuid) 2164 info.Raise("Cannot get current information from node %s" % node_name, 2165 prereq=True, ecode=errors.ECODE_ENVIRON) 2166 (_, _, (hv_info, )) = info.payload 2167 num_cpus = hv_info.get("cpu_total", None) 2168 if not isinstance(num_cpus, int): 2169 raise errors.OpPrereqError("Can't compute the number of physical CPUs" 2170 " on node %s, result was '%s'" % 2171 (node_name, num_cpus), errors.ECODE_ENVIRON) 2172 if requested > num_cpus: 2173 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are " 2174 "required" % (node_name, num_cpus, requested), 2175 errors.ECODE_NORES)
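# Illustrative call sketch (not part of the upstream module), assuming a
# logical unit "lu" and an instance object "instance": hypervisor_specs pairs
# each hypervisor name with its cluster-level parameters, mirroring the call
# made later in LUInstanceSetParams.CheckPrereq.
#
#   hvspecs = [(instance.hypervisor,
#               lu.cfg.GetClusterInfo().hvparams[instance.hypervisor])]
#   _CheckNodesPhysicalCPUs(lu, instance.all_nodes, max_requested_cpu + 1,
#                           hvspecs)
#   # raises errors.OpPrereqError if any node reports fewer physical CPUs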
2176
2177 2178 -def GetItemFromContainer(identifier, kind, container):
2179 """Return the item referred to by the identifier. 2180 2181 @type identifier: string 2182 @param identifier: Item index or name or UUID 2183 @type kind: string 2184 @param kind: One-word item description 2185 @type container: list 2186 @param container: Container to get the item from 2187 2188 """ 2189 # Index 2190 try: 2191 idx = int(identifier) 2192 if idx == -1: 2193 # Append 2194 absidx = len(container) - 1 2195 elif idx < 0: 2196 raise IndexError("Not accepting negative indices other than -1") 2197 elif idx > len(container): 2198 raise IndexError("Got %s index %s, but there are only %s" % 2199 (kind, idx, len(container))) 2200 else: 2201 absidx = idx 2202 return (absidx, container[idx]) 2203 except ValueError: 2204 pass 2205 2206 for idx, item in enumerate(container): 2207 if item.uuid == identifier or item.name == identifier: 2208 return (idx, item) 2209 2210 raise errors.OpPrereqError("Cannot find %s with identifier %s" % 2211 (kind, identifier), errors.ECODE_NOENT)
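# Illustrative examples (not part of the upstream module), assuming a
# hypothetical list "nics" of NIC objects with "name" and "uuid" attributes:
#
#   GetItemFromContainer("0", "nic", nics)          # (0, nics[0]), by index
#   GetItemFromContainer("-1", "nic", nics)         # (len(nics) - 1, nics[-1])
#   GetItemFromContainer("eth-front", "nic", nics)  # matched by name or UUID
#   GetItemFromContainer("missing", "nic", nics)    # raises errors.OpPrereqError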
2212
2213 2214 -def _ApplyContainerMods(kind, container, chgdesc, mods, 2215 create_fn, modify_fn, remove_fn, 2216 post_add_fn=None):
2217 """Applies descriptions in C{mods} to C{container}. 2218 2219 @type kind: string 2220 @param kind: One-word item description 2221 @type container: list 2222 @param container: Container to modify 2223 @type chgdesc: None or list 2224 @param chgdesc: List of applied changes 2225 @type mods: list 2226 @param mods: Modifications as returned by L{_PrepareContainerMods} 2227 @type create_fn: callable 2228 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD}); 2229 receives absolute item index, parameters and private data object as added 2230 by L{_PrepareContainerMods}, returns tuple containing new item and changes 2231 as list 2232 @type modify_fn: callable 2233 @param modify_fn: Callback for modifying an existing item 2234 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters 2235 and private data object as added by L{_PrepareContainerMods}, returns 2236 changes as list 2237 @type remove_fn: callable 2238 @param remove_fn: Callback on removing item; receives absolute item index, 2239 item and private data object as added by L{_PrepareContainerMods} 2240 @type post_add_fn: callable 2241 @param post_add_fn: Callable for post-processing a newly created item after 2242 it has been put into the container. It receives the index of the new item 2243 and the new item as parameters. 2244 2245 """ 2246 for (op, identifier, params, private) in mods: 2247 changes = None 2248 2249 if op == constants.DDM_ADD: 2250 # Calculate where item will be added 2251 # When adding an item, identifier can only be an index 2252 try: 2253 idx = int(identifier) 2254 except ValueError: 2255 raise errors.OpPrereqError("Only possitive integer or -1 is accepted as" 2256 " identifier for %s" % constants.DDM_ADD, 2257 errors.ECODE_INVAL) 2258 if idx == -1: 2259 addidx = len(container) 2260 else: 2261 if idx < 0: 2262 raise IndexError("Not accepting negative indices other than -1") 2263 elif idx > len(container): 2264 raise IndexError("Got %s index %s, but there are only %s" % 2265 (kind, idx, len(container))) 2266 addidx = idx 2267 2268 if create_fn is None: 2269 item = params 2270 else: 2271 (item, changes) = create_fn(addidx, params, private) 2272 2273 if idx == -1: 2274 container.append(item) 2275 else: 2276 assert idx >= 0 2277 assert idx <= len(container) 2278 # list.insert does so before the specified index 2279 container.insert(idx, item) 2280 2281 if post_add_fn is not None: 2282 post_add_fn(addidx, item) 2283 2284 else: 2285 # Retrieve existing item 2286 (absidx, item) = GetItemFromContainer(identifier, kind, container) 2287 2288 if op == constants.DDM_REMOVE: 2289 assert not params 2290 2291 changes = [("%s/%s" % (kind, absidx), "remove")] 2292 2293 if remove_fn is not None: 2294 msg = remove_fn(absidx, item, private) 2295 if msg: 2296 changes.append(("%s/%s" % (kind, absidx), msg)) 2297 2298 assert container[absidx] == item 2299 del container[absidx] 2300 elif op == constants.DDM_MODIFY: 2301 if modify_fn is not None: 2302 changes = modify_fn(absidx, item, params, private) 2303 else: 2304 raise errors.ProgrammerError("Unhandled operation '%s'" % op) 2305 2306 assert _TApplyContModsCbChanges(changes) 2307 2308 if not (chgdesc is None or changes is None): 2309 chgdesc.extend(changes)
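# Illustrative sketch (not part of the upstream module): with all callbacks set
# to None the helper degenerates to plain list surgery on the container.
#
#   container = ["a", "b"]
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_ADD, -1, "c"),
#                                 (constants.DDM_REMOVE, "0", {})], None)
#   _ApplyContainerMods("item", container, chgdesc, mods, None, None, None)
#   # container == ["b", "c"]; chgdesc == [("item/0", "remove")]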
2310
2311 2312 -def _UpdateIvNames(base_index, disks):
2313 """Updates the C{iv_name} attribute of disks. 2314 2315 @type disks: list of L{objects.Disk} 2316 2317 """ 2318 for (idx, disk) in enumerate(disks): 2319 disk.iv_name = "disk/%s" % (base_index + idx, )
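# Illustrative example (not part of the upstream module): renumbering two
# hypothetical disk objects appended after an existing pair.
#
#   _UpdateIvNames(2, [disk_a, disk_b])
#   # disk_a.iv_name == "disk/2", disk_b.iv_name == "disk/3"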
2320
2321 2322 -class LUInstanceSetParams(LogicalUnit):
2323 """Modifies an instance's parameters. 2324 2325 """ 2326 HPATH = "instance-modify" 2327 HTYPE = constants.HTYPE_INSTANCE 2328 REQ_BGL = False 2329 2330 @staticmethod
2331 - def _UpgradeDiskNicMods(kind, mods, verify_fn):
2332 assert ht.TList(mods) 2333 assert not mods or len(mods[0]) in (2, 3) 2334 2335 if mods and len(mods[0]) == 2: 2336 result = [] 2337 2338 addremove = 0 2339 for op, params in mods: 2340 if op in (constants.DDM_ADD, constants.DDM_REMOVE): 2341 result.append((op, -1, params)) 2342 addremove += 1 2343 2344 if addremove > 1: 2345 raise errors.OpPrereqError("Only one %s add or remove operation is" 2346 " supported at a time" % kind, 2347 errors.ECODE_INVAL) 2348 else: 2349 result.append((constants.DDM_MODIFY, op, params)) 2350 2351 assert verify_fn(result) 2352 else: 2353 result = mods 2354 2355 return result
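  # Illustrative conversions (not part of the upstream module): the legacy
  # two-element syntax is upgraded to the (op, index, params) form.
  #
  #   [(constants.DDM_ADD, {constants.IDISK_SIZE: 1024})]
  #       -> [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})]
  #   [(0, {constants.IDISK_MODE: constants.DISK_RDWR})]
  #       -> [(constants.DDM_MODIFY, 0,
  #            {constants.IDISK_MODE: constants.DISK_RDWR})]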
2356 2357 @staticmethod
2358 - def _CheckMods(kind, mods, key_types, item_fn):
2359 """Ensures requested disk/NIC modifications are valid. 2360 2361 """ 2362 for (op, _, params) in mods: 2363 assert ht.TDict(params) 2364 2365 # If 'key_types' is an empty dict, we assume we have an 2366 # 'ext' template and thus do not ForceDictType 2367 if key_types: 2368 utils.ForceDictType(params, key_types) 2369 2370 if op == constants.DDM_REMOVE: 2371 if params: 2372 raise errors.OpPrereqError("No settings should be passed when" 2373 " removing a %s" % kind, 2374 errors.ECODE_INVAL) 2375 elif op in (constants.DDM_ADD, constants.DDM_MODIFY): 2376 item_fn(op, params) 2377 else: 2378 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2379
2380 - def _VerifyDiskModification(self, op, params, excl_stor):
2381 """Verifies a disk modification. 2382 2383 """ 2384 if op == constants.DDM_ADD: 2385 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR) 2386 if mode not in constants.DISK_ACCESS_SET: 2387 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode, 2388 errors.ECODE_INVAL) 2389 2390 size = params.get(constants.IDISK_SIZE, None) 2391 if size is None: 2392 raise errors.OpPrereqError("Required disk parameter '%s' missing" % 2393 constants.IDISK_SIZE, errors.ECODE_INVAL) 2394 size = int(size) 2395 2396 params[constants.IDISK_SIZE] = size 2397 name = params.get(constants.IDISK_NAME, None) 2398 if name is not None and name.lower() == constants.VALUE_NONE: 2399 params[constants.IDISK_NAME] = None 2400 2401 CheckSpindlesExclusiveStorage(params, excl_stor, True) 2402 2403 elif op == constants.DDM_MODIFY: 2404 if constants.IDISK_SIZE in params: 2405 raise errors.OpPrereqError("Disk size change not possible, use" 2406 " grow-disk", errors.ECODE_INVAL) 2407 2408 # Disk modification supports changing only the disk name and mode. 2409 # Changing arbitrary parameters is allowed only for ext disk template", 2410 if self.instance.disk_template != constants.DT_EXT: 2411 utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES) 2412 2413 name = params.get(constants.IDISK_NAME, None) 2414 if name is not None and name.lower() == constants.VALUE_NONE: 2415 params[constants.IDISK_NAME] = None
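  # Illustrative parameter sets (not part of the upstream module), calling the
  # method above with exclusive storage disabled:
  #
  #   self._VerifyDiskModification(constants.DDM_ADD,
  #                                {constants.IDISK_SIZE: 1024}, False)
  #   # accepted; IDISK_MODE defaults to constants.DISK_RDWR
  #   self._VerifyDiskModification(constants.DDM_MODIFY,
  #                                {constants.IDISK_SIZE: 2048}, False)
  #   # raises errors.OpPrereqError: size changes must go through grow-disk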
2416 2417 @staticmethod
2418 - def _VerifyNicModification(op, params):
2419 """Verifies a network interface modification. 2420 2421 """ 2422 if op in (constants.DDM_ADD, constants.DDM_MODIFY): 2423 ip = params.get(constants.INIC_IP, None) 2424 name = params.get(constants.INIC_NAME, None) 2425 req_net = params.get(constants.INIC_NETWORK, None) 2426 link = params.get(constants.NIC_LINK, None) 2427 mode = params.get(constants.NIC_MODE, None) 2428 if name is not None and name.lower() == constants.VALUE_NONE: 2429 params[constants.INIC_NAME] = None 2430 if req_net is not None: 2431 if req_net.lower() == constants.VALUE_NONE: 2432 params[constants.INIC_NETWORK] = None 2433 req_net = None 2434 elif link is not None or mode is not None: 2435 raise errors.OpPrereqError("If network is given" 2436 " mode or link should not", 2437 errors.ECODE_INVAL) 2438 2439 if op == constants.DDM_ADD: 2440 macaddr = params.get(constants.INIC_MAC, None) 2441 if macaddr is None: 2442 params[constants.INIC_MAC] = constants.VALUE_AUTO 2443 2444 if ip is not None: 2445 if ip.lower() == constants.VALUE_NONE: 2446 params[constants.INIC_IP] = None 2447 else: 2448 if ip.lower() == constants.NIC_IP_POOL: 2449 if op == constants.DDM_ADD and req_net is None: 2450 raise errors.OpPrereqError("If ip=pool, parameter network" 2451 " cannot be none", 2452 errors.ECODE_INVAL) 2453 else: 2454 if not netutils.IPAddress.IsValid(ip): 2455 raise errors.OpPrereqError("Invalid IP address '%s'" % ip, 2456 errors.ECODE_INVAL) 2457 2458 if constants.INIC_MAC in params: 2459 macaddr = params[constants.INIC_MAC] 2460 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE): 2461 macaddr = utils.NormalizeAndValidateMac(macaddr) 2462 2463 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO: 2464 raise errors.OpPrereqError("'auto' is not a valid MAC address when" 2465 " modifying an existing NIC", 2466 errors.ECODE_INVAL)
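  # Illustrative parameter sets (not part of the upstream module), using the
  # staticmethod above with a hypothetical network name "mynet":
  #
  #   _VerifyNicModification(constants.DDM_ADD,
  #                          {constants.INIC_IP: "pool",
  #                           constants.INIC_NETWORK: "mynet"})
  #   # accepted; the MAC defaults to constants.VALUE_AUTO
  #   _VerifyNicModification(constants.DDM_ADD,
  #                          {constants.INIC_IP: "pool"})
  #   # raises errors.OpPrereqError: ip=pool requires a network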
2467
2468 - def CheckArguments(self):
2469 if not (self.op.nics or self.op.disks or self.op.disk_template or 2470 self.op.hvparams or self.op.beparams or self.op.os_name or 2471 self.op.osparams or self.op.offline is not None or 2472 self.op.runtime_mem or self.op.pnode): 2473 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL) 2474 2475 if self.op.hvparams: 2476 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, 2477 "hypervisor", "instance", "cluster") 2478 2479 self.op.disks = self._UpgradeDiskNicMods( 2480 "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams)) 2481 self.op.nics = self._UpgradeDiskNicMods( 2482 "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams)) 2483 2484 if self.op.disks and self.op.disk_template is not None: 2485 raise errors.OpPrereqError("Disk template conversion and other disk" 2486 " changes not supported at the same time", 2487 errors.ECODE_INVAL) 2488 2489 if (self.op.disk_template and 2490 self.op.disk_template in constants.DTS_INT_MIRROR and 2491 self.op.remote_node is None): 2492 raise errors.OpPrereqError("Changing the disk template to a mirrored" 2493 " one requires specifying a secondary node", 2494 errors.ECODE_INVAL) 2495 2496 # Check NIC modifications 2497 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES, 2498 self._VerifyNicModification) 2499 2500 if self.op.pnode: 2501 (self.op.pnode_uuid, self.op.pnode) = \ 2502 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2503
2504 - def ExpandNames(self):
2505 self._ExpandAndLockInstance() 2506 self.needed_locks[locking.LEVEL_NODEGROUP] = [] 2507 # Can't even acquire node locks in shared mode as upcoming changes in 2508 # Ganeti 2.6 will start to modify the node object on disk conversion 2509 self.needed_locks[locking.LEVEL_NODE] = [] 2510 self.needed_locks[locking.LEVEL_NODE_RES] = [] 2511 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE 2512 # Lock the node group to look up the ipolicy 2513 self.share_locks[locking.LEVEL_NODEGROUP] = 1
2514
2515 - def DeclareLocks(self, level):
2516 if level == locking.LEVEL_NODEGROUP: 2517 assert not self.needed_locks[locking.LEVEL_NODEGROUP] 2518 # Acquire locks for the instance's nodegroups optimistically. Needs 2519 # to be verified in CheckPrereq 2520 self.needed_locks[locking.LEVEL_NODEGROUP] = \ 2521 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid) 2522 elif level == locking.LEVEL_NODE: 2523 self._LockInstancesNodes() 2524 if self.op.disk_template and self.op.remote_node: 2525 (self.op.remote_node_uuid, self.op.remote_node) = \ 2526 ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid, 2527 self.op.remote_node) 2528 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid) 2529 elif level == locking.LEVEL_NODE_RES and self.op.disk_template: 2530 # Copy node locks 2531 self.needed_locks[locking.LEVEL_NODE_RES] = \ 2532 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2533
2534 - def BuildHooksEnv(self):
2535 """Build hooks env. 2536 2537 This runs on the master, primary and secondaries. 2538 2539 """ 2540 args = {} 2541 if constants.BE_MINMEM in self.be_new: 2542 args["minmem"] = self.be_new[constants.BE_MINMEM] 2543 if constants.BE_MAXMEM in self.be_new: 2544 args["maxmem"] = self.be_new[constants.BE_MAXMEM] 2545 if constants.BE_VCPUS in self.be_new: 2546 args["vcpus"] = self.be_new[constants.BE_VCPUS] 2547 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk 2548 # information at all. 2549 2550 if self._new_nics is not None: 2551 nics = [] 2552 2553 for nic in self._new_nics: 2554 n = copy.deepcopy(nic) 2555 nicparams = self.cluster.SimpleFillNIC(n.nicparams) 2556 n.nicparams = nicparams 2557 nics.append(NICToTuple(self, n)) 2558 2559 args["nics"] = nics 2560 2561 env = BuildInstanceHookEnvByObject(self, self.instance, override=args) 2562 if self.op.disk_template: 2563 env["NEW_DISK_TEMPLATE"] = self.op.disk_template 2564 if self.op.runtime_mem: 2565 env["RUNTIME_MEMORY"] = self.op.runtime_mem 2566 2567 return env
2568
2569 - def BuildHooksNodes(self):
2570 """Build hooks nodes. 2571 2572 """ 2573 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) 2574 return (nl, nl)
2575
2576 - def _PrepareNicModification(self, params, private, old_ip, old_net_uuid, 2577 old_params, cluster, pnode_uuid):
2578 2579 update_params_dict = dict([(key, params[key]) 2580 for key in constants.NICS_PARAMETERS 2581 if key in params]) 2582 2583 req_link = update_params_dict.get(constants.NIC_LINK, None) 2584 req_mode = update_params_dict.get(constants.NIC_MODE, None) 2585 2586 new_net_uuid = None 2587 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid) 2588 if new_net_uuid_or_name: 2589 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name) 2590 new_net_obj = self.cfg.GetNetwork(new_net_uuid) 2591 2592 if old_net_uuid: 2593 old_net_obj = self.cfg.GetNetwork(old_net_uuid) 2594 2595 if new_net_uuid: 2596 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid) 2597 if not netparams: 2598 raise errors.OpPrereqError("No netparams found for the network" 2599 " %s, probably not connected" % 2600 new_net_obj.name, errors.ECODE_INVAL) 2601 new_params = dict(netparams) 2602 else: 2603 new_params = GetUpdatedParams(old_params, update_params_dict) 2604 2605 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES) 2606 2607 new_filled_params = cluster.SimpleFillNIC(new_params) 2608 objects.NIC.CheckParameterSyntax(new_filled_params) 2609 2610 new_mode = new_filled_params[constants.NIC_MODE] 2611 if new_mode == constants.NIC_MODE_BRIDGED: 2612 bridge = new_filled_params[constants.NIC_LINK] 2613 msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg 2614 if msg: 2615 msg = "Error checking bridges on node '%s': %s" % \ 2616 (self.cfg.GetNodeName(pnode_uuid), msg) 2617 if self.op.force: 2618 self.warn.append(msg) 2619 else: 2620 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON) 2621 2622 elif new_mode == constants.NIC_MODE_ROUTED: 2623 ip = params.get(constants.INIC_IP, old_ip) 2624 if ip is None: 2625 raise errors.OpPrereqError("Cannot set the NIC IP address to None" 2626 " on a routed NIC", errors.ECODE_INVAL) 2627 2628 elif new_mode == constants.NIC_MODE_OVS: 2629 # TODO: check OVS link 2630 self.LogInfo("OVS links are currently not checked for correctness") 2631 2632 if constants.INIC_MAC in params: 2633 mac = params[constants.INIC_MAC] 2634 if mac is None: 2635 raise errors.OpPrereqError("Cannot unset the NIC MAC address", 2636 errors.ECODE_INVAL) 2637 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE): 2638 # otherwise generate the MAC address 2639 params[constants.INIC_MAC] = \ 2640 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId()) 2641 else: 2642 # or validate/reserve the current one 2643 try: 2644 self.cfg.ReserveMAC(mac, self.proc.GetECId()) 2645 except errors.ReservationError: 2646 raise errors.OpPrereqError("MAC address '%s' already in use" 2647 " in cluster" % mac, 2648 errors.ECODE_NOTUNIQUE) 2649 elif new_net_uuid != old_net_uuid: 2650 2651 def get_net_prefix(net_uuid): 2652 mac_prefix = None 2653 if net_uuid: 2654 nobj = self.cfg.GetNetwork(net_uuid) 2655 mac_prefix = nobj.mac_prefix 2656 2657 return mac_prefix
2658 2659 new_prefix = get_net_prefix(new_net_uuid) 2660 old_prefix = get_net_prefix(old_net_uuid) 2661 if old_prefix != new_prefix: 2662 params[constants.INIC_MAC] = \ 2663 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId()) 2664 2665 # if there is a change in (ip, network) tuple 2666 new_ip = params.get(constants.INIC_IP, old_ip) 2667 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid): 2668 if new_ip: 2669 # if IP is pool then require a network and generate one IP 2670 if new_ip.lower() == constants.NIC_IP_POOL: 2671 if new_net_uuid: 2672 try: 2673 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId()) 2674 except errors.ReservationError: 2675 raise errors.OpPrereqError("Unable to get a free IP" 2676 " from the address pool", 2677 errors.ECODE_STATE) 2678 self.LogInfo("Chose IP %s from network %s", 2679 new_ip, 2680 new_net_obj.name) 2681 params[constants.INIC_IP] = new_ip 2682 else: 2683 raise errors.OpPrereqError("ip=pool, but no network found", 2684 errors.ECODE_INVAL) 2685 # Reserve new IP if in the new network if any 2686 elif new_net_uuid: 2687 try: 2688 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(), 2689 check=self.op.conflicts_check) 2690 self.LogInfo("Reserving IP %s in network %s", 2691 new_ip, new_net_obj.name) 2692 except errors.ReservationError: 2693 raise errors.OpPrereqError("IP %s not available in network %s" % 2694 (new_ip, new_net_obj.name), 2695 errors.ECODE_NOTUNIQUE) 2696 # new network is None so check if new IP is a conflicting IP 2697 elif self.op.conflicts_check: 2698 _CheckForConflictingIp(self, new_ip, pnode_uuid) 2699 2700 # release old IP if old network is not None 2701 if old_ip and old_net_uuid: 2702 try: 2703 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId()) 2704 except errors.AddressPoolError: 2705 logging.warning("Release IP %s not contained in network %s", 2706 old_ip, old_net_obj.name) 2707 2708 # there are no changes in (ip, network) tuple and old network is not None 2709 elif (old_net_uuid is not None and 2710 (req_link is not None or req_mode is not None)): 2711 raise errors.OpPrereqError("Not allowed to change link or mode of" 2712 " a NIC that is connected to a network", 2713 errors.ECODE_INVAL) 2714 2715 private.params = new_params 2716 private.filled = new_filled_params
2717
2718 - def _PreCheckDiskTemplate(self, pnode_info):
2719 """CheckPrereq checks related to a new disk template.""" 2720 # Arguments are passed to avoid configuration lookups 2721 pnode_uuid = self.instance.primary_node 2722 if self.instance.disk_template == self.op.disk_template: 2723 raise errors.OpPrereqError("Instance already has disk template %s" % 2724 self.instance.disk_template, 2725 errors.ECODE_INVAL) 2726 2727 if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template): 2728 raise errors.OpPrereqError("Disk template '%s' is not enabled for this" 2729 " cluster." % self.op.disk_template) 2730 2731 if (self.instance.disk_template, 2732 self.op.disk_template) not in self._DISK_CONVERSIONS: 2733 raise errors.OpPrereqError("Unsupported disk template conversion from" 2734 " %s to %s" % (self.instance.disk_template, 2735 self.op.disk_template), 2736 errors.ECODE_INVAL) 2737 CheckInstanceState(self, self.instance, INSTANCE_DOWN, 2738 msg="cannot change disk template") 2739 if self.op.disk_template in constants.DTS_INT_MIRROR: 2740 if self.op.remote_node_uuid == pnode_uuid: 2741 raise errors.OpPrereqError("Given new secondary node %s is the same" 2742 " as the primary node of the instance" % 2743 self.op.remote_node, errors.ECODE_STATE) 2744 CheckNodeOnline(self, self.op.remote_node_uuid) 2745 CheckNodeNotDrained(self, self.op.remote_node_uuid) 2746 # FIXME: here we assume that the old instance type is DT_PLAIN 2747 assert self.instance.disk_template == constants.DT_PLAIN 2748 disks = [{constants.IDISK_SIZE: d.size, 2749 constants.IDISK_VG: d.logical_id[0]} 2750 for d in self.instance.disks] 2751 required = ComputeDiskSizePerVG(self.op.disk_template, disks) 2752 CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required) 2753 2754 snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid) 2755 snode_group = self.cfg.GetNodeGroup(snode_info.group) 2756 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster, 2757 snode_group) 2758 CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg, 2759 ignore=self.op.ignore_ipolicy) 2760 if pnode_info.group != snode_info.group: 2761 self.LogWarning("The primary and secondary nodes are in two" 2762 " different node groups; the disk parameters" 2763 " from the first disk's node group will be" 2764 " used") 2765 2766 if not self.op.disk_template in constants.DTS_EXCL_STORAGE: 2767 # Make sure none of the nodes require exclusive storage 2768 nodes = [pnode_info] 2769 if self.op.disk_template in constants.DTS_INT_MIRROR: 2770 assert snode_info 2771 nodes.append(snode_info) 2772 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n) 2773 if compat.any(map(has_es, nodes)): 2774 errmsg = ("Cannot convert disk template from %s to %s when exclusive" 2775 " storage is enabled" % (self.instance.disk_template, 2776 self.op.disk_template)) 2777 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2778
2779 - def _PreCheckDisks(self, ispec):
2780 """CheckPrereq checks related to disk changes. 2781 2782 @type ispec: dict 2783 @param ispec: instance specs to be updated with the new disks 2784 2785 """ 2786 self.diskparams = self.cfg.GetInstanceDiskParams(self.instance) 2787 2788 excl_stor = compat.any( 2789 rpc.GetExclusiveStorageForNodes(self.cfg, 2790 self.instance.all_nodes).values() 2791 ) 2792 2793 # Check disk modifications. This is done here and not in CheckArguments 2794 # (as with NICs), because we need to know the instance's disk template 2795 ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor) 2796 if self.instance.disk_template == constants.DT_EXT: 2797 self._CheckMods("disk", self.op.disks, {}, ver_fn) 2798 else: 2799 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES, 2800 ver_fn) 2801 2802 self.diskmod = _PrepareContainerMods(self.op.disks, None) 2803 2804 # Check the validity of the `provider' parameter 2805 if self.instance.disk_template in constants.DT_EXT: 2806 for mod in self.diskmod: 2807 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None) 2808 if mod[0] == constants.DDM_ADD: 2809 if ext_provider is None: 2810 raise errors.OpPrereqError("Instance template is '%s' and parameter" 2811 " '%s' missing, during disk add" % 2812 (constants.DT_EXT, 2813 constants.IDISK_PROVIDER), 2814 errors.ECODE_NOENT) 2815 elif mod[0] == constants.DDM_MODIFY: 2816 if ext_provider: 2817 raise errors.OpPrereqError("Parameter '%s' is invalid during disk" 2818 " modification" % 2819 constants.IDISK_PROVIDER, 2820 errors.ECODE_INVAL) 2821 else: 2822 for mod in self.diskmod: 2823 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None) 2824 if ext_provider is not None: 2825 raise errors.OpPrereqError("Parameter '%s' is only valid for" 2826 " instances of type '%s'" % 2827 (constants.IDISK_PROVIDER, 2828 constants.DT_EXT), 2829 errors.ECODE_INVAL) 2830 2831 if not self.op.wait_for_sync and not self.instance.disks_active: 2832 for mod in self.diskmod: 2833 if mod[0] == constants.DDM_ADD: 2834 raise errors.OpPrereqError("Can't add a disk to an instance with" 2835 " deactivated disks and" 2836 " --no-wait-for-sync given.", 2837 errors.ECODE_INVAL) 2838 2839 if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS: 2840 raise errors.OpPrereqError("Disk operations not supported for" 2841 " diskless instances", errors.ECODE_INVAL) 2842 2843 def _PrepareDiskMod(_, disk, params, __): 2844 disk.name = params.get(constants.IDISK_NAME, None)
2845 2846 # Verify disk changes (operating on a copy) 2847 disks = copy.deepcopy(self.instance.disks) 2848 _ApplyContainerMods("disk", disks, None, self.diskmod, None, 2849 _PrepareDiskMod, None) 2850 utils.ValidateDeviceNames("disk", disks) 2851 if len(disks) > constants.MAX_DISKS: 2852 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add" 2853 " more" % constants.MAX_DISKS, 2854 errors.ECODE_STATE) 2855 disk_sizes = [disk.size for disk in self.instance.disks] 2856 disk_sizes.extend(params["size"] for (op, idx, params, private) in 2857 self.diskmod if op == constants.DDM_ADD) 2858 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes) 2859 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes 2860 2861 # either --online or --offline was passed 2862 if self.op.offline is not None: 2863 if self.op.offline: 2864 msg = "can't change to offline without being down first" 2865 else: 2866 msg = "can't change to online (down) without being offline first" 2867 CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE, 2868 msg=msg) 2869
2870 - def CheckPrereq(self):
2871 """Check prerequisites. 2872 2873 This only checks the instance list against the existing names. 2874 2875 """ 2876 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE) 2877 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid) 2878 self.cluster = self.cfg.GetClusterInfo() 2879 cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor] 2880 2881 assert self.instance is not None, \ 2882 "Cannot retrieve locked instance %s" % self.op.instance_name 2883 2884 pnode_uuid = self.instance.primary_node 2885 2886 self.warn = [] 2887 2888 if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and 2889 not self.op.force): 2890 # verify that the instance is not up 2891 instance_info = self.rpc.call_instance_info( 2892 pnode_uuid, self.instance.name, self.instance.hypervisor, 2893 cluster_hvparams) 2894 if instance_info.fail_msg: 2895 self.warn.append("Can't get instance runtime information: %s" % 2896 instance_info.fail_msg) 2897 elif instance_info.payload: 2898 raise errors.OpPrereqError("Instance is still running on %s" % 2899 self.cfg.GetNodeName(pnode_uuid), 2900 errors.ECODE_STATE) 2901 2902 assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE) 2903 node_uuids = list(self.instance.all_nodes) 2904 pnode_info = self.cfg.GetNodeInfo(pnode_uuid) 2905 2906 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups) 2907 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP) 2908 group_info = self.cfg.GetNodeGroup(pnode_info.group) 2909 2910 # dictionary with instance information after the modification 2911 ispec = {} 2912 2913 if self.op.hotplug or self.op.hotplug_if_possible: 2914 result = self.rpc.call_hotplug_supported(self.instance.primary_node, 2915 self.instance) 2916 if result.fail_msg: 2917 if self.op.hotplug: 2918 result.Raise("Hotplug is not possible: %s" % result.fail_msg, 2919 prereq=True) 2920 else: 2921 self.LogWarning(result.fail_msg) 2922 self.op.hotplug = False 2923 self.LogInfo("Modification will take place without hotplugging.") 2924 else: 2925 self.op.hotplug = True 2926 2927 # Prepare NIC modifications 2928 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate) 2929 2930 # OS change 2931 if self.op.os_name and not self.op.force: 2932 CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name, 2933 self.op.force_variant) 2934 instance_os = self.op.os_name 2935 else: 2936 instance_os = self.instance.os 2937 2938 assert not (self.op.disk_template and self.op.disks), \ 2939 "Can't modify disk template and apply disk changes at the same time" 2940 2941 if self.op.disk_template: 2942 self._PreCheckDiskTemplate(pnode_info) 2943 2944 self._PreCheckDisks(ispec) 2945 2946 # hvparams processing 2947 if self.op.hvparams: 2948 hv_type = self.instance.hypervisor 2949 i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams) 2950 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES) 2951 hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict) 2952 2953 # local check 2954 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new) 2955 CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new) 2956 self.hv_proposed = self.hv_new = hv_new # the new actual values 2957 self.hv_inst = i_hvdict # the new dict (without defaults) 2958 else: 2959 self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor, 2960 self.instance.os, 2961 self.instance.hvparams) 2962 self.hv_new = self.hv_inst = {} 2963 2964 # beparams processing 2965 if 
self.op.beparams: 2966 i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams, 2967 use_none=True) 2968 objects.UpgradeBeParams(i_bedict) 2969 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES) 2970 be_new = self.cluster.SimpleFillBE(i_bedict) 2971 self.be_proposed = self.be_new = be_new # the new actual values 2972 self.be_inst = i_bedict # the new dict (without defaults) 2973 else: 2974 self.be_new = self.be_inst = {} 2975 self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams) 2976 be_old = self.cluster.FillBE(self.instance) 2977 2978 # CPU param validation -- checking every time a parameter is 2979 # changed to cover all cases where either CPU mask or vcpus have 2980 # changed 2981 if (constants.BE_VCPUS in self.be_proposed and 2982 constants.HV_CPU_MASK in self.hv_proposed): 2983 cpu_list = \ 2984 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK]) 2985 # Verify mask is consistent with number of vCPUs. Can skip this 2986 # test if only 1 entry in the CPU mask, which means same mask 2987 # is applied to all vCPUs. 2988 if (len(cpu_list) > 1 and 2989 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]): 2990 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the" 2991 " CPU mask [%s]" % 2992 (self.be_proposed[constants.BE_VCPUS], 2993 self.hv_proposed[constants.HV_CPU_MASK]), 2994 errors.ECODE_INVAL) 2995 2996 # Only perform this test if a new CPU mask is given 2997 if constants.HV_CPU_MASK in self.hv_new: 2998 # Calculate the largest CPU number requested 2999 max_requested_cpu = max(map(max, cpu_list)) 3000 # Check that all of the instance's nodes have enough physical CPUs to 3001 # satisfy the requested CPU mask 3002 hvspecs = [(self.instance.hypervisor, 3003 self.cfg.GetClusterInfo() 3004 .hvparams[self.instance.hypervisor])] 3005 _CheckNodesPhysicalCPUs(self, self.instance.all_nodes, 3006 max_requested_cpu + 1, 3007 hvspecs) 3008 3009 # osparams processing 3010 if self.op.osparams: 3011 i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams) 3012 CheckOSParams(self, True, node_uuids, instance_os, i_osdict) 3013 self.os_inst = i_osdict # the new dict (without defaults) 3014 else: 3015 self.os_inst = {} 3016 3017 #TODO(dynmem): do the appropriate check involving MINMEM 3018 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and 3019 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]): 3020 mem_check_list = [pnode_uuid] 3021 if be_new[constants.BE_AUTO_BALANCE]: 3022 # either we changed auto_balance to yes or it was from before 3023 mem_check_list.extend(self.instance.secondary_nodes) 3024 instance_info = self.rpc.call_instance_info( 3025 pnode_uuid, self.instance.name, self.instance.hypervisor, 3026 cluster_hvparams) 3027 hvspecs = [(self.instance.hypervisor, 3028 cluster_hvparams)] 3029 nodeinfo = self.rpc.call_node_info(mem_check_list, None, 3030 hvspecs) 3031 pninfo = nodeinfo[pnode_uuid] 3032 msg = pninfo.fail_msg 3033 if msg: 3034 # Assume the primary node is unreachable and go ahead 3035 self.warn.append("Can't get info from primary node %s: %s" % 3036 (self.cfg.GetNodeName(pnode_uuid), msg)) 3037 else: 3038 (_, _, (pnhvinfo, )) = pninfo.payload 3039 if not isinstance(pnhvinfo.get("memory_free", None), int): 3040 self.warn.append("Node data from primary node %s doesn't contain" 3041 " free memory information" % 3042 self.cfg.GetNodeName(pnode_uuid)) 3043 elif instance_info.fail_msg: 3044 self.warn.append("Can't get instance runtime information: %s" % 3045 instance_info.fail_msg) 
3046 else: 3047 if instance_info.payload: 3048 current_mem = int(instance_info.payload["memory"]) 3049 else: 3050 # Assume instance not running 3051 # (there is a slight race condition here, but it's not very 3052 # probable, and we have no other way to check) 3053 # TODO: Describe race condition 3054 current_mem = 0 3055 #TODO(dynmem): do the appropriate check involving MINMEM 3056 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem - 3057 pnhvinfo["memory_free"]) 3058 if miss_mem > 0: 3059 raise errors.OpPrereqError("This change will prevent the instance" 3060 " from starting, due to %d MB of memory" 3061 " missing on its primary node" % 3062 miss_mem, errors.ECODE_NORES) 3063 3064 if be_new[constants.BE_AUTO_BALANCE]: 3065 for node_uuid, nres in nodeinfo.items(): 3066 if node_uuid not in self.instance.secondary_nodes: 3067 continue 3068 nres.Raise("Can't get info from secondary node %s" % 3069 self.cfg.GetNodeName(node_uuid), prereq=True, 3070 ecode=errors.ECODE_STATE) 3071 (_, _, (nhvinfo, )) = nres.payload 3072 if not isinstance(nhvinfo.get("memory_free", None), int): 3073 raise errors.OpPrereqError("Secondary node %s didn't return free" 3074 " memory information" % 3075 self.cfg.GetNodeName(node_uuid), 3076 errors.ECODE_STATE) 3077 #TODO(dynmem): do the appropriate check involving MINMEM 3078 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]: 3079 raise errors.OpPrereqError("This change will prevent the instance" 3080 " from failover to its secondary node" 3081 " %s, due to not enough memory" % 3082 self.cfg.GetNodeName(node_uuid), 3083 errors.ECODE_STATE) 3084 3085 if self.op.runtime_mem: 3086 remote_info = self.rpc.call_instance_info( 3087 self.instance.primary_node, self.instance.name, 3088 self.instance.hypervisor, 3089 cluster_hvparams) 3090 remote_info.Raise("Error checking node %s" % 3091 self.cfg.GetNodeName(self.instance.primary_node), 3092 prereq=True) 3093 if not remote_info.payload: # not running already 3094 raise errors.OpPrereqError("Instance %s is not running" % 3095 self.instance.name, errors.ECODE_STATE) 3096 3097 current_memory = remote_info.payload["memory"] 3098 if (not self.op.force and 3099 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or 3100 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])): 3101 raise errors.OpPrereqError("Instance %s must have memory between %d" 3102 " and %d MB of memory unless --force is" 3103 " given" % 3104 (self.instance.name, 3105 self.be_proposed[constants.BE_MINMEM], 3106 self.be_proposed[constants.BE_MAXMEM]), 3107 errors.ECODE_INVAL) 3108 3109 delta = self.op.runtime_mem - current_memory 3110 if delta > 0: 3111 CheckNodeFreeMemory( 3112 self, self.instance.primary_node, 3113 "ballooning memory for instance %s" % self.instance.name, delta, 3114 self.instance.hypervisor, 3115 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor]) 3116 3117 # make self.cluster visible in the functions below 3118 cluster = self.cluster 3119 3120 def _PrepareNicCreate(_, params, private): 3121 self._PrepareNicModification(params, private, None, None, 3122 {}, cluster, pnode_uuid) 3123 return (None, None)
3124 3125 def _PrepareNicMod(_, nic, params, private): 3126 self._PrepareNicModification(params, private, nic.ip, nic.network, 3127 nic.nicparams, cluster, pnode_uuid) 3128 return None 3129 3130 def _PrepareNicRemove(_, params, __): 3131 ip = params.ip 3132 net = params.network 3133 if net is not None and ip is not None: 3134 self.cfg.ReleaseIp(net, ip, self.proc.GetECId()) 3135 3136 # Verify NIC changes (operating on copy) 3137 nics = self.instance.nics[:] 3138 _ApplyContainerMods("NIC", nics, None, self.nicmod, 3139 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove) 3140 if len(nics) > constants.MAX_NICS: 3141 raise errors.OpPrereqError("Instance has too many network interfaces" 3142 " (%d), cannot add more" % constants.MAX_NICS, 3143 errors.ECODE_STATE) 3144 3145 # Pre-compute NIC changes (necessary to use result in hooks) 3146 self._nic_chgdesc = [] 3147 if self.nicmod: 3148 # Operate on copies as this is still in prereq 3149 nics = [nic.Copy() for nic in self.instance.nics] 3150 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod, 3151 self._CreateNewNic, self._ApplyNicMods, 3152 self._RemoveNic) 3153 # Verify that NIC names are unique and valid 3154 utils.ValidateDeviceNames("NIC", nics) 3155 self._new_nics = nics 3156 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics) 3157 else: 3158 self._new_nics = None 3159 ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics) 3160 3161 if not self.op.ignore_ipolicy: 3162 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster, 3163 group_info) 3164 3165 # Fill ispec with backend parameters 3166 ispec[constants.ISPEC_SPINDLE_USE] = \ 3167 self.be_new.get(constants.BE_SPINDLE_USE, None) 3168 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS, 3169 None) 3170 3171 # Copy ispec to verify parameters with min/max values separately 3172 if self.op.disk_template: 3173 new_disk_template = self.op.disk_template 3174 else: 3175 new_disk_template = self.instance.disk_template 3176 ispec_max = ispec.copy() 3177 ispec_max[constants.ISPEC_MEM_SIZE] = \ 3178 self.be_new.get(constants.BE_MAXMEM, None) 3179 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max, 3180 new_disk_template) 3181 ispec_min = ispec.copy() 3182 ispec_min[constants.ISPEC_MEM_SIZE] = \ 3183 self.be_new.get(constants.BE_MINMEM, None) 3184 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min, 3185 new_disk_template) 3186 3187 if (res_max or res_min): 3188 # FIXME: Improve error message by including information about whether 3189 # the upper or lower limit of the parameter fails the ipolicy. 3190 msg = ("Instance allocation to group %s (%s) violates policy: %s" % 3191 (group_info, group_info.name, 3192 utils.CommaJoin(set(res_max + res_min)))) 3193 raise errors.OpPrereqError(msg, errors.ECODE_INVAL) 3194
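# A minimal standalone sketch of the memory prerequisite above: raising
# BE_MAXMEM is only acceptable if the increase fits into the memory the
# instance already holds plus the free memory reported by its primary node.
# The helper below is hypothetical (not part of this module); only the
# formula mirrors the code.

def _check_maxmem_headroom(new_maxmem, current_mem, node_memory_free):
  """Raise if growing MAXMEM would not fit on the primary node (all MB)."""
  miss_mem = new_maxmem - current_mem - node_memory_free
  if miss_mem > 0:
    raise ValueError("This change will prevent the instance from starting,"
                     " due to %d MB of memory missing on its primary node" %
                     miss_mem)

# Example: growing MAXMEM to 8192 MB while the instance uses 4096 MB and the
# node reports only 2048 MB free leaves 2048 MB missing and raises.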
3195   def _ConvertPlainToDrbd(self, feedback_fn):
3196 """Converts an instance from plain to drbd. 3197 3198 """ 3199 feedback_fn("Converting template to drbd") 3200 pnode_uuid = self.instance.primary_node 3201 snode_uuid = self.op.remote_node_uuid 3202 3203 assert self.instance.disk_template == constants.DT_PLAIN 3204 3205 # create a fake disk info for _GenerateDiskTemplate 3206 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode, 3207 constants.IDISK_VG: d.logical_id[0], 3208 constants.IDISK_NAME: d.name} 3209 for d in self.instance.disks] 3210 new_disks = GenerateDiskTemplate(self, self.op.disk_template, 3211 self.instance.uuid, pnode_uuid, 3212 [snode_uuid], disk_info, None, None, 0, 3213 feedback_fn, self.diskparams) 3214 anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams) 3215 p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid) 3216 s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid) 3217 info = GetInstanceInfoText(self.instance) 3218 feedback_fn("Creating additional volumes...") 3219 # first, create the missing data and meta devices 3220 for disk in anno_disks: 3221 # unfortunately this is... not too nice 3222 CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1], 3223 info, True, p_excl_stor) 3224 for child in disk.children: 3225 CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True, 3226 s_excl_stor) 3227 # at this stage, all new LVs have been created, we can rename the 3228 # old ones 3229 feedback_fn("Renaming original volumes...") 3230 rename_list = [(o, n.children[0].logical_id) 3231 for (o, n) in zip(self.instance.disks, new_disks)] 3232 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list) 3233 result.Raise("Failed to rename original LVs") 3234 3235 feedback_fn("Initializing DRBD devices...") 3236 # all child devices are in place, we can now create the DRBD devices 3237 try: 3238 for disk in anno_disks: 3239 for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor), 3240 (snode_uuid, s_excl_stor)]: 3241 f_create = node_uuid == pnode_uuid 3242 CreateSingleBlockDev(self, node_uuid, self.instance, disk, info, 3243 f_create, excl_stor) 3244 except errors.GenericError, e: 3245 feedback_fn("Initializing of DRBD devices failed;" 3246 " renaming back original volumes...") 3247 rename_back_list = [(n.children[0], o.logical_id) 3248 for (n, o) in zip(new_disks, self.instance.disks)] 3249 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list) 3250 result.Raise("Failed to rename LVs back after error %s" % str(e)) 3251 raise 3252 3253 # at this point, the instance has been modified 3254 self.instance.disk_template = constants.DT_DRBD8 3255 self.instance.disks = new_disks 3256 self.cfg.Update(self.instance, feedback_fn) 3257 3258 # Release node locks while waiting for sync 3259 ReleaseLocks(self, locking.LEVEL_NODE) 3260 3261 # disks are created, waiting for sync 3262 disk_abort = not WaitForSync(self, self.instance, 3263 oneshot=not self.op.wait_for_sync) 3264 if disk_abort: 3265 raise errors.OpExecError("There are some degraded disks for" 3266 " this instance, please cleanup manually")
3267 3268 # Node resource locks will be released by caller 3269
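# The conversion above renames the existing plain LVs so they become the data
# children of the new DRBD disks, and on failure renames them back by pairing
# the same two lists in the opposite direction.  A standalone sketch of that
# pairing, with plain identifiers standing in for disk objects (the helper
# name is hypothetical):

def _build_rename_lists(old_ids, new_child_ids):
  """Return (forward, rollback) rename lists of (current, target) pairs."""
  forward = list(zip(old_ids, new_child_ids))
  rollback = list(zip(new_child_ids, old_ids))
  return forward, rollback

# _build_rename_lists(["lv0", "lv1"], ["data0", "data1"]) returns
#   ([("lv0", "data0"), ("lv1", "data1")],
#    [("data0", "lv0"), ("data1", "lv1")])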
3270   def _ConvertDrbdToPlain(self, feedback_fn):
3271 """Converts an instance from drbd to plain. 3272 3273 """ 3274 assert self.instance.disk_template == constants.DT_DRBD8 3275 assert len(self.instance.secondary_nodes) == 1 or not self.instance.disks 3276 3277 pnode_uuid = self.instance.primary_node 3278 3279 # it will not be possible to calculate the snode_uuid later 3280 snode_uuid = None 3281 if self.instance.secondary_nodes: 3282 snode_uuid = self.instance.secondary_nodes[0] 3283 3284 feedback_fn("Converting template to plain") 3285 3286 old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg) 3287 new_disks = [d.children[0] for d in self.instance.disks] 3288 3289 # copy over size, mode and name 3290 for parent, child in zip(old_disks, new_disks): 3291 child.size = parent.size 3292 child.mode = parent.mode 3293 child.name = parent.name 3294 3295 # this is a DRBD disk, return its port to the pool 3296 # NOTE: this must be done right before the call to cfg.Update! 3297 for disk in old_disks: 3298 tcp_port = disk.logical_id[2] 3299 self.cfg.AddTcpUdpPort(tcp_port) 3300 3301 # update instance structure 3302 self.instance.disks = new_disks 3303 self.instance.disk_template = constants.DT_PLAIN 3304 _UpdateIvNames(0, self.instance.disks) 3305 self.cfg.Update(self.instance, feedback_fn) 3306 3307 # Release locks in case removing disks takes a while 3308 ReleaseLocks(self, locking.LEVEL_NODE) 3309 3310 feedback_fn("Removing volumes on the secondary node...") 3311 for disk in old_disks: 3312 result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance)) 3313 result.Warn("Could not remove block device %s on node %s," 3314 " continuing anyway" % 3315 (disk.iv_name, self.cfg.GetNodeName(snode_uuid)), 3316 self.LogWarning) 3317 3318 feedback_fn("Removing unneeded volumes on the primary node...") 3319 for idx, disk in enumerate(old_disks): 3320 meta = disk.children[1] 3321 result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance)) 3322 result.Warn("Could not remove metadata for disk %d on node %s," 3323 " continuing anyway" % 3324 (idx, self.cfg.GetNodeName(pnode_uuid)), 3325 self.LogWarning)
3326
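# _ConvertDrbdToPlain keeps only the first child (the data LV) of each DRBD
# disk, copies the user-visible attributes onto it and hands the DRBD TCP
# port back to the cluster pool.  A standalone sketch with a toy disk class
# (the real objects are ganeti.objects.Disk instances; all names below are
# hypothetical):

class _ToyDisk(object):
  def __init__(self, size, mode, name, port=None, children=None):
    self.size, self.mode, self.name = size, mode, name
    self.port = port
    self.children = children or []

def _demote_drbd_disks(drbd_disks):
  """Return (plain_disks, freed_ports) for a list of toy DRBD disks."""
  plain_disks, freed_ports = [], []
  for parent in drbd_disks:
    child = parent.children[0]   # data volume; children[1] is the metadata LV
    child.size, child.mode, child.name = parent.size, parent.mode, parent.name
    plain_disks.append(child)
    freed_ports.append(parent.port)   # returned to the TCP/UDP port pool
  return plain_disks, freed_ports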
3327   def _HotplugDevice(self, action, dev_type, device, extra, seq):
3328 self.LogInfo("Trying to hotplug device...") 3329 msg = "hotplug:" 3330 result = self.rpc.call_hotplug_device(self.instance.primary_node, 3331 self.instance, action, dev_type, 3332 (device, self.instance), 3333 extra, seq) 3334 if result.fail_msg: 3335 self.LogWarning("Could not hotplug device: %s" % result.fail_msg) 3336 self.LogInfo("Continuing execution..") 3337 msg += "failed" 3338 else: 3339 self.LogInfo("Hotplug done.") 3340 msg += "done" 3341 return msg
3342
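# A hotplug failure never aborts the operation: it only downgrades the change
# description from "hotplug:done" to "hotplug:failed".  The same pattern in
# isolation, with a hypothetical callable standing in for the hotplug RPC:

def _try_hotplug(do_hotplug, log_warning):
  msg = "hotplug:"
  try:
    do_hotplug()
  except Exception as err:   # the LU inspects result.fail_msg instead
    log_warning("Could not hotplug device: %s" % err)
    return msg + "failed"
  return msg + "done"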
3343   def _CreateNewDisk(self, idx, params, _):
3344 """Creates a new disk. 3345 3346 """ 3347 # add a new disk 3348 if self.instance.disk_template in constants.DTS_FILEBASED: 3349 (file_driver, file_path) = self.instance.disks[0].logical_id 3350 file_path = os.path.dirname(file_path) 3351 else: 3352 file_driver = file_path = None 3353 3354 disk = \ 3355 GenerateDiskTemplate(self, self.instance.disk_template, 3356 self.instance.uuid, self.instance.primary_node, 3357 self.instance.secondary_nodes, [params], file_path, 3358 file_driver, idx, self.Log, self.diskparams)[0] 3359 3360 new_disks = CreateDisks(self, self.instance, disks=[disk]) 3361 3362 if self.cluster.prealloc_wipe_disks: 3363 # Wipe new disk 3364 WipeOrCleanupDisks(self, self.instance, 3365 disks=[(idx, disk, 0)], 3366 cleanup=new_disks) 3367 3368 changes = [ 3369 ("disk/%d" % idx, 3370 "add:size=%s,mode=%s" % (disk.size, disk.mode)), 3371 ] 3372 if self.op.hotplug: 3373 result = self.rpc.call_blockdev_assemble(self.instance.primary_node, 3374 (disk, self.instance), 3375 self.instance, True, idx) 3376 if result.fail_msg: 3377 changes.append(("disk/%d" % idx, "assemble:failed")) 3378 self.LogWarning("Can't assemble newly created disk %d: %s", 3379 idx, result.fail_msg) 3380 else: 3381 _, link_name, uri = result.payload 3382 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD, 3383 constants.HOTPLUG_TARGET_DISK, 3384 disk, (link_name, uri), idx) 3385 changes.append(("disk/%d" % idx, msg)) 3386 3387 return (disk, changes)
3388
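# Each modification helper returns a list of (field, description) pairs that
# the LU accumulates and finally returns from Exec.  For a freshly added disk
# the description encodes size and mode; a standalone sketch (helper name is
# hypothetical):

def _describe_new_disk(idx, size_mb, mode):
  return [("disk/%d" % idx, "add:size=%s,mode=%s" % (size_mb, mode))]

# _describe_new_disk(1, 10240, "rw") == [("disk/1", "add:size=10240,mode=rw")]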
3389   def _PostAddDisk(self, _, disk):
3390 if not WaitForSync(self, self.instance, disks=[disk], 3391 oneshot=not self.op.wait_for_sync): 3392 raise errors.OpExecError("Failed to sync disks of %s" % 3393 self.instance.name) 3394 3395 # the disk is active at this point, so deactivate it if the instance disks 3396 # are supposed to be inactive 3397 if not self.instance.disks_active: 3398 ShutdownInstanceDisks(self, self.instance, disks=[disk])
3399
3400   def _ModifyDisk(self, idx, disk, params, _):
3401 """Modifies a disk. 3402 3403 """ 3404 changes = [] 3405 if constants.IDISK_MODE in params: 3406 disk.mode = params.get(constants.IDISK_MODE) 3407 changes.append(("disk.mode/%d" % idx, disk.mode)) 3408 3409 if constants.IDISK_NAME in params: 3410 disk.name = params.get(constants.IDISK_NAME) 3411 changes.append(("disk.name/%d" % idx, disk.name)) 3412 3413 # Modify arbitrary params in case instance template is ext 3414 for key, value in params.iteritems(): 3415 if (key not in constants.MODIFIABLE_IDISK_PARAMS and 3416 self.instance.disk_template == constants.DT_EXT): 3417 # stolen from GetUpdatedParams: default means reset/delete 3418 if value.lower() == constants.VALUE_DEFAULT: 3419 try: 3420 del disk.params[key] 3421 except KeyError: 3422 pass 3423 else: 3424 disk.params[key] = value 3425 changes.append(("disk.params:%s/%d" % (key, idx), value)) 3426 3427 return changes
3428
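# For DT_EXT instances arbitrary disk parameters may be modified, and the
# string value "default" means "drop the override" (the same convention as
# GetUpdatedParams).  A standalone sketch of that update rule (helper name is
# hypothetical):

def _apply_ext_disk_params(current, updates):
  """Apply updates to a params dict; the value "default" deletes the key."""
  params = dict(current)
  for key, value in updates.items():
    if str(value).lower() == "default":
      params.pop(key, None)
    else:
      params[key] = value
  return params

# _apply_ext_disk_params({"iops": "100"}, {"iops": "default", "pool": "fast"})
#   == {"pool": "fast"}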
3429   def _RemoveDisk(self, idx, root, _):
3430 """Removes a disk. 3431 3432 """ 3433 hotmsg = "" 3434 if self.op.hotplug: 3435 hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE, 3436 constants.HOTPLUG_TARGET_DISK, 3437 root, None, idx) 3438 ShutdownInstanceDisks(self, self.instance, [root]) 3439 3440 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg) 3441 for node_uuid, disk in anno_disk.ComputeNodeTree( 3442 self.instance.primary_node): 3443 msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \ 3444 .fail_msg 3445 if msg: 3446 self.LogWarning("Could not remove disk/%d on node '%s': %s," 3447 " continuing anyway", idx, 3448 self.cfg.GetNodeName(node_uuid), msg) 3449 3450 # if this is a DRBD disk, return its port to the pool 3451 if root.dev_type in constants.DTS_DRBD: 3452 self.cfg.AddTcpUdpPort(root.logical_id[2]) 3453 3454 return hotmsg
3455
3456   def _CreateNewNic(self, idx, params, private):
3457 """Creates data structure for a new network interface. 3458 3459 """ 3460 mac = params[constants.INIC_MAC] 3461 ip = params.get(constants.INIC_IP, None) 3462 net = params.get(constants.INIC_NETWORK, None) 3463 name = params.get(constants.INIC_NAME, None) 3464 net_uuid = self.cfg.LookupNetwork(net) 3465 #TODO: not private.filled?? can a nic have no nicparams?? 3466 nicparams = private.filled 3467 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name, 3468 nicparams=nicparams) 3469 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId()) 3470 3471 changes = [ 3472 ("nic.%d" % idx, 3473 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" % 3474 (mac, ip, private.filled[constants.NIC_MODE], 3475 private.filled[constants.NIC_LINK], net)), 3476 ] 3477 3478 if self.op.hotplug: 3479 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD, 3480 constants.HOTPLUG_TARGET_NIC, 3481 nobj, None, idx) 3482 changes.append(("nic.%d" % idx, msg)) 3483 3484 return (nobj, changes)
3485
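# A new NIC is summarised by its MAC, IP, mode, link and network;
# private.filled holds the NIC parameters with cluster defaults applied.  A
# standalone sketch building the same change string (the helper name and the
# plain-dict parameters are simplifications):

def _describe_new_nic(idx, mac, ip, filled_params, network):
  desc = ("add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
          (mac, ip, filled_params.get("mode"), filled_params.get("link"),
           network))
  return ("nic.%d" % idx, desc)

# _describe_new_nic(0, "aa:00:00:11:22:33", None,
#                   {"mode": "bridged", "link": "br0"}, None)
#   == ("nic.0",
#       "add:mac=aa:00:00:11:22:33,ip=None,mode=bridged,link=br0,network=None")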
3486   def _ApplyNicMods(self, idx, nic, params, private):
3487 """Modifies a network interface. 3488 3489 """ 3490 changes = [] 3491 3492 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]: 3493 if key in params: 3494 changes.append(("nic.%s/%d" % (key, idx), params[key])) 3495 setattr(nic, key, params[key]) 3496 3497 new_net = params.get(constants.INIC_NETWORK, nic.network) 3498 new_net_uuid = self.cfg.LookupNetwork(new_net) 3499 if new_net_uuid != nic.network: 3500 changes.append(("nic.network/%d" % idx, new_net)) 3501 nic.network = new_net_uuid 3502 3503 if private.filled: 3504 nic.nicparams = private.filled 3505 3506 for (key, val) in nic.nicparams.items(): 3507 changes.append(("nic.%s/%d" % (key, idx), val)) 3508 3509 if self.op.hotplug: 3510 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY, 3511 constants.HOTPLUG_TARGET_NIC, 3512 nic, None, idx) 3513 changes.append(("nic/%d" % idx, msg)) 3514 3515 return changes
3516
3517   def _RemoveNic(self, idx, nic, _):
3518 if self.op.hotplug: 3519 return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE, 3520 constants.HOTPLUG_TARGET_NIC, 3521 nic, None, idx)
3522
3523   def Exec(self, feedback_fn):
3524 """Modifies an instance. 3525 3526 All parameters take effect only at the next restart of the instance. 3527 3528 """ 3529 # Process here the warnings from CheckPrereq, as we don't have a 3530 # feedback_fn there. 3531 # TODO: Replace with self.LogWarning 3532 for warn in self.warn: 3533 feedback_fn("WARNING: %s" % warn) 3534 3535 assert ((self.op.disk_template is None) ^ 3536 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \ 3537 "Not owning any node resource locks" 3538 3539 result = [] 3540 3541 # New primary node 3542 if self.op.pnode_uuid: 3543 self.instance.primary_node = self.op.pnode_uuid 3544 3545 # runtime memory 3546 if self.op.runtime_mem: 3547 rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node, 3548 self.instance, 3549 self.op.runtime_mem) 3550 rpcres.Raise("Cannot modify instance runtime memory") 3551 result.append(("runtime_memory", self.op.runtime_mem)) 3552 3553 # Apply disk changes 3554 _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod, 3555 self._CreateNewDisk, self._ModifyDisk, 3556 self._RemoveDisk, post_add_fn=self._PostAddDisk) 3557 _UpdateIvNames(0, self.instance.disks) 3558 3559 if self.op.disk_template: 3560 if __debug__: 3561 check_nodes = set(self.instance.all_nodes) 3562 if self.op.remote_node_uuid: 3563 check_nodes.add(self.op.remote_node_uuid) 3564 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]: 3565 owned = self.owned_locks(level) 3566 assert not (check_nodes - owned), \ 3567 ("Not owning the correct locks, owning %r, expected at least %r" % 3568 (owned, check_nodes)) 3569 3570 r_shut = ShutdownInstanceDisks(self, self.instance) 3571 if not r_shut: 3572 raise errors.OpExecError("Cannot shutdown instance disks, unable to" 3573 " proceed with disk template conversion") 3574 mode = (self.instance.disk_template, self.op.disk_template) 3575 try: 3576 self._DISK_CONVERSIONS[mode](self, feedback_fn) 3577 except: 3578 self.cfg.ReleaseDRBDMinors(self.instance.uuid) 3579 raise 3580 result.append(("disk_template", self.op.disk_template)) 3581 3582 assert self.instance.disk_template == self.op.disk_template, \ 3583 ("Expected disk template '%s', found '%s'" % 3584 (self.op.disk_template, self.instance.disk_template)) 3585 3586 # Release node and resource locks if there are any (they might already have 3587 # been released during disk conversion) 3588 ReleaseLocks(self, locking.LEVEL_NODE) 3589 ReleaseLocks(self, locking.LEVEL_NODE_RES) 3590 3591 # Apply NIC changes 3592 if self._new_nics is not None: 3593 self.instance.nics = self._new_nics 3594 result.extend(self._nic_chgdesc) 3595 3596 # hvparams changes 3597 if self.op.hvparams: 3598 self.instance.hvparams = self.hv_inst 3599 for key, val in self.op.hvparams.iteritems(): 3600 result.append(("hv/%s" % key, val)) 3601 3602 # beparams changes 3603 if self.op.beparams: 3604 self.instance.beparams = self.be_inst 3605 for key, val in self.op.beparams.iteritems(): 3606 result.append(("be/%s" % key, val)) 3607 3608 # OS change 3609 if self.op.os_name: 3610 self.instance.os = self.op.os_name 3611 3612 # osparams changes 3613 if self.op.osparams: 3614 self.instance.osparams = self.os_inst 3615 for key, val in self.op.osparams.iteritems(): 3616 result.append(("os/%s" % key, val)) 3617 3618 if self.op.offline is None: 3619 # Ignore 3620 pass 3621 elif self.op.offline: 3622 # Mark instance as offline 3623 self.cfg.MarkInstanceOffline(self.instance.uuid) 3624 result.append(("admin_state", constants.ADMINST_OFFLINE)) 3625 else: 3626 # Mark instance as online, but stopped 
3627 self.cfg.MarkInstanceDown(self.instance.uuid) 3628 result.append(("admin_state", constants.ADMINST_DOWN)) 3629 3630 self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId()) 3631 3632 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or 3633 self.owned_locks(locking.LEVEL_NODE)), \ 3634 "All node locks should have been released by now" 3635 3636 return result
3637 3638 _DISK_CONVERSIONS = { 3639 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd, 3640 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain, 3641 } 3642
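# _DISK_CONVERSIONS is a plain dispatch table keyed by the (current,
# requested) disk template pair; Exec looks the pair up and calls the
# matching conversion method.  The same pattern in isolation, with an
# explicit error for unsupported pairs (handler names here are hypothetical;
# the LU itself validates supported pairs earlier):

def _convert_disk_template(conversions, current, requested, *args):
  try:
    handler = conversions[(current, requested)]
  except KeyError:
    raise ValueError("Unsupported disk template conversion: %s -> %s" %
                     (current, requested))
  return handler(*args)

# Example table mirroring the one above:
#   conversions = {("plain", "drbd8"): plain_to_drbd,
#                  ("drbd8", "plain"): drbd_to_plain}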
3643   3644 class LUInstanceChangeGroup(LogicalUnit):
3645 HPATH = "instance-change-group" 3646 HTYPE = constants.HTYPE_INSTANCE 3647 REQ_BGL = False 3648
3649   def ExpandNames(self):
3650 self.share_locks = ShareAll() 3651 3652 self.needed_locks = { 3653 locking.LEVEL_NODEGROUP: [], 3654 locking.LEVEL_NODE: [], 3655 locking.LEVEL_NODE_ALLOC: locking.ALL_SET, 3656 } 3657 3658 self._ExpandAndLockInstance() 3659 3660 if self.op.target_groups: 3661 self.req_target_uuids = map(self.cfg.LookupNodeGroup, 3662 self.op.target_groups) 3663 else: 3664 self.req_target_uuids = None 3665 3666 self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
3667
3668   def DeclareLocks(self, level):
3669 if level == locking.LEVEL_NODEGROUP: 3670 assert not self.needed_locks[locking.LEVEL_NODEGROUP] 3671 3672 if self.req_target_uuids: 3673 lock_groups = set(self.req_target_uuids) 3674 3675 # Lock all groups used by instance optimistically; this requires going 3676 # via the node before it's locked, requiring verification later on 3677 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid) 3678 lock_groups.update(instance_groups) 3679 else: 3680 # No target groups, need to lock all of them 3681 lock_groups = locking.ALL_SET 3682 3683 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups 3684 3685 elif level == locking.LEVEL_NODE: 3686 if self.req_target_uuids: 3687 # Lock all nodes used by instances 3688 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND 3689 self._LockInstancesNodes() 3690 3691 # Lock all nodes in all potential target groups 3692 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) - 3693 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)) 3694 member_nodes = [node_uuid 3695 for group in lock_groups 3696 for node_uuid in self.cfg.GetNodeGroup(group).members] 3697 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes) 3698 else: 3699 # Lock all nodes as all groups are potential targets 3700 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3701
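# With explicit target groups only the member nodes of the potential target
# groups (locked groups the instance is not already in) need extra node
# locks.  The same set arithmetic, standalone (helper name is hypothetical):

def _extra_nodes_to_lock(owned_groups, instance_groups, group_members):
  """group_members maps group uuid -> list of member node uuids."""
  target_groups = frozenset(owned_groups) - frozenset(instance_groups)
  return [node for group in target_groups for node in group_members[group]]

# _extra_nodes_to_lock({"g1", "g2"}, {"g1"}, {"g1": ["n1"], "g2": ["n2", "n3"]})
#   == ["n2", "n3"]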
3702   def CheckPrereq(self):
3703 owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE)) 3704 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) 3705 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE)) 3706 3707 assert (self.req_target_uuids is None or 3708 owned_groups.issuperset(self.req_target_uuids)) 3709 assert owned_instance_names == set([self.op.instance_name]) 3710 3711 # Get instance information 3712 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid) 3713 3714 # Check if node groups for locked instance are still correct 3715 assert owned_nodes.issuperset(self.instance.all_nodes), \ 3716 ("Instance %s's nodes changed while we kept the lock" % 3717 self.op.instance_name) 3718 3719 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid, 3720 owned_groups) 3721 3722 if self.req_target_uuids: 3723 # User requested specific target groups 3724 self.target_uuids = frozenset(self.req_target_uuids) 3725 else: 3726 # All groups except those used by the instance are potential targets 3727 self.target_uuids = owned_groups - inst_groups 3728 3729 conflicting_groups = self.target_uuids & inst_groups 3730 if conflicting_groups: 3731 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are" 3732 " used by the instance '%s'" % 3733 (utils.CommaJoin(conflicting_groups), 3734 self.op.instance_name), 3735 errors.ECODE_INVAL) 3736 3737 if not self.target_uuids: 3738 raise errors.OpPrereqError("There are no possible target groups", 3739 errors.ECODE_INVAL)
3740
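# Target selection in CheckPrereq is pure set arithmetic: explicit targets
# must not overlap the groups the instance already uses, and without explicit
# targets every other owned group is a candidate.  A standalone sketch
# (helper name is hypothetical):

def _select_target_groups(requested, owned_groups, instance_groups):
  owned_groups = frozenset(owned_groups)
  instance_groups = frozenset(instance_groups)
  if requested:
    targets = frozenset(requested)
  else:
    targets = owned_groups - instance_groups
  conflicting = targets & instance_groups
  if conflicting:
    raise ValueError("Can't use group(s) %s as targets, they are used by the"
                     " instance" % ", ".join(sorted(conflicting)))
  if not targets:
    raise ValueError("There are no possible target groups")
  return targets

# _select_target_groups(None, {"g1", "g2", "g3"}, {"g1"})
#   == frozenset({"g2", "g3"})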
3741   def BuildHooksEnv(self):
3742 """Build hooks env. 3743 3744 """ 3745 assert self.target_uuids 3746 3747 env = { 3748 "TARGET_GROUPS": " ".join(self.target_uuids), 3749 } 3750 3751 env.update(BuildInstanceHookEnvByObject(self, self.instance)) 3752 3753 return env
3754
3755   def BuildHooksNodes(self):
3756 """Build hooks nodes. 3757 3758 """ 3759 mn = self.cfg.GetMasterNode() 3760 return ([mn], [mn])
3761
3762   def Exec(self, feedback_fn):
3763 instances = list(self.owned_locks(locking.LEVEL_INSTANCE)) 3764 3765 assert instances == [self.op.instance_name], "Instance not locked" 3766 3767 req = iallocator.IAReqGroupChange(instances=instances, 3768 target_groups=list(self.target_uuids)) 3769 ial = iallocator.IAllocator(self.cfg, self.rpc, req) 3770 3771 ial.Run(self.op.iallocator) 3772 3773 if not ial.success: 3774 raise errors.OpPrereqError("Can't compute solution for changing group of" 3775 " instance '%s' using iallocator '%s': %s" % 3776 (self.op.instance_name, self.op.iallocator, 3777 ial.info), errors.ECODE_NORES) 3778 3779 jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False) 3780 3781 self.LogInfo("Iallocator returned %s job(s) for changing group of" 3782 " instance '%s'", len(jobs), self.op.instance_name) 3783 3784 return ResultWithJobs(jobs)
3785