
Source Code for Module ganeti.cmdlib.instance

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import pathutils
from ganeti import serializer
import ganeti.rpc.node as rpc
from ganeti import utils
from ganeti.utils import retry

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, CheckOSImage, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination, \
  DetermineImageSize, IsInstanceRunning
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, ImageDisks, \
  WaitForSync, IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, \
  ComputeDisks, ComputeDisksInfo, CheckRADOSFreeSpace, ComputeDiskSizePerVG, \
  GenerateDiskTemplate, StartInstanceDisks, ShutdownInstanceDisks, \
  AssembleInstanceDisks, CheckSpindlesExclusiveStorage, TemporaryDisk, \
  CalculateFileStorageDir
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, UpdateMetadata, \
  CheckCompressionTool, CheckInstanceExistence
import ganeti.masterd.instance


#: Type description for changes as returned by L{_ApplyContainerMods}'s
#: callbacks
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
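# For illustration (not part of the upstream module): the check above accepts
# either None or a list of (name, change) pairs, where the name must be a
# non-empty string and the change can be any value, e.g.
#
#   _TApplyContModsCbChanges(None)                           # => True
#   _TApplyContModsCbChanges([("nic.mode/0", "bridged"),
#                             ("disk.size/1", 2048)])        # => True
#   _TApplyContModsCbChanges([("", "x")])                    # => False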


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       group_name=op.group_name,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)
    vlan = nic.get(constants.INIC_VLAN, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s) dimara
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # TODO: check the ip address for uniqueness
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip and not net:
      raise errors.OpPrereqError("Routed nic mode requires an ip address"
                                 " if not attached to a network",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # TODO: We need to factor this out
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # Build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link
    if vlan:
      nicparams[constants.NIC_VLAN] = vlan

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _ComputeInstanceCommunicationNIC(instance_name):
  """Compute the name of the instance NIC used by instance
  communication.

  With instance communication, a new NIC is added to the instance.
  This NIC has a special name that identifies it as being part of
  instance communication, and not just a normal NIC.  This function
  generates the name of the NIC based on a prefix and the instance
  name.

  @type instance_name: string
  @param instance_name: name of the instance the NIC belongs to

  @rtype: string
  @return: name of the NIC

  """
  return constants.INSTANCE_COMMUNICATION_NIC_PREFIX + instance_name


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
  def _CheckDiskTemplateValid(self):
    """Checks validity of disk template.

    """
    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template is None:
      # FIXME: It would be better to take the default disk template from the
      # ipolicy, but for the ipolicy we need the primary node, which we get from
      # the iallocator, which wants the disk template as input. To solve this
      # chicken-and-egg problem, it should be possible to specify just a node
      # group from the iallocator and take the ipolicy from that.
      self.op.disk_template = cluster.enabled_disk_templates[0]
    CheckDiskTemplateEnabled(cluster, self.op.disk_template)

  def _CheckDiskArguments(self):
    """Checks validity of disk-related arguments.

    """
    # check that disk's names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    self._CheckDiskTemplateValid()

    # check disks. parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

  def _CheckVLANArguments(self):
    """ Check validity of VLANs if given

    """
    for nic in self.op.nics:
      vlan = nic.get(constants.INIC_VLAN, None)
      if vlan:
        if vlan[0] == ".":
          # vlan starting with dot means single untagged vlan,
          # might be followed by trunk (:)
          if not vlan[1:].isdigit():
            vlanlist = vlan[1:].split(':')
            for vl in vlanlist:
              if not vl.isdigit():
                raise errors.OpPrereqError("Specified VLAN parameter is "
                                           "invalid : %s" % vlan,
                                           errors.ECODE_INVAL)
        elif vlan[0] == ":":
          # Trunk - tagged only
          vlanlist = vlan[1:].split(':')
          for vl in vlanlist:
            if not vl.isdigit():
              raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                         " : %s" % vlan, errors.ECODE_INVAL)
        elif vlan.isdigit():
          # This is the simplest case. No dots, only single digit
          # -> Create untagged access port, dot needs to be added
          nic[constants.INIC_VLAN] = "." + vlan
        else:
          raise errors.OpPrereqError("Specified VLAN parameter is invalid"
                                     " : %s" % vlan, errors.ECODE_INVAL)

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # add NIC for instance communication
    if self.op.instance_communication:
      nic_name = _ComputeInstanceCommunicationNIC(self.op.instance_name)

      for nic in self.op.nics:
        if nic.get(constants.INIC_NAME, None) == nic_name:
          break
      else:
        self.op.nics.append({constants.INIC_NAME: nic_name,
                             constants.INIC_MAC: constants.VALUE_GENERATE,
                             constants.INIC_IP: constants.NIC_IP_POOL,
                             constants.INIC_NETWORK:
                               self.cfg.GetInstanceCommunicationNetwork()})

    # timeouts for unsafe OS installs
    if self.op.helper_startup_timeout is None:
      self.op.helper_startup_timeout = constants.HELPER_VM_STARTUP

    if self.op.helper_shutdown_timeout is None:
      self.op.helper_shutdown_timeout = constants.HELPER_VM_SHUTDOWN

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC's parameters names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckVLANArguments()

    self._CheckDiskArguments()
    assert self.op.disk_template is not None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in constants.DTS_FILEBASED):
      self.op.file_driver = constants.FD_DEFAULT

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

      if objects.GetOSImage(self.op.osparams):
        self.LogInfo("OS image has no effect during import")
    elif self.op.mode == constants.INSTANCE_CREATE:
      os_image = CheckOSImage(self.op)

      if self.op.os_type is None and os_image is None:
        raise errors.OpPrereqError("No guest OS or OS image specified",
                                   errors.ECODE_INVAL)

      if self.op.os_type is not None \
            and self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      if objects.GetOSImage(self.op.osparams):
        self.LogInfo("OS image has no effect during import")

      self._cds = GetClusterDomainSecret()

      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    CheckInstanceExistence(self, self.op.instance_name)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
        self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
        if self.op.disk_template == constants.DT_DRBD8:
          self.opportunistic_locks_count[locking.LEVEL_NODE] = 2
          self.opportunistic_locks_count[locking.LEVEL_NODE_RES] = 2
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration).  We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice than to lock all groups, because they're allocated before nodes.
      # This is sad, but true. At least we release all those we don't need in
      # CheckPrereq later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        set(self.owned_locks(locking.LEVEL_NODE)) &
        set(self.owned_locks(locking.LEVEL_NODE_RES)))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # When opportunistic locks are used only a temporary failure is generated
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
        self.LogInfo("IAllocator '%s' failed on opportunistically acquired"
                     " nodes: %s", self.op.iallocator, ial.info)
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      # Note that self.disks here is not a list with objects.Disk
      # but with dicts as returned by ComputeDisks.
      disks=self.disks,
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl

  def _ReadExportInfo(self):
    """Reads the export information from disk.

    It will override the opcode source node and path with the actual
    information, if these two were not specified before.

    @return: the export information

    """
    assert self.op.mode == constants.INSTANCE_IMPORT

    if self.op.src_node_uuid is None:
      locked_nodes = self.owned_locks(locking.LEVEL_NODE)
      exp_list = self.rpc.call_export_list(locked_nodes)
      found = False
      for node_uuid in exp_list:
        if exp_list[node_uuid].fail_msg:
          continue
        if self.op.src_path in exp_list[node_uuid].payload:
          found = True
          self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name
          self.op.src_node_uuid = node_uuid
          self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR,
                                            self.op.src_path)
          break
      if not found:
        raise errors.OpPrereqError("No export found for relative path %s" %
                                   self.op.src_path, errors.ECODE_INVAL)

    CheckNodeOnline(self, self.op.src_node_uuid)
    result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path)
    result.Raise("No export or invalid export found in dir %s" %
                 self.op.src_path)

    export_info = objects.SerializableConfigParser.Loads(str(result.payload))
    if not export_info.has_section(constants.INISECT_EXP):
      raise errors.ProgrammerError("Corrupted export config",
                                   errors.ECODE_ENVIRON)

    ei_version = export_info.get(constants.INISECT_EXP, "version")
    if int(ei_version) != constants.EXPORT_VERSION:
      raise errors.OpPrereqError("Wrong export version %s (wanted %d)" %
                                 (ei_version, constants.EXPORT_VERSION),
                                 errors.ECODE_ENVIRON)
    return export_info

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []
      # TODO: import the disk iv_name too
      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
          disk = {
            constants.IDISK_SIZE: disk_sz,
            constants.IDISK_NAME: disk_name
            }
          disks.append(disk)
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in [constants.INIC_IP,
                       constants.INIC_MAC, constants.INIC_NAME]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          network = einfo.get(constants.INISECT_INS,
                              "nic%d_%s" % (idx, constants.INIC_NETWORK))
          # in case network is given link and mode are inherited
          # from nodegroup's netparams and thus should not be passed here
          if network:
            ndict[constants.INIC_NETWORK] = network
          else:
            for name in list(constants.NICS_PARAMETERS):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" be param
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

    if einfo.has_section(constants.INISECT_OSP_PRIVATE):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP_PRIVATE):
        if name not in self.op.osparams_private:
          self.op.osparams_private[name] = serializer.Private(value, descr=name)

  def _RevertToDefaults(self, cluster):
    """Revert the instance parameters to the default values.

    """
    # hvparams
    hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {})
    for name in self.op.hvparams.keys():
      if name in hv_defs and hv_defs[name] == self.op.hvparams[name]:
        del self.op.hvparams[name]
    # beparams
    be_defs = cluster.SimpleFillBE({})
    for name in self.op.beparams.keys():
      if name in be_defs and be_defs[name] == self.op.beparams[name]:
        del self.op.beparams[name]
    # nic params
    nic_defs = cluster.SimpleFillNIC({})
    for nic in self.op.nics:
      for name in constants.NICS_PARAMETERS:
        if name in nic and name in nic_defs and nic[name] == nic_defs[name]:
          del nic[name]
    # osparams
    os_defs = cluster.SimpleFillOS(self.op.os_type, {})
    for name in self.op.osparams.keys():
      if name in os_defs and os_defs[name] == self.op.osparams[name]:
        del self.op.osparams[name]

    os_defs_ = cluster.SimpleFillOS(self.op.os_type, {},
                                    os_params_private={})
    for name in self.op.osparams_private.keys():
      if name in os_defs_ and os_defs_[name] == self.op.osparams_private[name]:
        del self.op.osparams_private[name]

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self.instance_file_storage_dir = CalculateFileStorageDir(self)

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    if self.op.osparams_private is None:
      self.op.osparams_private = serializer.PrivateDict()
    if self.op.osparams_secret is None:
      self.op.osparams_secret = serializer.PrivateDict()

    self.os_full = cluster.SimpleFillOS(
      self.op.os_type,
      self.op.osparams,
      os_params_private=self.op.osparams_private,
      os_params_secret=self.op.osparams_secret
    )

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op.disks, self.op.disk_template, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need to
    # know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
                                 check=self.op.conflicts_check)
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in constants.DTS_LVM:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv" need to ensure that other calls
          # to ReserveLV uses the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name,
                     prereq=True)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name,
                     prereq=True)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name, prereq=True)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Check disk access param to be compatible with specified hypervisor
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    group_disk_params = self.cfg.GetGroupDiskParams(node_group)
    group_access_type = group_disk_params[self.op.disk_template].get(
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
    )
    for dsk in self.disks:
      access_type = dsk.get(constants.IDISK_ACCESS, group_access_type)
      if not IsValidDiskAccessModeCombination(self.op.hypervisor,
                                              self.op.disk_template,
                                              access_type):
        raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                   " used with %s disk access param" %
                                   (self.op.hypervisor, access_type),
                                   errors.ECODE_STATE)

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full,
                  self.op.force_variant)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    CheckCompressionTool(self, self.op.compress)

    #TODO: _CheckExtParams (remotely)
    # Check parameters for extstorage

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def _RemoveDegradedDisks(self, feedback_fn, disk_abort, instance):
    """Removes degraded disks and instance.

    It optionally checks whether disks are degraded.  If the disks are
    degraded, they are removed and the instance is also removed from
    the configuration.

    If L{disk_abort} is True, then the disks are considered degraded
    and removed, and the instance is removed from the configuration.

    If L{disk_abort} is False, then it first checks whether disks are
    degraded and, if so, it removes the disks and the instance is
    removed from the configuration.

    @type feedback_fn: callable
    @param feedback_fn: function used to send feedback back to the caller

    @type disk_abort: boolean
    @param disk_abort:
      True if disks are degraded, False to first check if disks are
      degraded
    @type instance: L{objects.Instance}
    @param instance: instance containing the disks to check

    @rtype: NoneType
    @return: None
    @raise errors.OpExecError: if disks are degraded

    """
    if disk_abort:
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, instance)
    elif instance.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, instance, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, instance)
      for disk_uuid in instance.disks:
        self.cfg.RemoveInstanceDisk(instance.uuid, disk_uuid)
      self.cfg.RemoveInstance(instance.uuid)
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

  def RunOsScripts(self, feedback_fn, iobj):
    """Run OS scripts.

    If necessary, disks are paused.  It handles instance create,
    import, and remote import.

    @type feedback_fn: callable
    @param feedback_fn: function used to send feedback back to the caller

    @type iobj: L{objects.Instance}
    @param iobj: instance object

    """
    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      disks = self.cfg.GetInstanceDisks(iobj.uuid)
      if self.op.mode == constants.INSTANCE_CREATE:
        os_image = objects.GetOSImage(self.op.osparams)

        if os_image is None and not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (disks, iobj),
                                                              True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid,
                                          (iobj, self.op.osparams_secret),
                                          False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (disks, iobj),
                                                              False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            if iobj.os:
              dst_io = constants.IEIO_SCRIPT
              dst_ioargs = ((disks[idx], iobj), idx)
            else:
              dst_io = constants.IEIO_RAW_DISK
              dst_ioargs = (disks[idx], iobj)

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               dst_io, dst_ioargs,
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  self.op.compress,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to make
          # a connection. In some cases stopping an instance can take a long
          # time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, self.op.compress, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        assert iobj.name == self.op.instance_name

        # Run rename script on newly imported instance
        if iobj.os:
          feedback_fn("Running rename script for %s" % self.op.instance_name)
          result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                     rename_from,
                                                     self.op.debug_level)
          result.Warn("Failed to run rename script for %s on node %s" %
                      (self.op.instance_name, self.pnode.name), self.LogWarning)

  def GetOsInstallPackageEnvironment(self, instance, script):
    """Returns the OS scripts environment for the helper VM

    @type instance: L{objects.Instance}
    @param instance: instance for which the OS scripts are run

    @type script: string
    @param script: script to run (e.g.,
      constants.OS_SCRIPT_CREATE_UNTRUSTED)

    @rtype: dict of string to string
    @return: OS scripts environment for the helper VM

    """
    env = {"OS_SCRIPT": script}

    # We pass only the instance's disks, not the helper VM's disks.
    if instance.hypervisor == constants.HT_KVM:
      prefix = "/dev/vd"
    elif instance.hypervisor in [constants.HT_XEN_PVM, constants.HT_XEN_HVM]:
      prefix = "/dev/xvd"
    else:
      raise errors.OpExecError("Cannot run OS scripts in a virtualized"
                               " environment for hypervisor '%s'"
                               % instance.hypervisor)

    num_disks = len(self.cfg.GetInstanceDisks(instance.uuid))

    for idx, disk_label in enumerate(utils.GetDiskLabels(prefix, num_disks + 1,
                                                         start=1)):
      env["DISK_%d_PATH" % idx] = disk_label

    return env
1529
1530 - def UpdateInstanceOsInstallPackage(self, feedback_fn, instance, override_env):
1531 """Updates the OS parameter 'os-install-package' for an instance. 1532 1533 The OS install package is an archive containing an OS definition 1534 and a file containing the environment variables needed to run the 1535 OS scripts. 1536 1537 The OS install package is served by the metadata daemon to the 1538 instances, so the OS scripts can run inside the virtualized 1539 environment. 1540 1541 @type feedback_fn: callable 1542 @param feedback_fn: function used send feedback back to the caller 1543 1544 @type instance: L{objects.Instance} 1545 @param instance: instance for which the OS parameter 1546 'os-install-package' is updated 1547 1548 @type override_env: dict of string to string 1549 @param override_env: if supplied, it overrides the environment of 1550 the export OS scripts archive 1551 1552 """ 1553 if "os-install-package" in instance.osparams: 1554 feedback_fn("Using OS install package '%s'" % 1555 instance.osparams["os-install-package"]) 1556 else: 1557 result = self.rpc.call_os_export(instance.primary_node, instance, 1558 override_env) 1559 result.Raise("Could not export OS '%s'" % instance.os) 1560 instance.osparams["os-install-package"] = result.payload 1561 1562 feedback_fn("Created OS install package '%s'" % result.payload)
1563
1564 - def RunOsScriptsVirtualized(self, feedback_fn, instance):
1565 """Runs the OS scripts inside a safe virtualized environment. 1566 1567 The virtualized environment reuses the instance and temporarily 1568 creates a disk onto which the image of the helper VM is dumped. 1569 The temporary disk is used to boot the helper VM. The OS scripts 1570 are passed to the helper VM through the metadata daemon and the OS 1571 install package. 1572 1573 @type feedback_fn: callable 1574 @param feedback_fn: function used send feedback back to the caller 1575 1576 @type instance: L{objects.Instance} 1577 @param instance: instance for which the OS scripts must be run 1578 inside the virtualized environment 1579 1580 """ 1581 install_image = self.cfg.GetInstallImage() 1582 1583 if not install_image: 1584 raise errors.OpExecError("Cannot create install instance because an" 1585 " install image has not been specified") 1586 1587 disk_size = DetermineImageSize(self, install_image, instance.primary_node) 1588 1589 env = self.GetOsInstallPackageEnvironment( 1590 instance, 1591 constants.OS_SCRIPT_CREATE_UNTRUSTED) 1592 self.UpdateInstanceOsInstallPackage(feedback_fn, instance, env) 1593 UpdateMetadata(feedback_fn, self.rpc, instance, 1594 osparams_private=self.op.osparams_private, 1595 osparams_secret=self.op.osparams_secret) 1596 1597 with TemporaryDisk(self, 1598 instance, 1599 [(constants.DT_PLAIN, constants.DISK_RDWR, disk_size)], 1600 feedback_fn): 1601 feedback_fn("Activating instance disks") 1602 StartInstanceDisks(self, instance, False) 1603 1604 feedback_fn("Imaging disk with install image") 1605 ImageDisks(self, instance, install_image) 1606 1607 feedback_fn("Starting instance with install image") 1608 result = self.rpc.call_instance_start(instance.primary_node, 1609 (instance, [], []), 1610 False, self.op.reason) 1611 result.Raise("Could not start instance '%s' with the install image '%s'" 1612 % (instance.name, install_image)) 1613 1614 # First wait for the instance to start up 1615 running_check = lambda: IsInstanceRunning(self, instance, prereq=False) 1616 instance_up = retry.SimpleRetry(True, running_check, 5.0, 1617 self.op.helper_startup_timeout) 1618 if not instance_up: 1619 raise errors.OpExecError("Could not boot instance using install image" 1620 " '%s'" % install_image) 1621 1622 feedback_fn("Instance is up, now awaiting shutdown") 1623 1624 # Then for it to be finished, detected by its shutdown 1625 instance_up = retry.SimpleRetry(False, running_check, 20.0, 1626 self.op.helper_shutdown_timeout) 1627 if instance_up: 1628 self.LogWarning("Installation not completed prior to timeout, shutting" 1629 " down instance forcibly") 1630 1631 feedback_fn("Installation complete")
1632
1633 - def Exec(self, feedback_fn):
1634 """Create and add the instance to the cluster. 1635 1636 """ 1637 assert not (self.owned_locks(locking.LEVEL_NODE_RES) - 1638 self.owned_locks(locking.LEVEL_NODE)), \ 1639 "Node locks differ from node resource locks" 1640 1641 ht_kind = self.op.hypervisor 1642 if ht_kind in constants.HTS_REQ_PORT: 1643 network_port = self.cfg.AllocatePort() 1644 else: 1645 network_port = None 1646 1647 instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId()) 1648 1649 # This is ugly but we got a chicken-egg problem here 1650 # We can only take the group disk parameters, as the instance 1651 # has no disks yet (we are generating them right here). 1652 nodegroup = self.cfg.GetNodeGroup(self.pnode.group) 1653 disks = GenerateDiskTemplate(self, 1654 self.op.disk_template, 1655 instance_uuid, self.pnode.uuid, 1656 self.secondaries, 1657 self.disks, 1658 self.instance_file_storage_dir, 1659 self.op.file_driver, 1660 0, 1661 feedback_fn, 1662 self.cfg.GetGroupDiskParams(nodegroup)) 1663 1664 if self.op.os_type is None: 1665 os_type = "" 1666 else: 1667 os_type = self.op.os_type 1668 1669 iobj = objects.Instance(name=self.op.instance_name, 1670 uuid=instance_uuid, 1671 os=os_type, 1672 primary_node=self.pnode.uuid, 1673 nics=self.nics, disks=[], 1674 disk_template=self.op.disk_template, 1675 disks_active=False, 1676 admin_state=constants.ADMINST_DOWN, 1677 admin_state_source=constants.ADMIN_SOURCE, 1678 network_port=network_port, 1679 beparams=self.op.beparams, 1680 hvparams=self.op.hvparams, 1681 hypervisor=self.op.hypervisor, 1682 osparams=self.op.osparams, 1683 osparams_private=self.op.osparams_private, 1684 ) 1685 1686 if self.op.tags: 1687 for tag in self.op.tags: 1688 iobj.AddTag(tag) 1689 1690 if self.adopt_disks: 1691 if self.op.disk_template == constants.DT_PLAIN: 1692 # rename LVs to the newly-generated names; we need to construct 1693 # 'fake' LV disks with the old data, plus the new unique_id 1694 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks] 1695 rename_to = [] 1696 for t_dsk, a_dsk in zip(tmp_disks, self.disks): 1697 rename_to.append(t_dsk.logical_id) 1698 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT]) 1699 result = self.rpc.call_blockdev_rename(self.pnode.uuid, 1700 zip(tmp_disks, rename_to)) 1701 result.Raise("Failed to rename adoped LVs") 1702 else: 1703 feedback_fn("* creating instance disks...") 1704 try: 1705 CreateDisks(self, iobj, disks=disks) 1706 except errors.OpExecError: 1707 self.LogWarning("Device creation failed") 1708 self.cfg.ReleaseDRBDMinors(instance_uuid) 1709 raise 1710 1711 feedback_fn("adding instance %s to cluster config" % self.op.instance_name) 1712 self.cfg.AddInstance(iobj, self.proc.GetECId()) 1713 1714 feedback_fn("adding disks to cluster config") 1715 for disk in disks: 1716 self.cfg.AddInstanceDisk(iobj.uuid, disk) 1717 1718 # re-read the instance from the configuration 1719 iobj = self.cfg.GetInstanceInfo(iobj.uuid) 1720 1721 if self.op.mode == constants.INSTANCE_IMPORT: 1722 # Release unused nodes 1723 ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid]) 1724 else: 1725 # Release all nodes 1726 ReleaseLocks(self, locking.LEVEL_NODE) 1727 1728 # Wipe disks 1729 disk_abort = False 1730 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks: 1731 feedback_fn("* wiping instance disks...") 1732 try: 1733 WipeDisks(self, iobj) 1734 except errors.OpExecError, err: 1735 logging.exception("Wiping disks failed") 1736 self.LogWarning("Wiping instance disks failed (%s)", err) 1737 disk_abort = 
True 1738 1739 self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj) 1740 1741 # Image disks 1742 os_image = objects.GetOSImage(iobj.osparams) 1743 disk_abort = False 1744 1745 if not self.adopt_disks and os_image is not None: 1746 feedback_fn("* imaging instance disks...") 1747 try: 1748 ImageDisks(self, iobj, os_image) 1749 except errors.OpExecError, err: 1750 logging.exception("Imaging disks failed") 1751 self.LogWarning("Imaging instance disks failed (%s)", err) 1752 disk_abort = True 1753 1754 self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj) 1755 1756 # instance disks are now active 1757 iobj.disks_active = True 1758 1759 # Release all node resource locks 1760 ReleaseLocks(self, locking.LEVEL_NODE_RES) 1761 1762 if iobj.os: 1763 result = self.rpc.call_os_diagnose([iobj.primary_node])[iobj.primary_node] 1764 result.Raise("Failed to get OS '%s'" % iobj.os) 1765 1766 trusted = None 1767 1768 for (name, _, _, _, _, _, _, os_trusted) in result.payload: 1769 if name == objects.OS.GetName(iobj.os): 1770 trusted = os_trusted 1771 break 1772 1773 if trusted is None: 1774 raise errors.OpPrereqError("OS '%s' is not available in node '%s'" % 1775 (iobj.os, iobj.primary_node)) 1776 elif trusted: 1777 self.RunOsScripts(feedback_fn, iobj) 1778 else: 1779 self.RunOsScriptsVirtualized(feedback_fn, iobj) 1780 # Instance is modified by 'RunOsScriptsVirtualized', 1781 # therefore, it must be retrieved once again from the 1782 # configuration, otherwise there will be a config object 1783 # version mismatch. 1784 iobj = self.cfg.GetInstanceInfo(iobj.uuid) 1785 1786 # Update instance metadata so that it can be reached from the 1787 # metadata service. 1788 UpdateMetadata(feedback_fn, self.rpc, iobj, 1789 osparams_private=self.op.osparams_private, 1790 osparams_secret=self.op.osparams_secret) 1791 1792 assert not self.owned_locks(locking.LEVEL_NODE_RES) 1793 1794 if self.op.start: 1795 iobj.admin_state = constants.ADMINST_UP 1796 self.cfg.Update(iobj, feedback_fn) 1797 logging.info("Starting instance %s on node %s", self.op.instance_name, 1798 self.pnode.name) 1799 feedback_fn("* starting instance...") 1800 result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None), 1801 False, self.op.reason) 1802 result.Raise("Could not start instance") 1803 1804 return self.cfg.GetNodeNames(list(self.cfg.GetInstanceNodes(iobj.uuid)))
1805
1806 - def PrepareRetry(self, feedback_fn):
1807 # A temporary lack of resources can only happen if opportunistic locking 1808 # is used. 1809 assert self.op.opportunistic_locking 1810 1811 logging.info("Opportunistic locking did not succeed, falling back to" 1812 " full lock allocation") 1813 feedback_fn("* falling back to full lock allocation") 1814 self.op.opportunistic_locking = False
1815
1816 1817 -class LUInstanceRename(LogicalUnit):
1818 """Rename an instance. 1819 1820 """ 1821 HPATH = "instance-rename" 1822 HTYPE = constants.HTYPE_INSTANCE 1823 REQ_BGL = False 1824
1825 - def CheckArguments(self):
1826 """Check arguments. 1827 1828 """ 1829 if self.op.ip_check and not self.op.name_check: 1830 # TODO: make the ip check more flexible and not depend on the name check 1831 raise errors.OpPrereqError("IP address check requires a name check", 1832 errors.ECODE_INVAL) 1833 1834 self._new_name_resolved = False
1835
1836 - def BuildHooksEnv(self):
1837 """Build hooks env. 1838 1839 This runs on master, primary and secondary nodes of the instance. 1840 1841 """ 1842 env = BuildInstanceHookEnvByObject(self, self.instance) 1843 env["INSTANCE_NEW_NAME"] = self.op.new_name 1844 return env
1845
1846 - def BuildHooksNodes(self):
1847 """Build hooks nodes. 1848 1849 """ 1850 nl = [self.cfg.GetMasterNode()] + \ 1851 list(self.cfg.GetInstanceNodes(self.instance.uuid)) 1852 return (nl, nl)
1853
1854 - def _PerformChecksAndResolveNewName(self):
1855 """Checks and resolves the new name, storing the FQDN, if permitted. 1856 1857 """ 1858 if self._new_name_resolved or not self.op.name_check: 1859 return 1860 1861 hostname = _CheckHostnameSane(self, self.op.new_name) 1862 self.op.new_name = hostname.name 1863 if (self.op.ip_check and 1864 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)): 1865 raise errors.OpPrereqError("IP %s of instance %s already in use" % 1866 (hostname.ip, self.op.new_name), 1867 errors.ECODE_NOTUNIQUE) 1868 self._new_name_resolved = True
1869
1870 - def CheckPrereq(self):
1871 """Check prerequisites. 1872 1873 This checks that the instance is in the cluster and is not running. 1874 1875 """ 1876 (self.op.instance_uuid, self.op.instance_name) = \ 1877 ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid, 1878 self.op.instance_name) 1879 instance = self.cfg.GetInstanceInfo(self.op.instance_uuid) 1880 assert instance is not None 1881 1882 # It should actually not happen that an instance is running with a disabled 1883 # disk template, but in case it does, the renaming of file-based instances 1884 # will fail horribly. Thus, we test it before. 1885 if (instance.disk_template in constants.DTS_FILEBASED and 1886 self.op.new_name != instance.name): 1887 CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(), 1888 instance.disk_template) 1889 1890 CheckNodeOnline(self, instance.primary_node) 1891 CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING, 1892 msg="cannot rename") 1893 self.instance = instance 1894 1895 self._PerformChecksAndResolveNewName() 1896 1897 if self.op.new_name != instance.name: 1898 CheckInstanceExistence(self, self.op.new_name)
1899
1900 - def ExpandNames(self):
1901 self._ExpandAndLockInstance() 1902 1903 # Note that this call might not resolve anything if name checks have been 1904 # disabled in the opcode. In this case, we might have a renaming collision 1905 # if a shortened name and a full name are used simultaneously, as we will 1906 # have two different locks. However, at that point the user has taken away 1907 # the tools necessary to detect this issue. 1908 self._PerformChecksAndResolveNewName() 1909 1910 # Used to prevent instance namespace collisions. 1911 if self.op.new_name != self.op.instance_name: 1912 CheckInstanceExistence(self, self.op.new_name) 1913 self.add_locks[locking.LEVEL_INSTANCE] = self.op.new_name
1914
1915 - def Exec(self, feedback_fn):
1916 """Rename the instance. 1917 1918 """ 1919 old_name = self.instance.name 1920 1921 rename_file_storage = False 1922 if (self.instance.disk_template in (constants.DT_FILE, 1923 constants.DT_SHARED_FILE) and 1924 self.op.new_name != self.instance.name): 1925 disks = self.cfg.GetInstanceDisks(self.instance.uuid) 1926 old_file_storage_dir = os.path.dirname(disks[0].logical_id[1]) 1927 rename_file_storage = True 1928 1929 self.cfg.RenameInstance(self.instance.uuid, self.op.new_name) 1930 1931 # Assert that we have both the locks needed 1932 assert old_name in self.owned_locks(locking.LEVEL_INSTANCE) 1933 assert self.op.new_name in self.owned_locks(locking.LEVEL_INSTANCE) 1934 1935 # re-read the instance from the configuration after rename 1936 renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid) 1937 disks = self.cfg.GetInstanceDisks(renamed_inst.uuid) 1938 1939 if rename_file_storage: 1940 new_file_storage_dir = os.path.dirname(disks[0].logical_id[1]) 1941 result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node, 1942 old_file_storage_dir, 1943 new_file_storage_dir) 1944 result.Raise("Could not rename on node %s directory '%s' to '%s'" 1945 " (but the instance has been renamed in Ganeti)" % 1946 (self.cfg.GetNodeName(renamed_inst.primary_node), 1947 old_file_storage_dir, new_file_storage_dir)) 1948 1949 StartInstanceDisks(self, renamed_inst, None) 1950 renamed_inst = self.cfg.GetInstanceInfo(renamed_inst.uuid) 1951 1952 # update info on disks 1953 info = GetInstanceInfoText(renamed_inst) 1954 for (idx, disk) in enumerate(disks): 1955 for node_uuid in self.cfg.GetInstanceNodes(renamed_inst.uuid): 1956 result = self.rpc.call_blockdev_setinfo(node_uuid, 1957 (disk, renamed_inst), info) 1958 result.Warn("Error setting info on node %s for disk %s" % 1959 (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning) 1960 try: 1961 result = self.rpc.call_instance_run_rename(renamed_inst.primary_node, 1962 renamed_inst, old_name, 1963 self.op.debug_level) 1964 result.Warn("Could not run OS rename script for instance %s on node %s" 1965 " (but the instance has been renamed in Ganeti)" % 1966 (renamed_inst.name, 1967 self.cfg.GetNodeName(renamed_inst.primary_node)), 1968 self.LogWarning) 1969 finally: 1970 ShutdownInstanceDisks(self, renamed_inst) 1971 1972 return renamed_inst.name
1973
1974 1975 -class LUInstanceRemove(LogicalUnit):
1976 """Remove an instance. 1977 1978 """ 1979 HPATH = "instance-remove" 1980 HTYPE = constants.HTYPE_INSTANCE 1981 REQ_BGL = False 1982
1983 - def ExpandNames(self):
1984 self._ExpandAndLockInstance() 1985 self.needed_locks[locking.LEVEL_NODE] = [] 1986 self.needed_locks[locking.LEVEL_NODE_RES] = [] 1987 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE 1988 self.dont_collate_locks[locking.LEVEL_NODE] = True 1989 self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
1990
1991 - def DeclareLocks(self, level):
1992 if level == locking.LEVEL_NODE: 1993 self._LockInstancesNodes() 1994 elif level == locking.LEVEL_NODE_RES: 1995 # Copy node locks 1996 self.needed_locks[locking.LEVEL_NODE_RES] = \ 1997 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1998
1999 - def BuildHooksEnv(self):
2000 """Build hooks env. 2001 2002 This runs on master, primary and secondary nodes of the instance. 2003 2004 """ 2005 env = BuildInstanceHookEnvByObject(self, self.instance, 2006 secondary_nodes=self.secondary_nodes, 2007 disks=self.inst_disks) 2008 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout 2009 return env
2010
2011 - def BuildHooksNodes(self):
2012 """Build hooks nodes. 2013 2014 """ 2015 nl = [self.cfg.GetMasterNode()] 2016 nl_post = list(self.cfg.GetInstanceNodes(self.instance.uuid)) + nl 2017 return (nl, nl_post)
2018
2019 - def CheckPrereq(self):
2020 """Check prerequisites. 2021 2022 This checks that the instance is in the cluster. 2023 2024 """ 2025 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid) 2026 assert self.instance is not None, \ 2027 "Cannot retrieve locked instance %s" % self.op.instance_name 2028 self.secondary_nodes = \ 2029 self.cfg.GetInstanceSecondaryNodes(self.instance.uuid) 2030 self.inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
2031
2032 - def Exec(self, feedback_fn):
2033 """Remove the instance. 2034 2035 """ 2036 logging.info("Shutting down instance %s on node %s", self.instance.name, 2037 self.cfg.GetNodeName(self.instance.primary_node)) 2038 2039 result = self.rpc.call_instance_shutdown(self.instance.primary_node, 2040 self.instance, 2041 self.op.shutdown_timeout, 2042 self.op.reason) 2043 if self.op.ignore_failures: 2044 result.Warn("Warning: can't shutdown instance", feedback_fn) 2045 else: 2046 result.Raise("Could not shutdown instance %s on node %s" % 2047 (self.instance.name, 2048 self.cfg.GetNodeName(self.instance.primary_node))) 2049 2050 assert (self.owned_locks(locking.LEVEL_NODE) == 2051 self.owned_locks(locking.LEVEL_NODE_RES)) 2052 assert not (set(self.cfg.GetInstanceNodes(self.instance.uuid)) - 2053 self.owned_locks(locking.LEVEL_NODE)), \ 2054 "Not owning correct locks" 2055 2056 RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
2057
2058 2059 -class LUInstanceMove(LogicalUnit):
2060 """Move an instance by data-copying. 2061 2062 This LU is only used if the instance needs to be moved by copying the data 2063 from one node in the cluster to another. The instance is shut down and 2064 the data is copied to the new node and the configuration change is propagated, 2065 then the instance is started again. 2066 2067 See also: 2068 L{LUInstanceFailover} for moving an instance on shared storage (no copying 2069 required). 2070 2071 L{LUInstanceMigrate} for the live migration of an instance (no shutdown 2072 required). 2073 """ 2074 HPATH = "instance-move" 2075 HTYPE = constants.HTYPE_INSTANCE 2076 REQ_BGL = False 2077
2078 - def ExpandNames(self):
2079 self._ExpandAndLockInstance() 2080 (self.op.target_node_uuid, self.op.target_node) = \ 2081 ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid, 2082 self.op.target_node) 2083 self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid] 2084 self.needed_locks[locking.LEVEL_NODE_RES] = [] 2085 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
2086
2087 - def DeclareLocks(self, level):
2088 if level == locking.LEVEL_NODE: 2089 self._LockInstancesNodes(primary_only=True) 2090 elif level == locking.LEVEL_NODE_RES: 2091 # Copy node locks 2092 self.needed_locks[locking.LEVEL_NODE_RES] = \ 2093 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2094
2095 - def BuildHooksEnv(self):
2096 """Build hooks env. 2097 2098 This runs on master, primary and target nodes of the instance. 2099 2100 """ 2101 env = { 2102 "TARGET_NODE": self.op.target_node, 2103 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout, 2104 } 2105 env.update(BuildInstanceHookEnvByObject(self, self.instance)) 2106 return env
2107
2108 - def BuildHooksNodes(self):
2109 """Build hooks nodes. 2110 2111 """ 2112 nl = [ 2113 self.cfg.GetMasterNode(), 2114 self.instance.primary_node, 2115 self.op.target_node_uuid, 2116 ] 2117 return (nl, nl)
2118
2119 - def CheckPrereq(self):
2120 """Check prerequisites. 2121 2122 This checks that the instance is in the cluster. 2123 2124 """ 2125 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid) 2126 assert self.instance is not None, \ 2127 "Cannot retrieve locked instance %s" % self.op.instance_name 2128 2129 disks = self.cfg.GetInstanceDisks(self.instance.uuid) 2130 for idx, dsk in enumerate(disks): 2131 if dsk.dev_type not in constants.DTS_COPYABLE: 2132 raise errors.OpPrereqError("Instance disk %d has disk type %s and is" 2133 " not suitable for copying" 2134 % (idx, dsk.dev_type), errors.ECODE_STATE) 2135 2136 target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid) 2137 assert target_node is not None, \ 2138 "Cannot retrieve locked node %s" % self.op.target_node 2139 2140 self.target_node_uuid = target_node.uuid 2141 if target_node.uuid == self.instance.primary_node: 2142 raise errors.OpPrereqError("Instance %s is already on the node %s" % 2143 (self.instance.name, target_node.name), 2144 errors.ECODE_STATE) 2145 2146 cluster = self.cfg.GetClusterInfo() 2147 bep = cluster.FillBE(self.instance) 2148 2149 CheckNodeOnline(self, target_node.uuid) 2150 CheckNodeNotDrained(self, target_node.uuid) 2151 CheckNodeVmCapable(self, target_node.uuid) 2152 group_info = self.cfg.GetNodeGroup(target_node.group) 2153 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info) 2154 CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg, 2155 ignore=self.op.ignore_ipolicy) 2156 2157 if self.instance.admin_state == constants.ADMINST_UP: 2158 # check memory requirements on the target node 2159 CheckNodeFreeMemory( 2160 self, target_node.uuid, "failing over instance %s" % 2161 self.instance.name, bep[constants.BE_MAXMEM], 2162 self.instance.hypervisor, 2163 cluster.hvparams[self.instance.hypervisor]) 2164 else: 2165 self.LogInfo("Not checking memory on the secondary node as" 2166 " instance will not be started") 2167 2168 # check bridge existance 2169 CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
2170
2171 - def Exec(self, feedback_fn):
2172 """Move an instance. 2173 2174 The move is done by shutting it down on its present node, copying 2175 the data over (slow) and starting it on the new node. 2176 2177 """ 2178 source_node = self.cfg.GetNodeInfo(self.instance.primary_node) 2179 target_node = self.cfg.GetNodeInfo(self.target_node_uuid) 2180 2181 self.LogInfo("Shutting down instance %s on source node %s", 2182 self.instance.name, source_node.name) 2183 2184 assert (self.owned_locks(locking.LEVEL_NODE) == 2185 self.owned_locks(locking.LEVEL_NODE_RES)) 2186 2187 result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance, 2188 self.op.shutdown_timeout, 2189 self.op.reason) 2190 if self.op.ignore_consistency: 2191 result.Warn("Could not shutdown instance %s on node %s. Proceeding" 2192 " anyway. Please make sure node %s is down. Error details" % 2193 (self.instance.name, source_node.name, source_node.name), 2194 self.LogWarning) 2195 else: 2196 result.Raise("Could not shutdown instance %s on node %s" % 2197 (self.instance.name, source_node.name)) 2198 2199 # create the target disks 2200 try: 2201 CreateDisks(self, self.instance, target_node_uuid=target_node.uuid) 2202 except errors.OpExecError: 2203 self.LogWarning("Device creation failed") 2204 self.cfg.ReleaseDRBDMinors(self.instance.uuid) 2205 raise 2206 2207 errs = [] 2208 transfers = [] 2209 # activate, get path, create transfer jobs 2210 disks = self.cfg.GetInstanceDisks(self.instance.uuid) 2211 for idx, disk in enumerate(disks): 2212 # FIXME: pass debug option from opcode to backend 2213 dt = masterd.instance.DiskTransfer("disk/%s" % idx, 2214 constants.IEIO_RAW_DISK, 2215 (disk, self.instance), 2216 constants.IEIO_RAW_DISK, 2217 (disk, self.instance), 2218 None) 2219 transfers.append(dt) 2220 self.cfg.Update(disk, feedback_fn) 2221 2222 import_result = \ 2223 masterd.instance.TransferInstanceData(self, feedback_fn, 2224 source_node.uuid, 2225 target_node.uuid, 2226 target_node.secondary_ip, 2227 self.op.compress, 2228 self.instance, transfers) 2229 if not compat.all(import_result): 2230 errs.append("Failed to transfer instance data") 2231 2232 if errs: 2233 self.LogWarning("Some disks failed to copy, aborting") 2234 try: 2235 RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid) 2236 finally: 2237 self.cfg.ReleaseDRBDMinors(self.instance.uuid) 2238 raise errors.OpExecError("Errors during disk copy: %s" % 2239 (",".join(errs),)) 2240 2241 self.instance.primary_node = target_node.uuid 2242 self.cfg.Update(self.instance, feedback_fn) 2243 2244 self.LogInfo("Removing the disks on the original node") 2245 RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid) 2246 2247 # Only start the instance if it's marked as up 2248 if self.instance.admin_state == constants.ADMINST_UP: 2249 self.LogInfo("Starting instance %s on node %s", 2250 self.instance.name, target_node.name) 2251 2252 disks_ok, _ = AssembleInstanceDisks(self, self.instance, 2253 ignore_secondaries=True) 2254 if not disks_ok: 2255 ShutdownInstanceDisks(self, self.instance) 2256 raise errors.OpExecError("Can't activate the instance's disks") 2257 2258 result = self.rpc.call_instance_start(target_node.uuid, 2259 (self.instance, None, None), False, 2260 self.op.reason) 2261 msg = result.fail_msg 2262 if msg: 2263 ShutdownInstanceDisks(self, self.instance) 2264 raise errors.OpExecError("Could not start instance %s on node %s: %s" % 2265 (self.instance.name, target_node.name, msg))
2266
2267 2268 -class LUInstanceMultiAlloc(NoHooksLU):
2269 """Allocates multiple instances at the same time. 2270 2271 """ 2272 REQ_BGL = False 2273
2274 - def CheckArguments(self):
2275 """Check arguments. 2276 2277 """ 2278 nodes = [] 2279 for inst in self.op.instances: 2280 if inst.iallocator is not None: 2281 raise errors.OpPrereqError("iallocator are not allowed to be set on" 2282 " instance objects", errors.ECODE_INVAL) 2283 nodes.append(bool(inst.pnode)) 2284 if inst.disk_template in constants.DTS_INT_MIRROR: 2285 nodes.append(bool(inst.snode)) 2286 2287 has_nodes = compat.any(nodes) 2288 if compat.all(nodes) ^ has_nodes: 2289 raise errors.OpPrereqError("There are instance objects providing" 2290 " pnode/snode while others do not", 2291 errors.ECODE_INVAL) 2292 2293 if not has_nodes and self.op.iallocator is None: 2294 default_iallocator = self.cfg.GetDefaultIAllocator() 2295 if default_iallocator: 2296 self.op.iallocator = default_iallocator 2297 else: 2298 raise errors.OpPrereqError("No iallocator or nodes on the instances" 2299 " given and no cluster-wide default" 2300 " iallocator found; please specify either" 2301 " an iallocator or nodes on the instances" 2302 " or set a cluster-wide default iallocator", 2303 errors.ECODE_INVAL) 2304 2305 _CheckOpportunisticLocking(self.op) 2306 2307 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances]) 2308 if dups: 2309 raise errors.OpPrereqError("There are duplicate instance names: %s" % 2310 utils.CommaJoin(dups), errors.ECODE_INVAL)
2311
2312 - def ExpandNames(self):
2313 """Calculate the locks. 2314 2315 """ 2316 self.share_locks = ShareAll() 2317 self.needed_locks = { 2318 # iallocator will select nodes and even if no iallocator is used, 2319 # collisions with LUInstanceCreate should be avoided 2320 locking.LEVEL_NODE_ALLOC: locking.ALL_SET, 2321 } 2322 2323 if self.op.iallocator: 2324 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET 2325 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET 2326 2327 if self.op.opportunistic_locking: 2328 self.opportunistic_locks[locking.LEVEL_NODE] = True 2329 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True 2330 else: 2331 nodeslist = [] 2332 for inst in self.op.instances: 2333 (inst.pnode_uuid, inst.pnode) = \ 2334 ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode) 2335 nodeslist.append(inst.pnode_uuid) 2336 if inst.snode is not None: 2337 (inst.snode_uuid, inst.snode) = \ 2338 ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode) 2339 nodeslist.append(inst.snode_uuid) 2340 2341 self.needed_locks[locking.LEVEL_NODE] = nodeslist 2342 # Lock resources of instance's primary and secondary nodes (copy to 2343 # prevent accidential modification) 2344 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
2345
2346 - def CheckPrereq(self):
2347 """Check prerequisite. 2348 2349 """ 2350 if self.op.iallocator: 2351 cluster = self.cfg.GetClusterInfo() 2352 default_vg = self.cfg.GetVGName() 2353 ec_id = self.proc.GetECId() 2354 2355 if self.op.opportunistic_locking: 2356 # Only consider nodes for which a lock is held 2357 node_whitelist = self.cfg.GetNodeNames( 2358 set(self.owned_locks(locking.LEVEL_NODE)) & 2359 set(self.owned_locks(locking.LEVEL_NODE_RES))) 2360 else: 2361 node_whitelist = None 2362 2363 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op.disks, 2364 op.disk_template, 2365 default_vg), 2366 _ComputeNics(op, cluster, None, 2367 self.cfg, ec_id), 2368 _ComputeFullBeParams(op, cluster), 2369 node_whitelist) 2370 for op in self.op.instances] 2371 2372 req = iallocator.IAReqMultiInstanceAlloc(instances=insts) 2373 ial = iallocator.IAllocator(self.cfg, self.rpc, req) 2374 2375 ial.Run(self.op.iallocator) 2376 2377 if not ial.success: 2378 raise errors.OpPrereqError("Can't compute nodes using" 2379 " iallocator '%s': %s" % 2380 (self.op.iallocator, ial.info), 2381 errors.ECODE_NORES) 2382 2383 self.ia_result = ial.result 2384 2385 if self.op.dry_run: 2386 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), { 2387 constants.JOB_IDS_KEY: [], 2388 })
2389
2390 - def _ConstructPartialResult(self):
2391 """Contructs the partial result. 2392 2393 """ 2394 if self.op.iallocator: 2395 (allocatable, failed_insts) = self.ia_result 2396 allocatable_insts = map(compat.fst, allocatable) 2397 else: 2398 allocatable_insts = [op.instance_name for op in self.op.instances] 2399 failed_insts = [] 2400 2401 return { 2402 constants.ALLOCATABLE_KEY: allocatable_insts, 2403 constants.FAILED_KEY: failed_insts, 2404 }
2405
2406 - def Exec(self, feedback_fn):
2407 """Executes the opcode. 2408 2409 """ 2410 jobs = [] 2411 if self.op.iallocator: 2412 op2inst = dict((op.instance_name, op) for op in self.op.instances) 2413 (allocatable, failed) = self.ia_result 2414 2415 for (name, node_names) in allocatable: 2416 op = op2inst.pop(name) 2417 2418 (op.pnode_uuid, op.pnode) = \ 2419 ExpandNodeUuidAndName(self.cfg, None, node_names[0]) 2420 if len(node_names) > 1: 2421 (op.snode_uuid, op.snode) = \ 2422 ExpandNodeUuidAndName(self.cfg, None, node_names[1]) 2423 2424 jobs.append([op]) 2425 2426 missing = set(op2inst.keys()) - set(failed) 2427 assert not missing, \ 2428 "Iallocator did return incomplete result: %s" % \ 2429 utils.CommaJoin(missing) 2430 else: 2431 jobs.extend([op] for op in self.op.instances) 2432 2433 return ResultWithJobs(jobs, **self._ConstructPartialResult())
2434
2435 2436 -class _InstNicModPrivate(object):
2437 """Data structure for network interface modifications. 2438 2439 Used by L{LUInstanceSetParams}. 2440 2441 """
2442 - def __init__(self):
2443 self.params = None 2444 self.filled = None
2445
2446 2447 -def _PrepareContainerMods(mods, private_fn):
2448 """Prepares a list of container modifications by adding a private data field. 2449 2450 @type mods: list of tuples; (operation, index, parameters) 2451 @param mods: List of modifications 2452 @type private_fn: callable or None 2453 @param private_fn: Callable for constructing a private data field for a 2454 modification 2455 @rtype: list 2456 2457 """ 2458 if private_fn is None: 2459 fn = lambda: None 2460 else: 2461 fn = private_fn 2462 2463 return [(op, idx, params, fn()) for (op, idx, params) in mods]
2464
2465 2466 -def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2467 """Checks if nodes have enough physical CPUs 2468 2469 This function checks if all given nodes have the needed number of 2470 physical CPUs. In case any node has less CPUs or we cannot get the 2471 information from the node, this function raises an OpPrereqError 2472 exception. 2473 2474 @type lu: C{LogicalUnit} 2475 @param lu: a logical unit from which we get configuration data 2476 @type node_uuids: C{list} 2477 @param node_uuids: the list of node UUIDs to check 2478 @type requested: C{int} 2479 @param requested: the minimum acceptable number of physical CPUs 2480 @type hypervisor_specs: list of pairs (string, dict of strings) 2481 @param hypervisor_specs: list of hypervisor specifications in 2482 pairs (hypervisor_name, hvparams) 2483 @raise errors.OpPrereqError: if the node doesn't have enough CPUs, 2484 or we cannot check the node 2485 2486 """ 2487 nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs) 2488 for node_uuid in node_uuids: 2489 info = nodeinfo[node_uuid] 2490 node_name = lu.cfg.GetNodeName(node_uuid) 2491 info.Raise("Cannot get current information from node %s" % node_name, 2492 prereq=True, ecode=errors.ECODE_ENVIRON) 2493 (_, _, (hv_info, )) = info.payload 2494 num_cpus = hv_info.get("cpu_total", None) 2495 if not isinstance(num_cpus, int): 2496 raise errors.OpPrereqError("Can't compute the number of physical CPUs" 2497 " on node %s, result was '%s'" % 2498 (node_name, num_cpus), errors.ECODE_ENVIRON) 2499 if requested > num_cpus: 2500 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are " 2501 "required" % (node_name, num_cpus, requested), 2502 errors.ECODE_NORES)
2503
2504 2505 -def GetItemFromContainer(identifier, kind, container):
2506 """Return the item refered by the identifier. 2507 2508 @type identifier: string 2509 @param identifier: Item index or name or UUID 2510 @type kind: string 2511 @param kind: One-word item description 2512 @type container: list 2513 @param container: Container to get the item from 2514 2515 """ 2516 # Index 2517 try: 2518 idx = int(identifier) 2519 if idx == -1: 2520 # Append 2521 absidx = len(container) - 1 2522 elif idx < 0: 2523 raise IndexError("Not accepting negative indices other than -1") 2524 elif idx > len(container): 2525 raise IndexError("Got %s index %s, but there are only %s" % 2526 (kind, idx, len(container))) 2527 else: 2528 absidx = idx 2529 return (absidx, container[idx]) 2530 except ValueError: 2531 pass 2532 2533 for idx, item in enumerate(container): 2534 if item.uuid == identifier or item.name == identifier: 2535 return (idx, item) 2536 2537 raise errors.OpPrereqError("Cannot find %s with identifier %s" % 2538 (kind, identifier), errors.ECODE_NOENT)
2539
2540 2541 -def _ApplyContainerMods(kind, container, chgdesc, mods, 2542 create_fn, modify_fn, remove_fn, 2543 post_add_fn=None):
2544 """Applies descriptions in C{mods} to C{container}. 2545 2546 @type kind: string 2547 @param kind: One-word item description 2548 @type container: list 2549 @param container: Container to modify 2550 @type chgdesc: None or list 2551 @param chgdesc: List of applied changes 2552 @type mods: list 2553 @param mods: Modifications as returned by L{_PrepareContainerMods} 2554 @type create_fn: callable 2555 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD}); 2556 receives absolute item index, parameters and private data object as added 2557 by L{_PrepareContainerMods}, returns tuple containing new item and changes 2558 as list 2559 @type modify_fn: callable 2560 @param modify_fn: Callback for modifying an existing item 2561 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters 2562 and private data object as added by L{_PrepareContainerMods}, returns 2563 changes as list 2564 @type remove_fn: callable 2565 @param remove_fn: Callback on removing item; receives absolute item index, 2566 item and private data object as added by L{_PrepareContainerMods} 2567 @type post_add_fn: callable 2568 @param post_add_fn: Callable for post-processing a newly created item after 2569 it has been put into the container. It receives the index of the new item 2570 and the new item as parameters. 2571 2572 """ 2573 for (op, identifier, params, private) in mods: 2574 changes = None 2575 2576 if op == constants.DDM_ADD: 2577 # Calculate where item will be added 2578 # When adding an item, identifier can only be an index 2579 try: 2580 idx = int(identifier) 2581 except ValueError: 2582 raise errors.OpPrereqError("Only possitive integer or -1 is accepted as" 2583 " identifier for %s" % constants.DDM_ADD, 2584 errors.ECODE_INVAL) 2585 if idx == -1: 2586 addidx = len(container) 2587 else: 2588 if idx < 0: 2589 raise IndexError("Not accepting negative indices other than -1") 2590 elif idx > len(container): 2591 raise IndexError("Got %s index %s, but there are only %s" % 2592 (kind, idx, len(container))) 2593 addidx = idx 2594 2595 if create_fn is None: 2596 item = params 2597 else: 2598 (item, changes) = create_fn(addidx, params, private) 2599 2600 if idx == -1: 2601 container.append(item) 2602 else: 2603 assert idx >= 0 2604 assert idx <= len(container) 2605 # list.insert does so before the specified index 2606 container.insert(idx, item) 2607 2608 if post_add_fn is not None: 2609 post_add_fn(addidx, item) 2610 2611 else: 2612 # Retrieve existing item 2613 (absidx, item) = GetItemFromContainer(identifier, kind, container) 2614 2615 if op == constants.DDM_REMOVE: 2616 assert not params 2617 2618 changes = [("%s/%s" % (kind, absidx), "remove")] 2619 2620 if remove_fn is not None: 2621 msg = remove_fn(absidx, item, private) 2622 if msg: 2623 changes.append(("%s/%s" % (kind, absidx), msg)) 2624 2625 assert container[absidx] == item 2626 del container[absidx] 2627 elif op == constants.DDM_MODIFY: 2628 if modify_fn is not None: 2629 changes = modify_fn(absidx, item, params, private) 2630 else: 2631 raise errors.ProgrammerError("Unhandled operation '%s'" % op) 2632 2633 assert _TApplyContModsCbChanges(changes) 2634 2635 if not (chgdesc is None or changes is None): 2636 chgdesc.extend(changes)
2637
2638 2639 -class LUInstanceSetParams(LogicalUnit):
2640 """Modifies an instances's parameters. 2641 2642 """ 2643 HPATH = "instance-modify" 2644 HTYPE = constants.HTYPE_INSTANCE 2645 REQ_BGL = False 2646 2647 @staticmethod
2648 - def _UpgradeDiskNicMods(kind, mods, verify_fn):
2649 assert ht.TList(mods) 2650 assert not mods or len(mods[0]) in (2, 3) 2651 2652 if mods and len(mods[0]) == 2: 2653 result = [] 2654 2655 addremove = 0 2656 for op, params in mods: 2657 if op in (constants.DDM_ADD, constants.DDM_REMOVE): 2658 result.append((op, -1, params)) 2659 addremove += 1 2660 2661 if addremove > 1: 2662 raise errors.OpPrereqError("Only one %s add or remove operation is" 2663 " supported at a time" % kind, 2664 errors.ECODE_INVAL) 2665 else: 2666 result.append((constants.DDM_MODIFY, op, params)) 2667 2668 assert verify_fn(result) 2669 else: 2670 result = mods 2671 2672 return result
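# Illustrative example (not part of the module): the upgrade above turns the
# legacy two-element syntax into the newer (op, identifier, params) form,
# while three-element modifications pass through unchanged.
#
#   [(constants.DDM_ADD, {...})]   ->  [(constants.DDM_ADD, -1, {...})]
#   [(constants.DDM_REMOVE, {})]   ->  [(constants.DDM_REMOVE, -1, {})]
#   [("2", {...})]                 ->  [(constants.DDM_MODIFY, "2", {...})]
#
# At most one add or remove operation is accepted per request in the legacy
# form.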
2673 2674 @staticmethod
2675 - def _CheckMods(kind, mods, key_types, item_fn):
2676 """Ensures requested disk/NIC modifications are valid. 2677 2678 """ 2679 for (op, _, params) in mods: 2680 assert ht.TDict(params) 2681 2682 # If 'key_types' is an empty dict, we assume we have an 2683 # 'ext' template and thus do not ForceDictType 2684 if key_types: 2685 utils.ForceDictType(params, key_types) 2686 2687 if op == constants.DDM_REMOVE: 2688 if params: 2689 raise errors.OpPrereqError("No settings should be passed when" 2690 " removing a %s" % kind, 2691 errors.ECODE_INVAL) 2692 elif op in (constants.DDM_ADD, constants.DDM_MODIFY): 2693 item_fn(op, params) 2694 else: 2695 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2696
2697 - def _VerifyDiskModification(self, op, params, excl_stor, group_access_type):
2698 """Verifies a disk modification. 2699 2700 """ 2701 if op == constants.DDM_ADD: 2702 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR) 2703 if mode not in constants.DISK_ACCESS_SET: 2704 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode, 2705 errors.ECODE_INVAL) 2706 2707 size = params.get(constants.IDISK_SIZE, None) 2708 if size is None: 2709 raise errors.OpPrereqError("Required disk parameter '%s' missing" % 2710 constants.IDISK_SIZE, errors.ECODE_INVAL) 2711 size = int(size) 2712 2713 params[constants.IDISK_SIZE] = size 2714 name = params.get(constants.IDISK_NAME, None) 2715 if name is not None and name.lower() == constants.VALUE_NONE: 2716 params[constants.IDISK_NAME] = None 2717 2718 CheckSpindlesExclusiveStorage(params, excl_stor, True) 2719 2720 # Check disk access param (only for specific disks) 2721 if self.instance.disk_template in constants.DTS_HAVE_ACCESS: 2722 access_type = params.get(constants.IDISK_ACCESS, group_access_type) 2723 if not IsValidDiskAccessModeCombination(self.instance.hypervisor, 2724 self.instance.disk_template, 2725 access_type): 2726 raise errors.OpPrereqError("Selected hypervisor (%s) cannot be" 2727 " used with %s disk access param" % 2728 (self.instance.hypervisor, access_type), 2729 errors.ECODE_STATE) 2730 2731 elif op == constants.DDM_MODIFY: 2732 if constants.IDISK_SIZE in params: 2733 raise errors.OpPrereqError("Disk size change not possible, use" 2734 " grow-disk", errors.ECODE_INVAL) 2735 2736 # Disk modification supports changing only the disk name and mode. 2737 # Changing arbitrary parameters is allowed only for ext disk template", 2738 if self.instance.disk_template != constants.DT_EXT: 2739 utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES) 2740 else: 2741 # We have to check that 'access' parameter can not be modified 2742 if constants.IDISK_ACCESS in params: 2743 raise errors.OpPrereqError("Disk 'access' parameter change is" 2744 " not possible", errors.ECODE_INVAL) 2745 2746 name = params.get(constants.IDISK_NAME, None) 2747 if name is not None and name.lower() == constants.VALUE_NONE: 2748 params[constants.IDISK_NAME] = None
2749 2750 @staticmethod
2751 - def _VerifyNicModification(op, params):
2752 """Verifies a network interface modification. 2753 2754 """ 2755 if op in (constants.DDM_ADD, constants.DDM_MODIFY): 2756 ip = params.get(constants.INIC_IP, None) 2757 name = params.get(constants.INIC_NAME, None) 2758 req_net = params.get(constants.INIC_NETWORK, None) 2759 link = params.get(constants.NIC_LINK, None) 2760 mode = params.get(constants.NIC_MODE, None) 2761 if name is not None and name.lower() == constants.VALUE_NONE: 2762 params[constants.INIC_NAME] = None 2763 if req_net is not None: 2764 if req_net.lower() == constants.VALUE_NONE: 2765 params[constants.INIC_NETWORK] = None 2766 req_net = None 2767 elif link is not None or mode is not None: 2768 raise errors.OpPrereqError("If network is given" 2769 " mode or link should not", 2770 errors.ECODE_INVAL) 2771 2772 if op == constants.DDM_ADD: 2773 macaddr = params.get(constants.INIC_MAC, None) 2774 if macaddr is None: 2775 params[constants.INIC_MAC] = constants.VALUE_AUTO 2776 2777 if ip is not None: 2778 if ip.lower() == constants.VALUE_NONE: 2779 params[constants.INIC_IP] = None 2780 else: 2781 if ip.lower() == constants.NIC_IP_POOL: 2782 if op == constants.DDM_ADD and req_net is None: 2783 raise errors.OpPrereqError("If ip=pool, parameter network" 2784 " cannot be none", 2785 errors.ECODE_INVAL) 2786 else: 2787 if not netutils.IPAddress.IsValid(ip): 2788 raise errors.OpPrereqError("Invalid IP address '%s'" % ip, 2789 errors.ECODE_INVAL) 2790 2791 if constants.INIC_MAC in params: 2792 macaddr = params[constants.INIC_MAC] 2793 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE): 2794 macaddr = utils.NormalizeAndValidateMac(macaddr) 2795 2796 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO: 2797 raise errors.OpPrereqError("'auto' is not a valid MAC address when" 2798 " modifying an existing NIC", 2799 errors.ECODE_INVAL)
2800
2801 - def CheckArguments(self):
2802 if not (self.op.nics or self.op.disks or self.op.disk_template or 2803 self.op.hvparams or self.op.beparams or self.op.os_name or 2804 self.op.osparams or self.op.offline is not None or 2805 self.op.runtime_mem or self.op.pnode or self.op.osparams_private or 2806 self.op.instance_communication is not None): 2807 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL) 2808 2809 if self.op.hvparams: 2810 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, 2811 "hypervisor", "instance", "cluster") 2812 2813 self.op.disks = self._UpgradeDiskNicMods( 2814 "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams)) 2815 self.op.nics = self._UpgradeDiskNicMods( 2816 "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams)) 2817 2818 # Check disk template modifications 2819 if self.op.disk_template: 2820 if self.op.disks: 2821 raise errors.OpPrereqError("Disk template conversion and other disk" 2822 " changes not supported at the same time", 2823 errors.ECODE_INVAL) 2824 2825 # mirrored template node checks 2826 if self.op.disk_template in constants.DTS_INT_MIRROR: 2827 if not self.op.remote_node: 2828 raise errors.OpPrereqError("Changing the disk template to a mirrored" 2829 " one requires specifying a secondary" 2830 " node", errors.ECODE_INVAL) 2831 elif self.op.remote_node: 2832 self.LogWarning("Changing the disk template to a non-mirrored one," 2833 " the secondary node will be ignored") 2834 # the secondary node must be cleared in order to be ignored, otherwise 2835 # the operation will fail, in the GenerateDiskTemplate method 2836 self.op.remote_node = None 2837 2838 # file-based template checks 2839 if self.op.disk_template in constants.DTS_FILEBASED: 2840 if not self.op.file_driver: 2841 self.op.file_driver = constants.FD_DEFAULT 2842 elif self.op.file_driver not in constants.FILE_DRIVER: 2843 raise errors.OpPrereqError("Invalid file driver name '%s'" % 2844 self.op.file_driver, errors.ECODE_INVAL) 2845 2846 # Check NIC modifications 2847 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES, 2848 self._VerifyNicModification) 2849 2850 if self.op.pnode: 2851 (self.op.pnode_uuid, self.op.pnode) = \ 2852 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2853
2854 - def ExpandNames(self):
2855 self._ExpandAndLockInstance() 2856 self.needed_locks[locking.LEVEL_NODEGROUP] = [] 2857 # Can't even acquire node locks in shared mode as upcoming changes in 2858 # Ganeti 2.6 will start to modify the node object on disk conversion 2859 self.needed_locks[locking.LEVEL_NODE] = [] 2860 self.needed_locks[locking.LEVEL_NODE_RES] = [] 2861 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE 2862 # Lock the node group to look up the ipolicy 2863 self.share_locks[locking.LEVEL_NODEGROUP] = 1 2864 self.dont_collate_locks[locking.LEVEL_NODEGROUP] = True 2865 self.dont_collate_locks[locking.LEVEL_NODE] = True 2866 self.dont_collate_locks[locking.LEVEL_NODE_RES] = True
2867
2868 - def DeclareLocks(self, level):
2869 if level == locking.LEVEL_NODEGROUP: 2870 assert not self.needed_locks[locking.LEVEL_NODEGROUP] 2871 # Acquire locks for the instance's nodegroups optimistically. Needs 2872 # to be verified in CheckPrereq 2873 self.needed_locks[locking.LEVEL_NODEGROUP] = \ 2874 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid) 2875 elif level == locking.LEVEL_NODE: 2876 self._LockInstancesNodes() 2877 if self.op.disk_template and self.op.remote_node: 2878 (self.op.remote_node_uuid, self.op.remote_node) = \ 2879 ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid, 2880 self.op.remote_node) 2881 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid) 2882 elif level == locking.LEVEL_NODE_RES and self.op.disk_template: 2883 # Copy node locks 2884 self.needed_locks[locking.LEVEL_NODE_RES] = \ 2885 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2886
2887 - def BuildHooksEnv(self):
2888 """Build hooks env. 2889 2890 This runs on the master, primary and secondaries. 2891 2892 """ 2893 args = {} 2894 if constants.BE_MINMEM in self.be_new: 2895 args["minmem"] = self.be_new[constants.BE_MINMEM] 2896 if constants.BE_MAXMEM in self.be_new: 2897 args["maxmem"] = self.be_new[constants.BE_MAXMEM] 2898 if constants.BE_VCPUS in self.be_new: 2899 args["vcpus"] = self.be_new[constants.BE_VCPUS] 2900 # TODO: export disk changes. Note: _BuildInstanceHookEnv* don't export disk 2901 # information at all. 2902 2903 if self._new_nics is not None: 2904 nics = [] 2905 2906 for nic in self._new_nics: 2907 n = copy.deepcopy(nic) 2908 nicparams = self.cluster.SimpleFillNIC(n.nicparams) 2909 n.nicparams = nicparams 2910 nics.append(NICToTuple(self, n)) 2911 2912 args["nics"] = nics 2913 2914 env = BuildInstanceHookEnvByObject(self, self.instance, override=args) 2915 if self.op.disk_template: 2916 env["NEW_DISK_TEMPLATE"] = self.op.disk_template 2917 if self.op.runtime_mem: 2918 env["RUNTIME_MEMORY"] = self.op.runtime_mem 2919 2920 return env
2921
2922 - def BuildHooksNodes(self):
2923 """Build hooks nodes. 2924 2925 """ 2926 nl = [self.cfg.GetMasterNode()] + \ 2927 list(self.cfg.GetInstanceNodes(self.instance.uuid)) 2928 return (nl, nl)
2929
2930 - def _PrepareNicModification(self, params, private, old_ip, old_net_uuid, 2931 old_params, cluster, pnode_uuid):
2932 2933 update_params_dict = dict([(key, params[key]) 2934 for key in constants.NICS_PARAMETERS 2935 if key in params]) 2936 2937 req_link = update_params_dict.get(constants.NIC_LINK, None) 2938 req_mode = update_params_dict.get(constants.NIC_MODE, None) 2939 2940 new_net_uuid = None 2941 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid) 2942 if new_net_uuid_or_name: 2943 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name) 2944 new_net_obj = self.cfg.GetNetwork(new_net_uuid) 2945 2946 if old_net_uuid: 2947 old_net_obj = self.cfg.GetNetwork(old_net_uuid) 2948 2949 if new_net_uuid: 2950 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid) 2951 if not netparams: 2952 raise errors.OpPrereqError("No netparams found for the network" 2953 " %s, probably not connected" % 2954 new_net_obj.name, errors.ECODE_INVAL) 2955 new_params = dict(netparams) 2956 else: 2957 new_params = GetUpdatedParams(old_params, update_params_dict) 2958 2959 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES) 2960 2961 new_filled_params = cluster.SimpleFillNIC(new_params) 2962 objects.NIC.CheckParameterSyntax(new_filled_params) 2963 2964 new_mode = new_filled_params[constants.NIC_MODE] 2965 if new_mode == constants.NIC_MODE_BRIDGED: 2966 bridge = new_filled_params[constants.NIC_LINK] 2967 msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg 2968 if msg: 2969 msg = "Error checking bridges on node '%s': %s" % \ 2970 (self.cfg.GetNodeName(pnode_uuid), msg) 2971 if self.op.force: 2972 self.warn.append(msg) 2973 else: 2974 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON) 2975 2976 elif new_mode == constants.NIC_MODE_ROUTED: 2977 ip = params.get(constants.INIC_IP, old_ip) 2978 if ip is None and not new_net_uuid: 2979 raise errors.OpPrereqError("Cannot set the NIC IP address to None" 2980 " on a routed NIC if not attached to a" 2981 " network", errors.ECODE_INVAL) 2982 2983 elif new_mode == constants.NIC_MODE_OVS: 2984 # TODO: check OVS link 2985 self.LogInfo("OVS links are currently not checked for correctness") 2986 2987 if constants.INIC_MAC in params: 2988 mac = params[constants.INIC_MAC] 2989 if mac is None: 2990 raise errors.OpPrereqError("Cannot unset the NIC MAC address", 2991 errors.ECODE_INVAL) 2992 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE): 2993 # otherwise generate the MAC address 2994 params[constants.INIC_MAC] = \ 2995 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId()) 2996 else: 2997 # or validate/reserve the current one 2998 try: 2999 self.cfg.ReserveMAC(mac, self.proc.GetECId()) 3000 except errors.ReservationError: 3001 raise errors.OpPrereqError("MAC address '%s' already in use" 3002 " in cluster" % mac, 3003 errors.ECODE_NOTUNIQUE) 3004 elif new_net_uuid != old_net_uuid: 3005 3006 def get_net_prefix(net_uuid): 3007 mac_prefix = None 3008 if net_uuid: 3009 nobj = self.cfg.GetNetwork(net_uuid) 3010 mac_prefix = nobj.mac_prefix 3011 3012 return mac_prefix
3013 3014 new_prefix = get_net_prefix(new_net_uuid) 3015 old_prefix = get_net_prefix(old_net_uuid) 3016 if old_prefix != new_prefix: 3017 params[constants.INIC_MAC] = \ 3018 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId()) 3019 3020 # if there is a change in (ip, network) tuple 3021 new_ip = params.get(constants.INIC_IP, old_ip) 3022 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid): 3023 if new_ip: 3024 # if IP is pool then require a network and generate one IP 3025 if new_ip.lower() == constants.NIC_IP_POOL: 3026 if new_net_uuid: 3027 try: 3028 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId()) 3029 except errors.ReservationError: 3030 raise errors.OpPrereqError("Unable to get a free IP" 3031 " from the address pool", 3032 errors.ECODE_STATE) 3033 self.LogInfo("Chose IP %s from network %s", 3034 new_ip, 3035 new_net_obj.name) 3036 params[constants.INIC_IP] = new_ip 3037 else: 3038 raise errors.OpPrereqError("ip=pool, but no network found", 3039 errors.ECODE_INVAL) 3040 # Reserve new IP if in the new network if any 3041 elif new_net_uuid: 3042 try: 3043 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(), 3044 check=self.op.conflicts_check) 3045 self.LogInfo("Reserving IP %s in network %s", 3046 new_ip, new_net_obj.name) 3047 except errors.ReservationError: 3048 raise errors.OpPrereqError("IP %s not available in network %s" % 3049 (new_ip, new_net_obj.name), 3050 errors.ECODE_NOTUNIQUE) 3051 # new network is None so check if new IP is a conflicting IP 3052 elif self.op.conflicts_check: 3053 _CheckForConflictingIp(self, new_ip, pnode_uuid) 3054 3055 # release old IP if old network is not None 3056 if old_ip and old_net_uuid: 3057 try: 3058 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId()) 3059 except errors.AddressPoolError: 3060 logging.warning("Release IP %s not contained in network %s", 3061 old_ip, old_net_obj.name) 3062 3063 # there are no changes in (ip, network) tuple and old network is not None 3064 elif (old_net_uuid is not None and 3065 (req_link is not None or req_mode is not None)): 3066 raise errors.OpPrereqError("Not allowed to change link or mode of" 3067 " a NIC that is connected to a network", 3068 errors.ECODE_INVAL) 3069 3070 private.params = new_params 3071 private.filled = new_filled_params
3072
3073 - def _PreCheckDiskTemplate(self, pnode_info):
3074 """CheckPrereq checks related to a new disk template.""" 3075 # Arguments are passed to avoid configuration lookups 3076 pnode_uuid = self.instance.primary_node 3077 3078 if self.instance.disk_template in constants.DTS_NOT_CONVERTIBLE_FROM: 3079 raise errors.OpPrereqError("Conversion from the '%s' disk template is" 3080 " not supported" % self.instance.disk_template, 3081 errors.ECODE_INVAL) 3082 3083 elif self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO: 3084 raise errors.OpPrereqError("Conversion to the '%s' disk template is" 3085 " not supported" % self.op.disk_template, 3086 errors.ECODE_INVAL) 3087 3088 if (self.op.disk_template != constants.DT_EXT and 3089 self.instance.disk_template == self.op.disk_template): 3090 raise errors.OpPrereqError("Instance already has disk template %s" % 3091 self.instance.disk_template, 3092 errors.ECODE_INVAL) 3093 3094 if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template): 3095 enabled_dts = utils.CommaJoin(self.cluster.enabled_disk_templates) 3096 raise errors.OpPrereqError("Disk template '%s' is not enabled for this" 3097 " cluster (enabled templates: %s)" % 3098 (self.op.disk_template, enabled_dts), 3099 errors.ECODE_STATE) 3100 3101 default_vg = self.cfg.GetVGName() 3102 if (not default_vg and 3103 self.op.disk_template not in constants.DTS_NOT_LVM): 3104 raise errors.OpPrereqError("Disk template conversions to lvm-based" 3105 " instances are not supported by the cluster", 3106 errors.ECODE_STATE) 3107 3108 CheckInstanceState(self, self.instance, INSTANCE_DOWN, 3109 msg="cannot change disk template") 3110 3111 # compute new disks' information 3112 inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid) 3113 self.disks_info = ComputeDisksInfo(inst_disks, self.op.disk_template, 3114 default_vg, self.op.ext_params) 3115 3116 # mirror node verification 3117 if self.op.disk_template in constants.DTS_INT_MIRROR: 3118 if self.op.remote_node_uuid == pnode_uuid: 3119 raise errors.OpPrereqError("Given new secondary node %s is the same" 3120 " as the primary node of the instance" % 3121 self.op.remote_node, errors.ECODE_STATE) 3122 CheckNodeOnline(self, self.op.remote_node_uuid) 3123 CheckNodeNotDrained(self, self.op.remote_node_uuid) 3124 CheckNodeVmCapable(self, self.op.remote_node_uuid) 3125 3126 snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid) 3127 snode_group = self.cfg.GetNodeGroup(snode_info.group) 3128 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster, 3129 snode_group) 3130 CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg, 3131 ignore=self.op.ignore_ipolicy) 3132 if pnode_info.group != snode_info.group: 3133 self.LogWarning("The primary and secondary nodes are in two" 3134 " different node groups; the disk parameters" 3135 " from the first disk's node group will be" 3136 " used") 3137 3138 # check that the template is in the primary node group's allowed templates 3139 pnode_group = self.cfg.GetNodeGroup(pnode_info.group) 3140 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster, 3141 pnode_group) 3142 allowed_dts = ipolicy[constants.IPOLICY_DTS] 3143 if self.op.disk_template not in allowed_dts: 3144 raise errors.OpPrereqError("Disk template '%s' in not allowed (allowed" 3145 " templates: %s)" % (self.op.disk_template, 3146 utils.CommaJoin(allowed_dts)), 3147 errors.ECODE_STATE) 3148 3149 if not self.op.disk_template in constants.DTS_EXCL_STORAGE: 3150 # Make sure none of the nodes require exclusive storage 3151 nodes = [pnode_info] 3152 if 
self.op.disk_template in constants.DTS_INT_MIRROR: 3153 assert snode_info 3154 nodes.append(snode_info) 3155 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n) 3156 if compat.any(map(has_es, nodes)): 3157 errmsg = ("Cannot convert disk template from %s to %s when exclusive" 3158 " storage is enabled" % (self.instance.disk_template, 3159 self.op.disk_template)) 3160 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE) 3161 3162 # node capacity checks 3163 if (self.op.disk_template == constants.DT_PLAIN and 3164 self.instance.disk_template == constants.DT_DRBD8): 3165 # we ensure that no capacity checks will be made for conversions from 3166 # the 'drbd' to the 'plain' disk template 3167 pass 3168 elif (self.op.disk_template == constants.DT_DRBD8 and 3169 self.instance.disk_template == constants.DT_PLAIN): 3170 # for conversions from the 'plain' to the 'drbd' disk template, check 3171 # only the remote node's capacity 3172 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info) 3173 CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], req_sizes) 3174 elif self.op.disk_template in constants.DTS_LVM: 3175 # rest lvm-based capacity checks 3176 node_uuids = [pnode_uuid] 3177 if self.op.remote_node_uuid: 3178 node_uuids.append(self.op.remote_node_uuid) 3179 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info) 3180 CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes) 3181 elif self.op.disk_template == constants.DT_RBD: 3182 # CheckRADOSFreeSpace() is simply a placeholder 3183 CheckRADOSFreeSpace() 3184 elif self.op.disk_template == constants.DT_EXT: 3185 # FIXME: Capacity checks for extstorage template, if exists 3186 pass 3187 else: 3188 # FIXME: Checks about other non lvm-based disk templates 3189 pass
3190
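# --- Illustrative note, not part of the original module ---
# The checks above mirror the dispatch later performed in Exec(): the two
# LVM-based conversions use the dedicated helpers registered in
# _DISK_CONVERSIONS, while every other supported pair goes through the
# generic _ConvertInstanceTemplate().  Sketch of that dispatch, using only
# names that appear in this listing:
#
#   mode = (self.instance.disk_template, self.op.disk_template)
#   if mode in self._DISK_CONVERSIONS:   # (plain -> drbd) or (drbd -> plain)
#     self._DISK_CONVERSIONS[mode](self, feedback_fn)
#   else:                                # e.g. drbd -> rbd, plain -> ext, ...
#     self._ConvertInstanceTemplate(feedback_fn)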
3191 - def _PreCheckDisks(self, ispec):
3192 """CheckPrereq checks related to disk changes. 3193 3194 @type ispec: dict 3195 @param ispec: instance specs to be updated with the new disks 3196 3197 """ 3198 self.diskparams = self.cfg.GetInstanceDiskParams(self.instance) 3199 3200 inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid) 3201 excl_stor = compat.any( 3202 rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes).values() 3203 ) 3204 3205 # Get the group access type 3206 node_info = self.cfg.GetNodeInfo(self.instance.primary_node) 3207 node_group = self.cfg.GetNodeGroup(node_info.group) 3208 group_disk_params = self.cfg.GetGroupDiskParams(node_group) 3209 group_access_type = group_disk_params[self.instance.disk_template].get( 3210 constants.RBD_ACCESS, constants.DISK_KERNELSPACE 3211 ) 3212 3213 # Check disk modifications. This is done here and not in CheckArguments 3214 # (as with NICs), because we need to know the instance's disk template 3215 ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor, 3216 group_access_type) 3217 if self.instance.disk_template == constants.DT_EXT: 3218 self._CheckMods("disk", self.op.disks, {}, ver_fn) 3219 else: 3220 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES, 3221 ver_fn) 3222 3223 self.diskmod = _PrepareContainerMods(self.op.disks, None) 3224 3225 # Check the validity of the `provider' parameter 3226 if self.instance.disk_template in constants.DT_EXT: 3227 for mod in self.diskmod: 3228 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None) 3229 if mod[0] == constants.DDM_ADD: 3230 if ext_provider is None: 3231 raise errors.OpPrereqError("Instance template is '%s' and parameter" 3232 " '%s' missing, during disk add" % 3233 (constants.DT_EXT, 3234 constants.IDISK_PROVIDER), 3235 errors.ECODE_NOENT) 3236 elif mod[0] == constants.DDM_MODIFY: 3237 if ext_provider: 3238 raise errors.OpPrereqError("Parameter '%s' is invalid during disk" 3239 " modification" % 3240 constants.IDISK_PROVIDER, 3241 errors.ECODE_INVAL) 3242 else: 3243 for mod in self.diskmod: 3244 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None) 3245 if ext_provider is not None: 3246 raise errors.OpPrereqError("Parameter '%s' is only valid for" 3247 " instances of type '%s'" % 3248 (constants.IDISK_PROVIDER, 3249 constants.DT_EXT), 3250 errors.ECODE_INVAL) 3251 3252 if not self.op.wait_for_sync and not self.instance.disks_active: 3253 for mod in self.diskmod: 3254 if mod[0] == constants.DDM_ADD: 3255 raise errors.OpPrereqError("Can't add a disk to an instance with" 3256 " deactivated disks and" 3257 " --no-wait-for-sync given.", 3258 errors.ECODE_INVAL) 3259 3260 if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS: 3261 raise errors.OpPrereqError("Disk operations not supported for" 3262 " diskless instances", errors.ECODE_INVAL) 3263 3264 def _PrepareDiskMod(_, disk, params, __): 3265 disk.name = params.get(constants.IDISK_NAME, None)
3266 3267 # Verify disk changes (operating on a copy) 3268 inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid) 3269 disks = copy.deepcopy(inst_disks) 3270 _ApplyContainerMods("disk", disks, None, self.diskmod, None, 3271 _PrepareDiskMod, None) 3272 utils.ValidateDeviceNames("disk", disks) 3273 if len(disks) > constants.MAX_DISKS: 3274 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add" 3275 " more" % constants.MAX_DISKS, 3276 errors.ECODE_STATE) 3277 disk_sizes = [disk.size for disk in inst_disks] 3278 disk_sizes.extend(params["size"] for (op, idx, params, private) in 3279 self.diskmod if op == constants.DDM_ADD) 3280 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes) 3281 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes 3282 3283 # either --online or --offline was passed 3284 if self.op.offline is not None: 3285 if self.op.offline: 3286 msg = "can't change to offline without being down first" 3287 else: 3288 msg = "can't change to online (down) without being offline first" 3289 CheckInstanceState(self, self.instance, INSTANCE_NOT_RUNNING, 3290 msg=msg) 3291 3292 @staticmethod
3293 - def _InstanceCommunicationDDM(cfg, instance_communication, instance):
3294 """Create a NIC mod that adds or removes the instance 3295 communication NIC to a running instance. 3296 3297 The NICS are dynamically created using the Dynamic Device 3298 Modification (DDM). This function produces a NIC modification 3299 (mod) that inserts an additional NIC meant for instance 3300 communication in or removes an existing instance communication NIC 3301 from a running instance, using DDM. 3302 3303 @type cfg: L{config.ConfigWriter} 3304 @param cfg: cluster configuration 3305 3306 @type instance_communication: boolean 3307 @param instance_communication: whether instance communication is 3308 enabled or disabled 3309 3310 @type instance: L{objects.Instance} 3311 @param instance: instance to which the NIC mod will be applied to 3312 3313 @rtype: (L{constants.DDM_ADD}, -1, parameters) or 3314 (L{constants.DDM_REMOVE}, -1, parameters) or 3315 L{None} 3316 @return: DDM mod containing an action to add or remove the NIC, or 3317 None if nothing needs to be done 3318 3319 """ 3320 nic_name = _ComputeInstanceCommunicationNIC(instance.name) 3321 3322 instance_communication_nic = None 3323 3324 for nic in instance.nics: 3325 if nic.name == nic_name: 3326 instance_communication_nic = nic 3327 break 3328 3329 if instance_communication and not instance_communication_nic: 3330 action = constants.DDM_ADD 3331 params = {constants.INIC_NAME: nic_name, 3332 constants.INIC_MAC: constants.VALUE_GENERATE, 3333 constants.INIC_IP: constants.NIC_IP_POOL, 3334 constants.INIC_NETWORK: 3335 cfg.GetInstanceCommunicationNetwork()} 3336 elif not instance_communication and instance_communication_nic: 3337 action = constants.DDM_REMOVE 3338 params = None 3339 else: 3340 action = None 3341 params = None 3342 3343 if action is not None: 3344 return (action, -1, params) 3345 else: 3346 return None
3347
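# --- Illustrative note, not part of the original module ---
# The value returned by _InstanceCommunicationDDM() is a DDM modification
# tuple that CheckPrereq() appends to self.op.nics.  Enabling instance
# communication on an instance that lacks the special NIC produces a tuple
# of roughly this shape (nic_name is whatever
# _ComputeInstanceCommunicationNIC(instance.name) returns):
#
#   (constants.DDM_ADD, -1,
#    {constants.INIC_NAME: nic_name,
#     constants.INIC_MAC: constants.VALUE_GENERATE,
#     constants.INIC_IP: constants.NIC_IP_POOL,
#     constants.INIC_NETWORK: cfg.GetInstanceCommunicationNetwork()})
#
# Disabling it on an instance that has the NIC produces
# (constants.DDM_REMOVE, -1, None); in every other case None is returned.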
3348 - def CheckPrereq(self):
3349 """Check prerequisites. 3350 3351 This only checks the instance list against the existing names. 3352 3353 """ 3354 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE) 3355 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid) 3356 self.cluster = self.cfg.GetClusterInfo() 3357 cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor] 3358 3359 assert self.instance is not None, \ 3360 "Cannot retrieve locked instance %s" % self.op.instance_name 3361 3362 pnode_uuid = self.instance.primary_node 3363 3364 self.warn = [] 3365 3366 if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and 3367 not self.op.force): 3368 # verify that the instance is not up 3369 instance_info = self.rpc.call_instance_info( 3370 pnode_uuid, self.instance.name, self.instance.hypervisor, 3371 cluster_hvparams) 3372 if instance_info.fail_msg: 3373 self.warn.append("Can't get instance runtime information: %s" % 3374 instance_info.fail_msg) 3375 elif instance_info.payload: 3376 raise errors.OpPrereqError("Instance is still running on %s" % 3377 self.cfg.GetNodeName(pnode_uuid), 3378 errors.ECODE_STATE) 3379 3380 assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE) 3381 node_uuids = list(self.cfg.GetInstanceNodes(self.instance.uuid)) 3382 pnode_info = self.cfg.GetNodeInfo(pnode_uuid) 3383 3384 #_CheckInstanceNodeGroups(self.cfg, self.op.instance_name, owned_groups) 3385 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP) 3386 group_info = self.cfg.GetNodeGroup(pnode_info.group) 3387 3388 # dictionary with instance information after the modification 3389 ispec = {} 3390 3391 if self.op.hotplug or self.op.hotplug_if_possible: 3392 result = self.rpc.call_hotplug_supported(self.instance.primary_node, 3393 self.instance) 3394 if result.fail_msg: 3395 if self.op.hotplug: 3396 result.Raise("Hotplug is not possible: %s" % result.fail_msg, 3397 prereq=True, ecode=errors.ECODE_STATE) 3398 else: 3399 self.LogWarning(result.fail_msg) 3400 self.op.hotplug = False 3401 self.LogInfo("Modification will take place without hotplugging.") 3402 else: 3403 self.op.hotplug = True 3404 3405 # Prepare NIC modifications 3406 # add or remove NIC for instance communication 3407 if self.op.instance_communication is not None: 3408 mod = self._InstanceCommunicationDDM(self.cfg, 3409 self.op.instance_communication, 3410 self.instance) 3411 if mod is not None: 3412 self.op.nics.append(mod) 3413 3414 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate) 3415 3416 # disks processing 3417 assert not (self.op.disk_template and self.op.disks), \ 3418 "Can't modify disk template and apply disk changes at the same time" 3419 3420 if self.op.disk_template: 3421 self._PreCheckDiskTemplate(pnode_info) 3422 self.instance_file_storage_dir = CalculateFileStorageDir(self) 3423 3424 self._PreCheckDisks(ispec) 3425 3426 # hvparams processing 3427 if self.op.hvparams: 3428 hv_type = self.instance.hypervisor 3429 i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams) 3430 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES) 3431 hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict) 3432 3433 # local check 3434 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new) 3435 CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new) 3436 self.hv_proposed = self.hv_new = hv_new # the new actual values 3437 self.hv_inst = i_hvdict # the new dict (without defaults) 3438 else: 3439 self.hv_proposed = 
self.cluster.SimpleFillHV(self.instance.hypervisor, 3440 self.instance.os, 3441 self.instance.hvparams) 3442 self.hv_new = self.hv_inst = {} 3443 3444 # beparams processing 3445 if self.op.beparams: 3446 i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams, 3447 use_none=True) 3448 objects.UpgradeBeParams(i_bedict) 3449 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES) 3450 be_new = self.cluster.SimpleFillBE(i_bedict) 3451 self.be_proposed = self.be_new = be_new # the new actual values 3452 self.be_inst = i_bedict # the new dict (without defaults) 3453 else: 3454 self.be_new = self.be_inst = {} 3455 self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams) 3456 be_old = self.cluster.FillBE(self.instance) 3457 3458 # CPU param validation -- checking every time a parameter is 3459 # changed to cover all cases where either CPU mask or vcpus have 3460 # changed 3461 if (constants.BE_VCPUS in self.be_proposed and 3462 constants.HV_CPU_MASK in self.hv_proposed): 3463 cpu_list = \ 3464 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK]) 3465 # Verify mask is consistent with number of vCPUs. Can skip this 3466 # test if only 1 entry in the CPU mask, which means same mask 3467 # is applied to all vCPUs. 3468 if (len(cpu_list) > 1 and 3469 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]): 3470 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the" 3471 " CPU mask [%s]" % 3472 (self.be_proposed[constants.BE_VCPUS], 3473 self.hv_proposed[constants.HV_CPU_MASK]), 3474 errors.ECODE_INVAL) 3475 3476 # Only perform this test if a new CPU mask is given 3477 if constants.HV_CPU_MASK in self.hv_new and cpu_list: 3478 # Calculate the largest CPU number requested 3479 max_requested_cpu = max(map(max, cpu_list)) 3480 # Check that all of the instance's nodes have enough physical CPUs to 3481 # satisfy the requested CPU mask 3482 hvspecs = [(self.instance.hypervisor, 3483 self.cfg.GetClusterInfo() 3484 .hvparams[self.instance.hypervisor])] 3485 _CheckNodesPhysicalCPUs(self, 3486 self.cfg.GetInstanceNodes(self.instance.uuid), 3487 max_requested_cpu + 1, 3488 hvspecs) 3489 3490 # osparams processing 3491 if self.op.os_name and not self.op.force: 3492 instance_os = self.op.os_name 3493 else: 3494 instance_os = self.instance.os 3495 3496 if self.op.osparams or self.op.osparams_private: 3497 public_parms = self.op.osparams or {} 3498 private_parms = self.op.osparams_private or {} 3499 dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms) 3500 3501 if dupe_keys: 3502 raise errors.OpPrereqError("OS parameters repeated multiple times: %s" % 3503 utils.CommaJoin(dupe_keys)) 3504 3505 self.os_inst = GetUpdatedParams(self.instance.osparams, 3506 public_parms) 3507 self.os_inst_private = GetUpdatedParams(self.instance.osparams_private, 3508 private_parms) 3509 3510 CheckOSParams(self, True, node_uuids, instance_os, 3511 objects.FillDict(self.os_inst, 3512 self.os_inst_private), 3513 self.op.force_variant) 3514 3515 else: 3516 self.os_inst = {} 3517 self.os_inst_private = {} 3518 3519 #TODO(dynmem): do the appropriate check involving MINMEM 3520 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and 3521 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]): 3522 mem_check_list = [pnode_uuid] 3523 if be_new[constants.BE_AUTO_BALANCE]: 3524 # either we changed auto_balance to yes or it was from before 3525 mem_check_list.extend( 3526 self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)) 3527 instance_info = 
self.rpc.call_instance_info( 3528 pnode_uuid, self.instance.name, self.instance.hypervisor, 3529 cluster_hvparams) 3530 hvspecs = [(self.instance.hypervisor, 3531 cluster_hvparams)] 3532 nodeinfo = self.rpc.call_node_info(mem_check_list, None, 3533 hvspecs) 3534 pninfo = nodeinfo[pnode_uuid] 3535 msg = pninfo.fail_msg 3536 if msg: 3537 # Assume the primary node is unreachable and go ahead 3538 self.warn.append("Can't get info from primary node %s: %s" % 3539 (self.cfg.GetNodeName(pnode_uuid), msg)) 3540 else: 3541 (_, _, (pnhvinfo, )) = pninfo.payload 3542 if not isinstance(pnhvinfo.get("memory_free", None), int): 3543 self.warn.append("Node data from primary node %s doesn't contain" 3544 " free memory information" % 3545 self.cfg.GetNodeName(pnode_uuid)) 3546 elif instance_info.fail_msg: 3547 self.warn.append("Can't get instance runtime information: %s" % 3548 instance_info.fail_msg) 3549 else: 3550 if instance_info.payload: 3551 current_mem = int(instance_info.payload["memory"]) 3552 else: 3553 # Assume instance not running 3554 # (there is a slight race condition here, but it's not very 3555 # probable, and we have no other way to check) 3556 # TODO: Describe race condition 3557 current_mem = 0 3558 #TODO(dynmem): do the appropriate check involving MINMEM 3559 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem - 3560 pnhvinfo["memory_free"]) 3561 if miss_mem > 0: 3562 raise errors.OpPrereqError("This change will prevent the instance" 3563 " from starting, due to %d MB of memory" 3564 " missing on its primary node" % 3565 miss_mem, errors.ECODE_NORES) 3566 3567 if be_new[constants.BE_AUTO_BALANCE]: 3568 secondary_nodes = \ 3569 self.cfg.GetInstanceSecondaryNodes(self.instance.uuid) 3570 for node_uuid, nres in nodeinfo.items(): 3571 if node_uuid not in secondary_nodes: 3572 continue 3573 nres.Raise("Can't get info from secondary node %s" % 3574 self.cfg.GetNodeName(node_uuid), prereq=True, 3575 ecode=errors.ECODE_STATE) 3576 (_, _, (nhvinfo, )) = nres.payload 3577 if not isinstance(nhvinfo.get("memory_free", None), int): 3578 raise errors.OpPrereqError("Secondary node %s didn't return free" 3579 " memory information" % 3580 self.cfg.GetNodeName(node_uuid), 3581 errors.ECODE_STATE) 3582 #TODO(dynmem): do the appropriate check involving MINMEM 3583 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]: 3584 raise errors.OpPrereqError("This change will prevent the instance" 3585 " from failover to its secondary node" 3586 " %s, due to not enough memory" % 3587 self.cfg.GetNodeName(node_uuid), 3588 errors.ECODE_STATE) 3589 3590 if self.op.runtime_mem: 3591 remote_info = self.rpc.call_instance_info( 3592 self.instance.primary_node, self.instance.name, 3593 self.instance.hypervisor, 3594 cluster_hvparams) 3595 remote_info.Raise("Error checking node %s" % 3596 self.cfg.GetNodeName(self.instance.primary_node), 3597 prereq=True) 3598 if not remote_info.payload: # not running already 3599 raise errors.OpPrereqError("Instance %s is not running" % 3600 self.instance.name, errors.ECODE_STATE) 3601 3602 current_memory = remote_info.payload["memory"] 3603 if (not self.op.force and 3604 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or 3605 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])): 3606 raise errors.OpPrereqError("Instance %s must have memory between %d" 3607 " and %d MB of memory unless --force is" 3608 " given" % 3609 (self.instance.name, 3610 self.be_proposed[constants.BE_MINMEM], 3611 self.be_proposed[constants.BE_MAXMEM]), 3612 errors.ECODE_INVAL) 3613 3614 
delta = self.op.runtime_mem - current_memory 3615 if delta > 0: 3616 CheckNodeFreeMemory( 3617 self, self.instance.primary_node, 3618 "ballooning memory for instance %s" % self.instance.name, delta, 3619 self.instance.hypervisor, 3620 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor]) 3621 3622 # make self.cluster visible in the functions below 3623 cluster = self.cluster 3624 3625 def _PrepareNicCreate(_, params, private): 3626 self._PrepareNicModification(params, private, None, None, 3627 {}, cluster, pnode_uuid) 3628 return (None, None)
3629 3630 def _PrepareNicMod(_, nic, params, private): 3631 self._PrepareNicModification(params, private, nic.ip, nic.network, 3632 nic.nicparams, cluster, pnode_uuid) 3633 return None 3634 3635 def _PrepareNicRemove(_, params, __): 3636 ip = params.ip 3637 net = params.network 3638 if net is not None and ip is not None: 3639 self.cfg.ReleaseIp(net, ip, self.proc.GetECId()) 3640 3641 # Verify NIC changes (operating on copy) 3642 nics = [nic.Copy() for nic in self.instance.nics] 3643 _ApplyContainerMods("NIC", nics, None, self.nicmod, 3644 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove) 3645 if len(nics) > constants.MAX_NICS: 3646 raise errors.OpPrereqError("Instance has too many network interfaces" 3647 " (%d), cannot add more" % constants.MAX_NICS, 3648 errors.ECODE_STATE) 3649 3650 # Pre-compute NIC changes (necessary to use result in hooks) 3651 self._nic_chgdesc = [] 3652 if self.nicmod: 3653 # Operate on copies as this is still in prereq 3654 nics = [nic.Copy() for nic in self.instance.nics] 3655 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod, 3656 self._CreateNewNic, self._ApplyNicMods, 3657 self._RemoveNic) 3658 # Verify that NIC names are unique and valid 3659 utils.ValidateDeviceNames("NIC", nics) 3660 self._new_nics = nics 3661 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics) 3662 else: 3663 self._new_nics = None 3664 ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics) 3665 3666 if not self.op.ignore_ipolicy: 3667 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster, 3668 group_info) 3669 3670 # Fill ispec with backend parameters 3671 ispec[constants.ISPEC_SPINDLE_USE] = \ 3672 self.be_new.get(constants.BE_SPINDLE_USE, None) 3673 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS, 3674 None) 3675 3676 # Copy ispec to verify parameters with min/max values separately 3677 if self.op.disk_template: 3678 new_disk_template = self.op.disk_template 3679 else: 3680 new_disk_template = self.instance.disk_template 3681 ispec_max = ispec.copy() 3682 ispec_max[constants.ISPEC_MEM_SIZE] = \ 3683 self.be_new.get(constants.BE_MAXMEM, None) 3684 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max, 3685 new_disk_template) 3686 ispec_min = ispec.copy() 3687 ispec_min[constants.ISPEC_MEM_SIZE] = \ 3688 self.be_new.get(constants.BE_MINMEM, None) 3689 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min, 3690 new_disk_template) 3691 3692 if (res_max or res_min): 3693 # FIXME: Improve error message by including information about whether 3694 # the upper or lower limit of the parameter fails the ipolicy. 3695 msg = ("Instance allocation to group %s (%s) violates policy: %s" % 3696 (group_info, group_info.name, 3697 utils.CommaJoin(set(res_max + res_min)))) 3698 raise errors.OpPrereqError(msg, errors.ECODE_INVAL) 3699
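# --- Illustrative note, not part of the original module ---
# By the end of CheckPrereq() the ispec dictionary built above describes the
# instance as it would look after the modification and is validated against
# the group ipolicy.  A filled-in example (all values invented):
#
#   ispec = {
#     constants.ISPEC_DISK_COUNT: 2,
#     constants.ISPEC_DISK_SIZE: [10240, 20480],  # MiB, one entry per disk
#     constants.ISPEC_NIC_COUNT: 1,
#     constants.ISPEC_SPINDLE_USE: None,          # from BE_SPINDLE_USE
#     constants.ISPEC_CPU_COUNT: 2,               # from BE_VCPUS
#   }
#
# ISPEC_MEM_SIZE is filled twice, once from BE_MAXMEM and once from
# BE_MINMEM, so that both ends of the memory range are checked through
# _ComputeIPolicyInstanceSpecViolation().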
3700 - def _ConvertInstanceTemplate(self, feedback_fn):
3701 """Converts the disk template of an instance. 3702 3703 This function converts the disk template of an instance. It supports 3704 conversions among all the available disk templates except conversions 3705 between the LVM-based disk templates, that use their separate code path. 3706 Also, this method does not support conversions that include the 'diskless' 3707 template and those targeting the 'blockdev' template. 3708 3709 @type feedback_fn: callable 3710 @param feedback_fn: function used to send feedback back to the caller 3711 3712 @rtype: NoneType 3713 @return: None 3714 @raise errors.OpPrereqError: in case of failure 3715 3716 """ 3717 template_info = self.op.disk_template 3718 if self.op.disk_template == constants.DT_EXT: 3719 template_info = ":".join([self.op.disk_template, 3720 self.op.ext_params["provider"]]) 3721 3722 feedback_fn("Converting disk template from '%s' to '%s'" % 3723 (self.instance.disk_template, template_info)) 3724 3725 assert not (self.instance.disk_template in 3726 constants.DTS_NOT_CONVERTIBLE_FROM or 3727 self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO), \ 3728 ("Unsupported disk template conversion from '%s' to '%s'" % 3729 (self.instance.disk_template, self.op.disk_template)) 3730 3731 pnode_uuid = self.instance.primary_node 3732 snode_uuid = [] 3733 if self.op.remote_node_uuid: 3734 snode_uuid = [self.op.remote_node_uuid] 3735 3736 old_disks = self.cfg.GetInstanceDisks(self.instance.uuid) 3737 3738 feedback_fn("Generating new '%s' disk template..." % template_info) 3739 new_disks = GenerateDiskTemplate(self, 3740 self.op.disk_template, 3741 self.instance.uuid, 3742 pnode_uuid, 3743 snode_uuid, 3744 self.disks_info, 3745 self.instance_file_storage_dir, 3746 self.op.file_driver, 3747 0, 3748 feedback_fn, 3749 self.diskparams) 3750 3751 # Create the new block devices for the instance. 3752 feedback_fn("Creating new empty disks of type '%s'..." % template_info) 3753 try: 3754 CreateDisks(self, self.instance, disk_template=self.op.disk_template, 3755 disks=new_disks) 3756 except errors.OpExecError: 3757 self.LogWarning("Device creation failed") 3758 self.cfg.ReleaseDRBDMinors(self.instance.uuid) 3759 raise 3760 3761 # Transfer the data from the old to the newly created disks of the instance. 3762 feedback_fn("Populating the new empty disks of type '%s'..." % 3763 template_info) 3764 for idx, (old, new) in enumerate(zip(old_disks, new_disks)): 3765 feedback_fn(" - copying data from disk %s (%s), size %s" % 3766 (idx, self.instance.disk_template, 3767 utils.FormatUnit(new.size, "h"))) 3768 if self.instance.disk_template == constants.DT_DRBD8: 3769 old = old.children[0] 3770 result = self.rpc.call_blockdev_convert(pnode_uuid, (old, self.instance), 3771 (new, self.instance)) 3772 msg = result.fail_msg 3773 if msg: 3774 # A disk failed to copy. Abort the conversion operation and rollback 3775 # the modifications to the previous state. The instance will remain 3776 # intact. 3777 if self.op.disk_template == constants.DT_DRBD8: 3778 new = new.children[0] 3779 self.Log(" - ERROR: Could not copy disk '%s' to '%s'" % 3780 (old.logical_id[1], new.logical_id[1])) 3781 try: 3782 self.LogInfo("Some disks failed to copy") 3783 self.LogInfo("The instance will not be affected, aborting operation") 3784 self.LogInfo("Removing newly created disks of type '%s'..." 
% 3785 template_info) 3786 RemoveDisks(self, self.instance, disk_template=self.op.disk_template, 3787 disks=new_disks) 3788 self.LogInfo("Newly created disks removed successfully") 3789 finally: 3790 self.cfg.ReleaseDRBDMinors(self.instance.uuid) 3791 result.Raise("Error while converting the instance's template") 3792 3793 # In case of DRBD disk, return its port to the pool 3794 if self.instance.disk_template == constants.DT_DRBD8: 3795 for disk in old_disks: 3796 tcp_port = disk.logical_id[2] 3797 self.cfg.AddTcpUdpPort(tcp_port) 3798 3799 # Remove old disks from the instance. 3800 feedback_fn("Detaching old disks (%s) from the instance and removing" 3801 " them from cluster config" % self.instance.disk_template) 3802 for old_disk in old_disks: 3803 self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid) 3804 3805 # The old disk_template will be needed to remove the old block devices. 3806 old_disk_template = self.instance.disk_template 3807 3808 # Update the disk template of the instance 3809 self.cfg.SetInstanceDiskTemplate(self.instance.uuid, self.op.disk_template) 3810 3811 # Attach the new disks to the instance. 3812 feedback_fn("Adding new disks (%s) to cluster config and attaching" 3813 " them to the instance" % template_info) 3814 for (idx, new_disk) in enumerate(new_disks): 3815 self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx) 3816 3817 # Re-read the instance from the configuration. 3818 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid) 3819 3820 # Release node locks while waiting for sync and disks removal. 3821 ReleaseLocks(self, locking.LEVEL_NODE) 3822 3823 disk_abort = not WaitForSync(self, self.instance, 3824 oneshot=not self.op.wait_for_sync) 3825 if disk_abort: 3826 raise errors.OpExecError("There are some degraded disks for" 3827 " this instance, please cleanup manually") 3828 3829 feedback_fn("Removing old block devices of type '%s'..." % 3830 old_disk_template) 3831 RemoveDisks(self, self.instance, disk_template=old_disk_template, 3832 disks=old_disks)
3833 3834 # Node resource locks will be released by the caller. 3835
3836 - def _ConvertPlainToDrbd(self, feedback_fn):
3837 """Converts an instance from plain to drbd. 3838 3839 """ 3840 feedback_fn("Converting disk template from 'plain' to 'drbd'") 3841 3842 pnode_uuid = self.instance.primary_node 3843 snode_uuid = self.op.remote_node_uuid 3844 3845 assert self.instance.disk_template == constants.DT_PLAIN 3846 3847 old_disks = self.cfg.GetInstanceDisks(self.instance.uuid) 3848 new_disks = GenerateDiskTemplate(self, self.op.disk_template, 3849 self.instance.uuid, pnode_uuid, 3850 [snode_uuid], self.disks_info, 3851 None, None, 0, 3852 feedback_fn, self.diskparams) 3853 anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams) 3854 p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid) 3855 s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid) 3856 info = GetInstanceInfoText(self.instance) 3857 feedback_fn("Creating additional volumes...") 3858 # first, create the missing data and meta devices 3859 for disk in anno_disks: 3860 # unfortunately this is... not too nice 3861 CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1], 3862 info, True, p_excl_stor) 3863 for child in disk.children: 3864 CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True, 3865 s_excl_stor) 3866 # at this stage, all new LVs have been created, we can rename the 3867 # old ones 3868 feedback_fn("Renaming original volumes...") 3869 rename_list = [(o, n.children[0].logical_id) 3870 for (o, n) in zip(old_disks, new_disks)] 3871 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list) 3872 result.Raise("Failed to rename original LVs") 3873 3874 feedback_fn("Initializing DRBD devices...") 3875 # all child devices are in place, we can now create the DRBD devices 3876 try: 3877 for disk in anno_disks: 3878 for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor), 3879 (snode_uuid, s_excl_stor)]: 3880 f_create = node_uuid == pnode_uuid 3881 CreateSingleBlockDev(self, node_uuid, self.instance, disk, info, 3882 f_create, excl_stor) 3883 except errors.GenericError, e: 3884 feedback_fn("Initializing of DRBD devices failed;" 3885 " renaming back original volumes...") 3886 rename_back_list = [(n.children[0], o.logical_id) 3887 for (n, o) in zip(new_disks, old_disks)] 3888 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list) 3889 result.Raise("Failed to rename LVs back after error %s" % str(e)) 3890 raise 3891 3892 # Remove the old disks from the instance 3893 for old_disk in old_disks: 3894 self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid) 3895 3896 # Update the disk template of the instance 3897 self.cfg.SetInstanceDiskTemplate(self.instance.uuid, constants.DT_DRBD8) 3898 3899 # Attach the new disks to the instance 3900 for (idx, new_disk) in enumerate(new_disks): 3901 self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx) 3902 3903 # re-read the instance from the configuration 3904 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid) 3905 3906 # Release node locks while waiting for sync 3907 ReleaseLocks(self, locking.LEVEL_NODE) 3908 3909 # disks are created, waiting for sync 3910 disk_abort = not WaitForSync(self, self.instance, 3911 oneshot=not self.op.wait_for_sync) 3912 if disk_abort: 3913 raise errors.OpExecError("There are some degraded disks for" 3914 " this instance, please cleanup manually")
3915 3916 # Node resource locks will be released by the caller 3917
3918 - def _ConvertDrbdToPlain(self, feedback_fn):
3919 """Converts an instance from drbd to plain. 3920 3921 """ 3922 secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid) 3923 3924 assert self.instance.disk_template == constants.DT_DRBD8 3925 assert len(secondary_nodes) == 1 or not self.instance.disks 3926 3927 # it will not be possible to calculate the snode_uuid later 3928 snode_uuid = None 3929 if secondary_nodes: 3930 snode_uuid = secondary_nodes[0] 3931 3932 feedback_fn("Converting disk template from 'drbd' to 'plain'") 3933 3934 disks = self.cfg.GetInstanceDisks(self.instance.uuid) 3935 old_disks = AnnotateDiskParams(self.instance, disks, self.cfg) 3936 new_disks = [d.children[0] for d in disks] 3937 3938 # copy over size, mode and name 3939 for parent, child in zip(old_disks, new_disks): 3940 child.size = parent.size 3941 child.mode = parent.mode 3942 child.name = parent.name 3943 3944 # this is a DRBD disk, return its port to the pool 3945 for disk in old_disks: 3946 tcp_port = disk.logical_id[2] 3947 self.cfg.AddTcpUdpPort(tcp_port) 3948 3949 # Remove the old disks from the instance 3950 for old_disk in old_disks: 3951 self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid) 3952 3953 # Update the disk template of the instance 3954 self.cfg.SetInstanceDiskTemplate(self.instance.uuid, constants.DT_PLAIN) 3955 3956 # Attach the new disks to the instance 3957 for (idx, new_disk) in enumerate(new_disks): 3958 self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx) 3959 3960 # re-read the instance from the configuration 3961 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid) 3962 3963 # Release locks in case removing disks takes a while 3964 ReleaseLocks(self, locking.LEVEL_NODE) 3965 3966 feedback_fn("Removing volumes on the secondary node...") 3967 RemoveDisks(self, self.instance, disk_template=constants.DT_DRBD8, 3968 disks=old_disks, target_node_uuid=snode_uuid) 3969 3970 feedback_fn("Removing unneeded volumes on the primary node...") 3971 meta_disks = [] 3972 for idx, disk in enumerate(old_disks): 3973 meta_disks.append(disk.children[1]) 3974 RemoveDisks(self, self.instance, disk_template=constants.DT_DRBD8, 3975 disks=meta_disks)
3976
3977 - def _HotplugDevice(self, action, dev_type, device, extra, seq):
3978 self.LogInfo("Trying to hotplug device...") 3979 msg = "hotplug:" 3980 result = self.rpc.call_hotplug_device(self.instance.primary_node, 3981 self.instance, action, dev_type, 3982 (device, self.instance), 3983 extra, seq) 3984 if result.fail_msg: 3985 self.LogWarning("Could not hotplug device: %s" % result.fail_msg) 3986 self.LogInfo("Continuing execution...") 3987 msg += "failed" 3988 else: 3989 self.LogInfo("Hotplug done.") 3990 msg += "done" 3991 return msg
3992
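# --- Illustrative note, not part of the original module ---
# _HotplugDevice() returns either "hotplug:done" or "hotplug:failed"; the
# per-device helpers below append that string to the change description list
# that Exec() eventually returns.  For example, in _CreateNewDisk():
#
#   msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
#                             constants.HOTPLUG_TARGET_DISK,
#                             disk, (link_name, uri), idx)
#   changes.append(("disk/%d" % idx, msg))  # e.g. ("disk/2", "hotplug:done")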
3993 - def _CreateNewDisk(self, idx, params, _):
3994 """Creates a new disk. 3995 3996 """ 3997 # add a new disk 3998 instance_disks = self.cfg.GetInstanceDisks(self.instance.uuid) 3999 if self.instance.disk_template in constants.DTS_FILEBASED: 4000 (file_driver, file_path) = instance_disks[0].logical_id 4001 file_path = os.path.dirname(file_path) 4002 else: 4003 file_driver = file_path = None 4004 4005 secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid) 4006 disk = \ 4007 GenerateDiskTemplate(self, self.instance.disk_template, 4008 self.instance.uuid, self.instance.primary_node, 4009 secondary_nodes, [params], file_path, 4010 file_driver, idx, self.Log, self.diskparams)[0] 4011 4012 new_disks = CreateDisks(self, self.instance, disks=[disk]) 4013 self.cfg.AddInstanceDisk(self.instance.uuid, disk, idx) 4014 4015 # re-read the instance from the configuration 4016 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid) 4017 4018 if self.cluster.prealloc_wipe_disks: 4019 # Wipe new disk 4020 WipeOrCleanupDisks(self, self.instance, 4021 disks=[(idx, disk, 0)], 4022 cleanup=new_disks) 4023 4024 changes = [ 4025 ("disk/%d" % idx, 4026 "add:size=%s,mode=%s" % (disk.size, disk.mode)), 4027 ] 4028 if self.op.hotplug: 4029 result = self.rpc.call_blockdev_assemble(self.instance.primary_node, 4030 (disk, self.instance), 4031 self.instance, True, idx) 4032 if result.fail_msg: 4033 changes.append(("disk/%d" % idx, "assemble:failed")) 4034 self.LogWarning("Can't assemble newly created disk %d: %s", 4035 idx, result.fail_msg) 4036 else: 4037 _, link_name, uri = result.payload 4038 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD, 4039 constants.HOTPLUG_TARGET_DISK, 4040 disk, (link_name, uri), idx) 4041 changes.append(("disk/%d" % idx, msg)) 4042 4043 return (disk, changes)
4044
4045 - def _PostAddDisk(self, _, disk):
4046 if not WaitForSync(self, self.instance, disks=[disk], 4047 oneshot=not self.op.wait_for_sync): 4048 raise errors.OpExecError("Failed to sync disks of %s" % 4049 self.instance.name) 4050 4051 # the disk is active at this point, so deactivate it if the instance disks 4052 # are supposed to be inactive 4053 if not self.instance.disks_active: 4054 ShutdownInstanceDisks(self, self.instance, disks=[disk])
4055
4056 - def _ModifyDisk(self, idx, disk, params, _):
4057 """Modifies a disk. 4058 4059 """ 4060 changes = [] 4061 if constants.IDISK_MODE in params: 4062 disk.mode = params.get(constants.IDISK_MODE) 4063 changes.append(("disk.mode/%d" % idx, disk.mode)) 4064 4065 if constants.IDISK_NAME in params: 4066 disk.name = params.get(constants.IDISK_NAME) 4067 changes.append(("disk.name/%d" % idx, disk.name)) 4068 4069 # Modify arbitrary params in case instance template is ext 4070 for key, value in params.iteritems(): 4071 if (key not in constants.MODIFIABLE_IDISK_PARAMS and 4072 self.instance.disk_template == constants.DT_EXT): 4073 # stolen from GetUpdatedParams: default means reset/delete 4074 if value.lower() == constants.VALUE_DEFAULT: 4075 try: 4076 del disk.params[key] 4077 except KeyError: 4078 pass 4079 else: 4080 disk.params[key] = value 4081 changes.append(("disk.params:%s/%d" % (key, idx), value)) 4082 4083 # Update disk object 4084 self.cfg.Update(disk, self.feedback_fn) 4085 4086 return changes
4087
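# --- Illustrative note, not part of the original module ---
# For DT_EXT instances _ModifyDisk() also accepts arbitrary provider
# parameters; passing the literal value "default" (constants.VALUE_DEFAULT)
# deletes the parameter from disk.params, mirroring GetUpdatedParams().  A
# hypothetical params dict for a modify operation ("iops_limit" and "pool"
# are invented provider parameters):
#
#   {constants.IDISK_MODE: "ro",                # handled explicitly above
#    "iops_limit": constants.VALUE_DEFAULT,     # reset: delete from disk.params
#    "pool": "fast-ssd"}                        # set disk.params["pool"]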
4088 - def _RemoveDisk(self, idx, root, _):
4089 """Removes a disk. 4090 4091 """ 4092 hotmsg = "" 4093 if self.op.hotplug: 4094 hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE, 4095 constants.HOTPLUG_TARGET_DISK, 4096 root, None, idx) 4097 ShutdownInstanceDisks(self, self.instance, [root]) 4098 4099 RemoveDisks(self, self.instance, disks=[root]) 4100 4101 # if this is a DRBD disk, return its port to the pool 4102 if root.dev_type in constants.DTS_DRBD: 4103 self.cfg.AddTcpUdpPort(root.logical_id[2]) 4104 4105 # Remove disk from config 4106 self.cfg.RemoveInstanceDisk(self.instance.uuid, root.uuid) 4107 4108 # re-read the instance from the configuration 4109 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid) 4110 4111 return hotmsg
4112
4113 - def _CreateNewNic(self, idx, params, private):
4114 """Creates data structure for a new network interface. 4115 4116 """ 4117 mac = params[constants.INIC_MAC] 4118 ip = params.get(constants.INIC_IP, None) 4119 net = params.get(constants.INIC_NETWORK, None) 4120 name = params.get(constants.INIC_NAME, None) 4121 net_uuid = self.cfg.LookupNetwork(net) 4122 #TODO: not private.filled?? can a nic have no nicparams?? 4123 nicparams = private.filled 4124 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name, 4125 nicparams=nicparams) 4126 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId()) 4127 4128 changes = [ 4129 ("nic.%d" % idx, 4130 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" % 4131 (mac, ip, private.filled[constants.NIC_MODE], 4132 private.filled[constants.NIC_LINK], net)), 4133 ] 4134 4135 if self.op.hotplug: 4136 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD, 4137 constants.HOTPLUG_TARGET_NIC, 4138 nobj, None, idx) 4139 changes.append(("nic.%d" % idx, msg)) 4140 4141 return (nobj, changes)
4142
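# --- Illustrative note, not part of the original module ---
# The change description emitted by _CreateNewNic() follows the
# "add:mac=...,ip=...,mode=...,link=...,network=..." format built above; with
# invented values it might read:
#
#   ("nic.1", "add:mac=generate,ip=None,mode=bridged,link=xen-br0,network=None")
#
# mac and ip come straight from the DDM parameters, mode and link from the
# filled nicparams, and a second ("nic.1", "hotplug:done") entry is appended
# when hotplugging is in effect.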
4143 - def _ApplyNicMods(self, idx, nic, params, private):
4144 """Modifies a network interface. 4145 4146 """ 4147 changes = [] 4148 4149 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]: 4150 if key in params: 4151 changes.append(("nic.%s/%d" % (key, idx), params[key])) 4152 setattr(nic, key, params[key]) 4153 4154 new_net = params.get(constants.INIC_NETWORK, nic.network) 4155 new_net_uuid = self.cfg.LookupNetwork(new_net) 4156 if new_net_uuid != nic.network: 4157 changes.append(("nic.network/%d" % idx, new_net)) 4158 nic.network = new_net_uuid 4159 4160 if private.filled: 4161 nic.nicparams = private.filled 4162 4163 for (key, val) in nic.nicparams.items(): 4164 changes.append(("nic.%s/%d" % (key, idx), val)) 4165 4166 if self.op.hotplug: 4167 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY, 4168 constants.HOTPLUG_TARGET_NIC, 4169 nic, None, idx) 4170 changes.append(("nic/%d" % idx, msg)) 4171 4172 return changes
4173
4174 - def _RemoveNic(self, idx, nic, _):
4175 if self.op.hotplug: 4176 return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE, 4177 constants.HOTPLUG_TARGET_NIC, 4178 nic, None, idx)
4179
4180 - def Exec(self, feedback_fn):
4181 """Modifies an instance. 4182 4183 All parameters take effect only at the next restart of the instance. 4184 4185 """ 4186 self.feedback_fn = feedback_fn 4187 # Process here the warnings from CheckPrereq, as we don't have a 4188 # feedback_fn there. 4189 # TODO: Replace with self.LogWarning 4190 for warn in self.warn: 4191 feedback_fn("WARNING: %s" % warn) 4192 4193 assert ((self.op.disk_template is None) ^ 4194 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \ 4195 "Not owning any node resource locks" 4196 4197 result = [] 4198 4199 # New primary node 4200 if self.op.pnode_uuid: 4201 self.instance.primary_node = self.op.pnode_uuid 4202 4203 # runtime memory 4204 if self.op.runtime_mem: 4205 rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node, 4206 self.instance, 4207 self.op.runtime_mem) 4208 rpcres.Raise("Cannot modify instance runtime memory") 4209 result.append(("runtime_memory", self.op.runtime_mem)) 4210 4211 # Apply disk changes 4212 inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid) 4213 _ApplyContainerMods("disk", inst_disks, result, self.diskmod, 4214 self._CreateNewDisk, self._ModifyDisk, 4215 self._RemoveDisk, post_add_fn=self._PostAddDisk) 4216 4217 if self.op.disk_template: 4218 if __debug__: 4219 check_nodes = set(self.cfg.GetInstanceNodes(self.instance.uuid)) 4220 if self.op.remote_node_uuid: 4221 check_nodes.add(self.op.remote_node_uuid) 4222 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]: 4223 owned = self.owned_locks(level) 4224 assert not (check_nodes - owned), \ 4225 ("Not owning the correct locks, owning %r, expected at least %r" % 4226 (owned, check_nodes)) 4227 4228 r_shut = ShutdownInstanceDisks(self, self.instance) 4229 if not r_shut: 4230 raise errors.OpExecError("Cannot shutdown instance disks, unable to" 4231 " proceed with disk template conversion") 4232 mode = (self.instance.disk_template, self.op.disk_template) 4233 try: 4234 if mode in self._DISK_CONVERSIONS: 4235 self._DISK_CONVERSIONS[mode](self, feedback_fn) 4236 else: 4237 self._ConvertInstanceTemplate(feedback_fn) 4238 except: 4239 self.cfg.ReleaseDRBDMinors(self.instance.uuid) 4240 raise 4241 result.append(("disk_template", self.op.disk_template)) 4242 4243 assert self.instance.disk_template == self.op.disk_template, \ 4244 ("Expected disk template '%s', found '%s'" % 4245 (self.op.disk_template, self.instance.disk_template)) 4246 4247 # Release node and resource locks if there are any (they might already have 4248 # been released during disk conversion) 4249 ReleaseLocks(self, locking.LEVEL_NODE) 4250 ReleaseLocks(self, locking.LEVEL_NODE_RES) 4251 4252 # Apply NIC changes 4253 if self._new_nics is not None: 4254 self.instance.nics = self._new_nics 4255 result.extend(self._nic_chgdesc) 4256 4257 # hvparams changes 4258 if self.op.hvparams: 4259 self.instance.hvparams = self.hv_inst 4260 for key, val in self.op.hvparams.iteritems(): 4261 result.append(("hv/%s" % key, val)) 4262 4263 # beparams changes 4264 if self.op.beparams: 4265 self.instance.beparams = self.be_inst 4266 for key, val in self.op.beparams.iteritems(): 4267 result.append(("be/%s" % key, val)) 4268 4269 # OS change 4270 if self.op.os_name: 4271 self.instance.os = self.op.os_name 4272 4273 # osparams changes 4274 if self.op.osparams: 4275 self.instance.osparams = self.os_inst 4276 for key, val in self.op.osparams.iteritems(): 4277 result.append(("os/%s" % key, val)) 4278 4279 if self.op.osparams_private: 4280 self.instance.osparams_private = self.os_inst_private 4281 for key, val in 
self.op.osparams_private.iteritems(): 4282 # Show the Private(...) blurb. 4283 result.append(("os_private/%s" % key, repr(val))) 4284 4285 self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId()) 4286 4287 if self.op.offline is None: 4288 # Ignore 4289 pass 4290 elif self.op.offline: 4291 # Mark instance as offline 4292 self.instance = self.cfg.MarkInstanceOffline(self.instance.uuid) 4293 result.append(("admin_state", constants.ADMINST_OFFLINE)) 4294 else: 4295 # Mark instance as online, but stopped 4296 self.instance = self.cfg.MarkInstanceDown(self.instance.uuid) 4297 result.append(("admin_state", constants.ADMINST_DOWN)) 4298 4299 UpdateMetadata(feedback_fn, self.rpc, self.instance) 4300 4301 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or 4302 self.owned_locks(locking.LEVEL_NODE)), \ 4303 "All node locks should have been released by now" 4304 4305 return result
4306 4307 _DISK_CONVERSIONS = { 4308 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd, 4309 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain, 4310 } 4311
4312 4313 -class LUInstanceChangeGroup(LogicalUnit):
4314 HPATH = "instance-change-group" 4315 HTYPE = constants.HTYPE_INSTANCE 4316 REQ_BGL = False 4317
4318 - def ExpandNames(self):
4319 self.share_locks = ShareAll() 4320 4321 self.needed_locks = { 4322 locking.LEVEL_NODEGROUP: [], 4323 locking.LEVEL_NODE: [], 4324 locking.LEVEL_NODE_ALLOC: locking.ALL_SET, 4325 } 4326 4327 self._ExpandAndLockInstance() 4328 4329 if self.op.target_groups: 4330 self.req_target_uuids = map(self.cfg.LookupNodeGroup, 4331 self.op.target_groups) 4332 else: 4333 self.req_target_uuids = None 4334 4335 self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)
4336
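# --- Illustrative note, not part of the original module ---
# After ExpandNames() the lock structure is roughly the following (the
# instance lock itself is added by _ExpandAndLockInstance()), with all locks
# acquired in shared mode because of ShareAll():
#
#   self.needed_locks = {
#     locking.LEVEL_INSTANCE: self.op.instance_name,
#     locking.LEVEL_NODEGROUP: [],              # filled in DeclareLocks()
#     locking.LEVEL_NODE: [],                   # filled in DeclareLocks()
#     locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
#   }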
4337 - def DeclareLocks(self, level):
4338 if level == locking.LEVEL_NODEGROUP: 4339 assert not self.needed_locks[locking.LEVEL_NODEGROUP] 4340 4341 if self.req_target_uuids: 4342 lock_groups = set(self.req_target_uuids) 4343 4344 # Lock all groups used by instance optimistically; this requires going 4345 # via the node before it's locked, requiring verification later on 4346 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid) 4347 lock_groups.update(instance_groups) 4348 else: 4349 # No target groups, need to lock all of them 4350 lock_groups = locking.ALL_SET 4351 4352 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups 4353 4354 elif level == locking.LEVEL_NODE: 4355 if self.req_target_uuids: 4356 # Lock all nodes used by instances 4357 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND 4358 self._LockInstancesNodes() 4359 4360 # Lock all nodes in all potential target groups 4361 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) | 4362 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)) 4363 member_nodes = [node_uuid 4364 for group in lock_groups 4365 for node_uuid in self.cfg.GetNodeGroup(group).members] 4366 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes) 4367 else: 4368 # Lock all nodes as all groups are potential targets 4369 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
4370
4371 - def CheckPrereq(self):
4372 owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE)) 4373 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) 4374 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE)) 4375 4376 assert (self.req_target_uuids is None or 4377 owned_groups.issuperset(self.req_target_uuids)) 4378 assert owned_instance_names == set([self.op.instance_name]) 4379 4380 # Get instance information 4381 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid) 4382 4383 # Check if node groups for locked instance are still correct 4384 instance_all_nodes = self.cfg.GetInstanceNodes(self.instance.uuid) 4385 assert owned_nodes.issuperset(instance_all_nodes), \ 4386 ("Instance %s's nodes changed while we kept the lock" % 4387 self.op.instance_name) 4388 4389 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid, 4390 owned_groups) 4391 4392 if self.req_target_uuids: 4393 # User requested specific target groups 4394 self.target_uuids = frozenset(self.req_target_uuids) 4395 else: 4396 # All groups except those used by the instance are potential targets 4397 self.target_uuids = owned_groups - inst_groups 4398 4399 conflicting_groups = self.target_uuids & inst_groups 4400 if conflicting_groups: 4401 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are" 4402 " used by the instance '%s'" % 4403 (utils.CommaJoin(conflicting_groups), 4404 self.op.instance_name), 4405 errors.ECODE_INVAL) 4406 4407 if not self.target_uuids: 4408 raise errors.OpPrereqError("There are no possible target groups", 4409 errors.ECODE_INVAL)
4410
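# --- Illustrative note, not part of the original module ---
# With no explicit target groups, every owned group the instance does not
# already use becomes a candidate target (invented UUIDs below):
#
#   owned_groups = frozenset(["g-uuid-1", "g-uuid-2", "g-uuid-3"])
#   inst_groups  = frozenset(["g-uuid-1"])      # groups of the instance's nodes
#   target_uuids = owned_groups - inst_groups   # -> {"g-uuid-2", "g-uuid-3"}
#
# Explicitly requesting a group that the instance already uses is rejected as
# a conflicting target, and an empty result raises "no possible target groups".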
4411 - def BuildHooksEnv(self):
4412 """Build hooks env. 4413 4414 """ 4415 assert self.target_uuids 4416 4417 env = { 4418 "TARGET_GROUPS": " ".join(self.target_uuids), 4419 } 4420 4421 env.update(BuildInstanceHookEnvByObject(self, self.instance)) 4422 4423 return env
4424
4425 - def BuildHooksNodes(self):
4426 """Build hooks nodes. 4427 4428 """ 4429 mn = self.cfg.GetMasterNode() 4430 return ([mn], [mn])
4431
4432 - def Exec(self, feedback_fn):
4433 instances = list(self.owned_locks(locking.LEVEL_INSTANCE)) 4434 4435 assert instances == [self.op.instance_name], "Instance not locked" 4436 4437 req = iallocator.IAReqGroupChange(instances=instances, 4438 target_groups=list(self.target_uuids)) 4439 ial = iallocator.IAllocator(self.cfg, self.rpc, req) 4440 4441 ial.Run(self.op.iallocator) 4442 4443 if not ial.success: 4444 raise errors.OpPrereqError("Can't compute solution for changing group of" 4445 " instance '%s' using iallocator '%s': %s" % 4446 (self.op.instance_name, self.op.iallocator, 4447 ial.info), errors.ECODE_NORES) 4448 4449 jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False) 4450 4451 self.LogInfo("Iallocator returned %s job(s) for changing group of" 4452 " instance '%s'", len(jobs), self.op.instance_name) 4453 4454 return ResultWithJobs(jobs)
4455