
Source Code for Module ganeti.cmdlib.instance

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc. 
   5  # 
   6  # This program is free software; you can redistribute it and/or modify 
   7  # it under the terms of the GNU General Public License as published by 
   8  # the Free Software Foundation; either version 2 of the License, or 
   9  # (at your option) any later version. 
  10  # 
  11  # This program is distributed in the hope that it will be useful, but 
  12  # WITHOUT ANY WARRANTY; without even the implied warranty of 
  13  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
  14  # General Public License for more details. 
  15  # 
  16  # You should have received a copy of the GNU General Public License 
  17  # along with this program; if not, write to the Free Software 
  18  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
  19  # 02110-1301, USA. 
  20   
  21   
  22  """Logical units dealing with instances.""" 
  23   
  24  import OpenSSL 
  25  import copy 
  26  import logging 
  27  import os 
  28   
  29  from ganeti import compat 
  30  from ganeti import constants 
  31  from ganeti import errors 
  32  from ganeti import ht 
  33  from ganeti import hypervisor 
  34  from ganeti import locking 
  35  from ganeti.masterd import iallocator 
  36  from ganeti import masterd 
  37  from ganeti import netutils 
  38  from ganeti import objects 
  39  from ganeti import opcodes 
  40  from ganeti import pathutils 
  41  from ganeti import rpc 
  42  from ganeti import utils 
  43   
  44  from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs 
  45   
  46  from ganeti.cmdlib.common import INSTANCE_DOWN, \ 
  47    INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \ 
  48    ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \ 
  49    LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \ 
  50    IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \ 
  51    AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \ 
  52    ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \ 
  53    CheckDiskTemplateEnabled 
  54  from ganeti.cmdlib.instance_storage import CreateDisks, \ 
  55    CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \ 
  56    IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \ 
  57    CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \ 
  58    StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \ 
  59    CheckSpindlesExclusiveStorage 
  60  from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \ 
  61    GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \ 
  62    NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \ 
  63    ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \ 
  64    GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \ 
  65    CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS 
  66   
  67  import ganeti.masterd.instance 
  68   
  69   
  70  #: Type description for changes as returned by L{_ApplyContainerMods}'s 
  71  #: callbacks 
  72  _TApplyContModsCbChanges = \ 
  73    ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([ 
  74      ht.TNonEmptyString, 
  75      ht.TAny, 
  76      ]))) 
77 78 79 -def _CheckHostnameSane(lu, name):
80 """Ensures that a given hostname resolves to a 'sane' name. 81 82 The given name is required to be a prefix of the resolved hostname, 83 to prevent accidental mismatches. 84 85 @param lu: the logical unit on behalf of which we're checking 86 @param name: the name we should resolve and check 87 @return: the resolved hostname object 88 89 """ 90 hostname = netutils.GetHostname(name=name) 91 if hostname.name != name: 92 lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name) 93 if not utils.MatchNameComponent(name, [hostname.name]): 94 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the" 95 " same as given hostname '%s'") % 96 (hostname.name, name), errors.ECODE_INVAL) 97 return hostname
98
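As a sketch (hostnames hypothetical, lu being the calling logical unit): the resolved FQDN is accepted only if the name the user gave is a prefix component of it.

# "web1" resolving to "web1.example.com" passes; the FQDN is used afterwards:
hostname = _CheckHostnameSane(lu, "web1")
assert hostname.name == "web1.example.com"

# If "web1" instead resolved to, say, "node7.example.com", the prefix match
# fails and errors.OpPrereqError is raised.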
99 100 -def _CheckOpportunisticLocking(op):
101 """Generate error if opportunistic locking is not possible. 102 103 """ 104 if op.opportunistic_locking and not op.iallocator: 105 raise errors.OpPrereqError("Opportunistic locking is only available in" 106 " combination with an instance allocator", 107 errors.ECODE_INVAL)
108
  109   
  110  def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_name_whitelist): 
  111    """Wrapper around IAReqInstanceAlloc. 
  112   
  113    @param op: The instance opcode 
  114    @param disks: The computed disks 
  115    @param nics: The computed nics 
  116    @param beparams: The fully filled beparams 
  117    @param node_name_whitelist: List of nodes which should appear as online to the 
  118      allocator (unless the node is already marked offline) 
  119   
  120    @returns: A filled L{iallocator.IAReqInstanceAlloc} 
  121   
  122    """ 
  123    spindle_use = beparams[constants.BE_SPINDLE_USE] 
  124    return iallocator.IAReqInstanceAlloc(name=op.instance_name, 
  125                                         disk_template=op.disk_template, 
  126                                         tags=op.tags, 
  127                                         os=op.os_type, 
  128                                         vcpus=beparams[constants.BE_VCPUS], 
  129                                         memory=beparams[constants.BE_MAXMEM], 
  130                                         spindle_use=spindle_use, 
  131                                         disks=disks, 
  132                                         nics=[n.ToDict() for n in nics], 
  133                                         hypervisor=op.hypervisor, 
  134                                         node_whitelist=node_name_whitelist) 
135
136 137 -def _ComputeFullBeParams(op, cluster):
138 """Computes the full beparams. 139 140 @param op: The instance opcode 141 @param cluster: The cluster config object 142 143 @return: The fully filled beparams 144 145 """ 146 default_beparams = cluster.beparams[constants.PP_DEFAULT] 147 for param, value in op.beparams.iteritems(): 148 if value == constants.VALUE_AUTO: 149 op.beparams[param] = default_beparams[param] 150 objects.UpgradeBeParams(op.beparams) 151 utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES) 152 return cluster.SimpleFillBE(op.beparams)
153
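A sketch of the parameter filling, with hypothetical values, may help:

# Hypothetical opcode input and cluster defaults:
#   op.beparams                 = {"vcpus": "auto", "maxmem": 2048}
#   cluster.beparams["default"] = {"vcpus": 1, "maxmem": 1024, "minmem": 1024}
#
# 1. every "auto" value is replaced by the cluster default  -> vcpus = 1
# 2. UpgradeBeParams converts a legacy "memory" key, if any, to maxmem/minmem
# 3. ForceDictType checks the value types
# 4. SimpleFillBE layers the remaining cluster defaults underneath
#
# Result (roughly): {"vcpus": 1, "maxmem": 2048, "minmem": 1024, ...}
be_full = _ComputeFullBeParams(op, cluster)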
154 155 -def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
156 """Computes the nics. 157 158 @param op: The instance opcode 159 @param cluster: Cluster configuration object 160 @param default_ip: The default ip to assign 161 @param cfg: An instance of the configuration object 162 @param ec_id: Execution context ID 163 164 @returns: The build up nics 165 166 """ 167 nics = [] 168 for nic in op.nics: 169 nic_mode_req = nic.get(constants.INIC_MODE, None) 170 nic_mode = nic_mode_req 171 if nic_mode is None or nic_mode == constants.VALUE_AUTO: 172 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE] 173 174 net = nic.get(constants.INIC_NETWORK, None) 175 link = nic.get(constants.NIC_LINK, None) 176 ip = nic.get(constants.INIC_IP, None) 177 178 if net is None or net.lower() == constants.VALUE_NONE: 179 net = None 180 else: 181 if nic_mode_req is not None or link is not None: 182 raise errors.OpPrereqError("If network is given, no mode or link" 183 " is allowed to be passed", 184 errors.ECODE_INVAL) 185 186 # ip validity checks 187 if ip is None or ip.lower() == constants.VALUE_NONE: 188 nic_ip = None 189 elif ip.lower() == constants.VALUE_AUTO: 190 if not op.name_check: 191 raise errors.OpPrereqError("IP address set to auto but name checks" 192 " have been skipped", 193 errors.ECODE_INVAL) 194 nic_ip = default_ip 195 else: 196 # We defer pool operations until later, so that the iallocator has 197 # filled in the instance's node(s) dimara 198 if ip.lower() == constants.NIC_IP_POOL: 199 if net is None: 200 raise errors.OpPrereqError("if ip=pool, parameter network" 201 " must be passed too", 202 errors.ECODE_INVAL) 203 204 elif not netutils.IPAddress.IsValid(ip): 205 raise errors.OpPrereqError("Invalid IP address '%s'" % ip, 206 errors.ECODE_INVAL) 207 208 nic_ip = ip 209 210 # TODO: check the ip address for uniqueness 211 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip: 212 raise errors.OpPrereqError("Routed nic mode requires an ip address", 213 errors.ECODE_INVAL) 214 215 # MAC address verification 216 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO) 217 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE): 218 mac = utils.NormalizeAndValidateMac(mac) 219 220 try: 221 # TODO: We need to factor this out 222 cfg.ReserveMAC(mac, ec_id) 223 except errors.ReservationError: 224 raise errors.OpPrereqError("MAC address %s already in use" 225 " in cluster" % mac, 226 errors.ECODE_NOTUNIQUE) 227 228 # Build nic parameters 229 nicparams = {} 230 if nic_mode_req: 231 nicparams[constants.NIC_MODE] = nic_mode 232 if link: 233 nicparams[constants.NIC_LINK] = link 234 235 check_params = cluster.SimpleFillNIC(nicparams) 236 objects.NIC.CheckParameterSyntax(check_params) 237 net_uuid = cfg.LookupNetwork(net) 238 name = nic.get(constants.INIC_NAME, None) 239 if name is not None and name.lower() == constants.VALUE_NONE: 240 name = None 241 nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name, 242 network=net_uuid, nicparams=nicparams) 243 nic_obj.uuid = cfg.GenerateUniqueID(ec_id) 244 nics.append(nic_obj) 245 246 return nics
247
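For reference, each entry of op.nics is a plain dict; the sketch below shows typical keys by their usual string values (all content hypothetical). Note that a NIC attached to a network must not specify mode or link, as those are inherited from the node group's netparams:

nic_spec = {
  "mode": "bridged",  # or "routed"; "auto"/absent falls back to the cluster default
  "link": "br0",
  "ip": "auto",       # "auto" uses the resolved instance IP (requires name_check)
  "mac": "auto",      # "auto"/"generate" get a real MAC later in CheckPrereq
  }
# _ComputeNics turns each such dict into an objects.NIC with a fresh UUID and
# nicparams filled from the cluster defaults.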
248 249 -def _CheckForConflictingIp(lu, ip, node_uuid):
250 """In case of conflicting IP address raise error. 251 252 @type ip: string 253 @param ip: IP address 254 @type node_uuid: string 255 @param node_uuid: node UUID 256 257 """ 258 (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid) 259 if conf_net is not None: 260 raise errors.OpPrereqError(("The requested IP address (%s) belongs to" 261 " network %s, but the target NIC does not." % 262 (ip, conf_net)), 263 errors.ECODE_STATE) 264 265 return (None, None)
266
  267   
  268  def _ComputeIPolicyInstanceSpecViolation( 
  269      ipolicy, instance_spec, disk_template, 
  270      _compute_fn=ComputeIPolicySpecViolation): 
  271    """Compute if instance specs meet the specs of ipolicy. 
  272   
  273    @type ipolicy: dict 
  274    @param ipolicy: The ipolicy to verify against 
  275    @type instance_spec: dict 
  276    @param instance_spec: The instance spec to verify 
  277    @type disk_template: string 
  278    @param disk_template: the disk template of the instance 
  279    @param _compute_fn: The function to verify ipolicy (unittest only) 
  280    @see: L{ComputeIPolicySpecViolation} 
  281   
  282    """ 
  283    mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None) 
  284    cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None) 
  285    disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0) 
  286    disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, []) 
  287    nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0) 
  288    spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None) 
  289   
  290    return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count, 
  291                       disk_sizes, spindle_use, disk_template) 
292
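The instance_spec argument is the ispec dictionary assembled later in LUInstanceCreate.CheckPrereq; a hypothetical example (ipolicy taken from the target node group):

ispec = {
  constants.ISPEC_MEM_SIZE: 2048,       # MiB
  constants.ISPEC_CPU_COUNT: 2,
  constants.ISPEC_DISK_COUNT: 1,
  constants.ISPEC_DISK_SIZE: [10240],   # MiB per disk
  constants.ISPEC_NIC_COUNT: 1,
  constants.ISPEC_SPINDLE_USE: 1,
  }
violations = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                                  constants.DT_PLAIN)
# -> a (possibly empty) list of human-readable violation messages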
293 294 -def _CheckOSVariant(os_obj, name):
295 """Check whether an OS name conforms to the os variants specification. 296 297 @type os_obj: L{objects.OS} 298 @param os_obj: OS object to check 299 @type name: string 300 @param name: OS name passed by the user, to check for validity 301 302 """ 303 variant = objects.OS.GetVariant(name) 304 if not os_obj.supported_variants: 305 if variant: 306 raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'" 307 " passed)" % (os_obj.name, variant), 308 errors.ECODE_INVAL) 309 return 310 if not variant: 311 raise errors.OpPrereqError("OS name must include a variant", 312 errors.ECODE_INVAL) 313 314 if variant not in os_obj.supported_variants: 315 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
316
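Variants are encoded in the OS name after a "+"; a short sketch (OS names hypothetical):

# objects.OS.GetVariant("debootstrap+default") -> "default"
#
# - OS without supported_variants: passing "name+variant" is rejected.
# - OS with supported_variants:    the name must carry a variant, and that
#                                  variant must be in the supported list.
_CheckOSVariant(os_obj, "debootstrap+default")   # ok if "default" is supported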
317 318 -class LUInstanceCreate(LogicalUnit):
319 """Create an instance. 320 321 """ 322 HPATH = "instance-add" 323 HTYPE = constants.HTYPE_INSTANCE 324 REQ_BGL = False 325
326 - def _CheckDiskTemplateValid(self):
327 """Checks validity of disk template. 328 329 """ 330 cluster = self.cfg.GetClusterInfo() 331 if self.op.disk_template is None: 332 # FIXME: It would be better to take the default disk template from the 333 # ipolicy, but for the ipolicy we need the primary node, which we get from 334 # the iallocator, which wants the disk template as input. To solve this 335 # chicken-and-egg problem, it should be possible to specify just a node 336 # group from the iallocator and take the ipolicy from that. 337 self.op.disk_template = cluster.enabled_disk_templates[0] 338 CheckDiskTemplateEnabled(cluster, self.op.disk_template)
339
340 - def _CheckDiskArguments(self):
341 """Checks validity of disk-related arguments. 342 343 """ 344 # check that disk's names are unique and valid 345 utils.ValidateDeviceNames("disk", self.op.disks) 346 347 self._CheckDiskTemplateValid() 348 349 # check disks. parameter names and consistent adopt/no-adopt strategy 350 has_adopt = has_no_adopt = False 351 for disk in self.op.disks: 352 if self.op.disk_template != constants.DT_EXT: 353 utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES) 354 if constants.IDISK_ADOPT in disk: 355 has_adopt = True 356 else: 357 has_no_adopt = True 358 if has_adopt and has_no_adopt: 359 raise errors.OpPrereqError("Either all disks are adopted or none is", 360 errors.ECODE_INVAL) 361 if has_adopt: 362 if self.op.disk_template not in constants.DTS_MAY_ADOPT: 363 raise errors.OpPrereqError("Disk adoption is not supported for the" 364 " '%s' disk template" % 365 self.op.disk_template, 366 errors.ECODE_INVAL) 367 if self.op.iallocator is not None: 368 raise errors.OpPrereqError("Disk adoption not allowed with an" 369 " iallocator script", errors.ECODE_INVAL) 370 if self.op.mode == constants.INSTANCE_IMPORT: 371 raise errors.OpPrereqError("Disk adoption not allowed for" 372 " instance import", errors.ECODE_INVAL) 373 else: 374 if self.op.disk_template in constants.DTS_MUST_ADOPT: 375 raise errors.OpPrereqError("Disk template %s requires disk adoption," 376 " but no 'adopt' parameter given" % 377 self.op.disk_template, 378 errors.ECODE_INVAL) 379 380 self.adopt_disks = has_adopt
381
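The adopt/no-adopt consistency rule means the disks list must be uniform; a sketch with hypothetical values:

# Fine: ordinary disks, no adoption
disks = [{"size": 10240}, {"size": 2048}]

# Fine: every disk adopts an existing volume (only for templates in
# DTS_MAY_ADOPT, and not together with an iallocator or an import)
disks = [{"vg": "xenvg", "adopt": "existing-lv", "size": 10240}]

# Rejected with OpPrereqError: mixing adopted and non-adopted disks
disks = [{"size": 10240}, {"adopt": "existing-lv", "size": 2048}]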
382 - def CheckArguments(self):
383 """Check arguments. 384 385 """ 386 # do not require name_check to ease forward/backward compatibility 387 # for tools 388 if self.op.no_install and self.op.start: 389 self.LogInfo("No-installation mode selected, disabling startup") 390 self.op.start = False 391 # validate/normalize the instance name 392 self.op.instance_name = \ 393 netutils.Hostname.GetNormalizedName(self.op.instance_name) 394 395 if self.op.ip_check and not self.op.name_check: 396 # TODO: make the ip check more flexible and not depend on the name check 397 raise errors.OpPrereqError("Cannot do IP address check without a name" 398 " check", errors.ECODE_INVAL) 399 400 # check nics' parameter names 401 for nic in self.op.nics: 402 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES) 403 # check that NIC's parameters names are unique and valid 404 utils.ValidateDeviceNames("NIC", self.op.nics) 405 406 self._CheckDiskArguments() 407 408 # instance name verification 409 if self.op.name_check: 410 self.hostname = _CheckHostnameSane(self, self.op.instance_name) 411 self.op.instance_name = self.hostname.name 412 # used in CheckPrereq for ip ping check 413 self.check_ip = self.hostname.ip 414 else: 415 self.check_ip = None 416 417 # file storage checks 418 if (self.op.file_driver and 419 not self.op.file_driver in constants.FILE_DRIVER): 420 raise errors.OpPrereqError("Invalid file driver name '%s'" % 421 self.op.file_driver, errors.ECODE_INVAL) 422 423 # set default file_driver if unset and required 424 if (not self.op.file_driver and 425 self.op.disk_template in [constants.DT_FILE, 426 constants.DT_SHARED_FILE]): 427 self.op.file_driver = constants.FD_DEFAULT 428 429 ### Node/iallocator related checks 430 CheckIAllocatorOrNode(self, "iallocator", "pnode") 431 432 if self.op.pnode is not None: 433 if self.op.disk_template in constants.DTS_INT_MIRROR: 434 if self.op.snode is None: 435 raise errors.OpPrereqError("The networked disk templates need" 436 " a mirror node", errors.ECODE_INVAL) 437 elif self.op.snode: 438 self.LogWarning("Secondary node will be ignored on non-mirrored disk" 439 " template") 440 self.op.snode = None 441 442 _CheckOpportunisticLocking(self.op) 443 444 self._cds = GetClusterDomainSecret() 445 446 if self.op.mode == constants.INSTANCE_IMPORT: 447 # On import force_variant must be True, because if we forced it at 448 # initial install, our only chance when importing it back is that it 449 # works again! 
450 self.op.force_variant = True 451 452 if self.op.no_install: 453 self.LogInfo("No-installation mode has no effect during import") 454 455 elif self.op.mode == constants.INSTANCE_CREATE: 456 if self.op.os_type is None: 457 raise errors.OpPrereqError("No guest OS specified", 458 errors.ECODE_INVAL) 459 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os: 460 raise errors.OpPrereqError("Guest OS '%s' is not allowed for" 461 " installation" % self.op.os_type, 462 errors.ECODE_STATE) 463 if self.op.disk_template is None: 464 raise errors.OpPrereqError("No disk template specified", 465 errors.ECODE_INVAL) 466 467 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT: 468 # Check handshake to ensure both clusters have the same domain secret 469 src_handshake = self.op.source_handshake 470 if not src_handshake: 471 raise errors.OpPrereqError("Missing source handshake", 472 errors.ECODE_INVAL) 473 474 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds, 475 src_handshake) 476 if errmsg: 477 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg, 478 errors.ECODE_INVAL) 479 480 # Load and check source CA 481 self.source_x509_ca_pem = self.op.source_x509_ca 482 if not self.source_x509_ca_pem: 483 raise errors.OpPrereqError("Missing source X509 CA", 484 errors.ECODE_INVAL) 485 486 try: 487 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem, 488 self._cds) 489 except OpenSSL.crypto.Error, err: 490 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" % 491 (err, ), errors.ECODE_INVAL) 492 493 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None) 494 if errcode is not None: 495 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ), 496 errors.ECODE_INVAL) 497 498 self.source_x509_ca = cert 499 500 src_instance_name = self.op.source_instance_name 501 if not src_instance_name: 502 raise errors.OpPrereqError("Missing source instance name", 503 errors.ECODE_INVAL) 504 505 self.source_instance_name = \ 506 netutils.GetHostname(name=src_instance_name).name 507 508 else: 509 raise errors.OpPrereqError("Invalid instance creation mode %r" % 510 self.op.mode, errors.ECODE_INVAL)
511
512 - def ExpandNames(self):
513 """ExpandNames for CreateInstance. 514 515 Figure out the right locks for instance creation. 516 517 """ 518 self.needed_locks = {} 519 520 # this is just a preventive check, but someone might still add this 521 # instance in the meantime, and creation will fail at lock-add time 522 if self.op.instance_name in\ 523 [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]: 524 raise errors.OpPrereqError("Instance '%s' is already in the cluster" % 525 self.op.instance_name, errors.ECODE_EXISTS) 526 527 self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name 528 529 if self.op.iallocator: 530 # TODO: Find a solution to not lock all nodes in the cluster, e.g. by 531 # specifying a group on instance creation and then selecting nodes from 532 # that group 533 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET 534 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET 535 536 if self.op.opportunistic_locking: 537 self.opportunistic_locks[locking.LEVEL_NODE] = True 538 else: 539 (self.op.pnode_uuid, self.op.pnode) = \ 540 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode) 541 nodelist = [self.op.pnode_uuid] 542 if self.op.snode is not None: 543 (self.op.snode_uuid, self.op.snode) = \ 544 ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode) 545 nodelist.append(self.op.snode_uuid) 546 self.needed_locks[locking.LEVEL_NODE] = nodelist 547 548 # in case of import lock the source node too 549 if self.op.mode == constants.INSTANCE_IMPORT: 550 src_node = self.op.src_node 551 src_path = self.op.src_path 552 553 if src_path is None: 554 self.op.src_path = src_path = self.op.instance_name 555 556 if src_node is None: 557 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET 558 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET 559 self.op.src_node = None 560 if os.path.isabs(src_path): 561 raise errors.OpPrereqError("Importing an instance from a path" 562 " requires a source node option", 563 errors.ECODE_INVAL) 564 else: 565 (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \ 566 ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node) 567 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET: 568 self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid) 569 if not os.path.isabs(src_path): 570 self.op.src_path = \ 571 utils.PathJoin(pathutils.EXPORT_DIR, src_path) 572 573 self.needed_locks[locking.LEVEL_NODE_RES] = \ 574 CopyLockList(self.needed_locks[locking.LEVEL_NODE]) 575 576 # Optimistically acquire shared group locks (we're reading the 577 # configuration). We can't just call GetInstanceNodeGroups, because the 578 # instance doesn't exist yet. Therefore we lock all node groups of all 579 # nodes we have. 580 if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET: 581 # In the case we lock all nodes for opportunistic allocation, we have no 582 # choice than to lock all groups, because they're allocated before nodes. 583 # This is sad, but true. At least we release all those we don't need in 584 # CheckPrereq later. 585 self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET 586 else: 587 self.needed_locks[locking.LEVEL_NODEGROUP] = \ 588 list(self.cfg.GetNodeGroupsFromNodes( 589 self.needed_locks[locking.LEVEL_NODE])) 590 self.share_locks[locking.LEVEL_NODEGROUP] = 1
591
592 - def DeclareLocks(self, level):
593 if level == locking.LEVEL_NODE_RES and \ 594 self.opportunistic_locks[locking.LEVEL_NODE]: 595 # Even when using opportunistic locking, we require the same set of 596 # NODE_RES locks as we got NODE locks 597 self.needed_locks[locking.LEVEL_NODE_RES] = \ 598 self.owned_locks(locking.LEVEL_NODE)
599
600 - def _RunAllocator(self):
601 """Run the allocator based on input opcode. 602 603 """ 604 if self.op.opportunistic_locking: 605 # Only consider nodes for which a lock is held 606 node_name_whitelist = self.cfg.GetNodeNames( 607 self.owned_locks(locking.LEVEL_NODE)) 608 else: 609 node_name_whitelist = None 610 611 #TODO Export network to iallocator so that it chooses a pnode 612 # in a nodegroup that has the desired network connected to 613 req = _CreateInstanceAllocRequest(self.op, self.disks, 614 self.nics, self.be_full, 615 node_name_whitelist) 616 ial = iallocator.IAllocator(self.cfg, self.rpc, req) 617 618 ial.Run(self.op.iallocator) 619 620 if not ial.success: 621 # When opportunistic locks are used only a temporary failure is generated 622 if self.op.opportunistic_locking: 623 ecode = errors.ECODE_TEMP_NORES 624 else: 625 ecode = errors.ECODE_NORES 626 627 raise errors.OpPrereqError("Can't compute nodes using" 628 " iallocator '%s': %s" % 629 (self.op.iallocator, ial.info), 630 ecode) 631 632 (self.op.pnode_uuid, self.op.pnode) = \ 633 ExpandNodeUuidAndName(self.cfg, None, ial.result[0]) 634 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s", 635 self.op.instance_name, self.op.iallocator, 636 utils.CommaJoin(ial.result)) 637 638 assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator" 639 640 if req.RequiredNodes() == 2: 641 (self.op.snode_uuid, self.op.snode) = \ 642 ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
643
644 - def BuildHooksEnv(self):
645 """Build hooks env. 646 647 This runs on master, primary and secondary nodes of the instance. 648 649 """ 650 env = { 651 "ADD_MODE": self.op.mode, 652 } 653 if self.op.mode == constants.INSTANCE_IMPORT: 654 env["SRC_NODE"] = self.op.src_node 655 env["SRC_PATH"] = self.op.src_path 656 env["SRC_IMAGES"] = self.src_images 657 658 env.update(BuildInstanceHookEnv( 659 name=self.op.instance_name, 660 primary_node_name=self.op.pnode, 661 secondary_node_names=self.cfg.GetNodeNames(self.secondaries), 662 status=self.op.start, 663 os_type=self.op.os_type, 664 minmem=self.be_full[constants.BE_MINMEM], 665 maxmem=self.be_full[constants.BE_MAXMEM], 666 vcpus=self.be_full[constants.BE_VCPUS], 667 nics=NICListToTuple(self, self.nics), 668 disk_template=self.op.disk_template, 669 disks=[(d[constants.IDISK_NAME], d.get("uuid", ""), 670 d[constants.IDISK_SIZE], d[constants.IDISK_MODE]) 671 for d in self.disks], 672 bep=self.be_full, 673 hvp=self.hv_full, 674 hypervisor_name=self.op.hypervisor, 675 tags=self.op.tags, 676 )) 677 678 return env
679
680 - def BuildHooksNodes(self):
681 """Build hooks nodes. 682 683 """ 684 nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries 685 return nl, nl
686
687 - def _ReadExportInfo(self):
688 """Reads the export information from disk. 689 690 It will override the opcode source node and path with the actual 691 information, if these two were not specified before. 692 693 @return: the export information 694 695 """ 696 assert self.op.mode == constants.INSTANCE_IMPORT 697 698 if self.op.src_node_uuid is None: 699 locked_nodes = self.owned_locks(locking.LEVEL_NODE) 700 exp_list = self.rpc.call_export_list(locked_nodes) 701 found = False 702 for node_uuid in exp_list: 703 if exp_list[node_uuid].fail_msg: 704 continue 705 if self.op.src_path in exp_list[node_uuid].payload: 706 found = True 707 self.op.src_node = self.cfg.GetNodeInfo(node_uuid).name 708 self.op.src_node_uuid = node_uuid 709 self.op.src_path = utils.PathJoin(pathutils.EXPORT_DIR, 710 self.op.src_path) 711 break 712 if not found: 713 raise errors.OpPrereqError("No export found for relative path %s" % 714 self.op.src_path, errors.ECODE_INVAL) 715 716 CheckNodeOnline(self, self.op.src_node_uuid) 717 result = self.rpc.call_export_info(self.op.src_node_uuid, self.op.src_path) 718 result.Raise("No export or invalid export found in dir %s" % 719 self.op.src_path) 720 721 export_info = objects.SerializableConfigParser.Loads(str(result.payload)) 722 if not export_info.has_section(constants.INISECT_EXP): 723 raise errors.ProgrammerError("Corrupted export config", 724 errors.ECODE_ENVIRON) 725 726 ei_version = export_info.get(constants.INISECT_EXP, "version") 727 if int(ei_version) != constants.EXPORT_VERSION: 728 raise errors.OpPrereqError("Wrong export version %s (wanted %d)" % 729 (ei_version, constants.EXPORT_VERSION), 730 errors.ECODE_ENVIRON) 731 return export_info
732
733 - def _ReadExportParams(self, einfo):
734 """Use export parameters as defaults. 735 736 In case the opcode doesn't specify (as in override) some instance 737 parameters, then try to use them from the export information, if 738 that declares them. 739 740 """ 741 self.op.os_type = einfo.get(constants.INISECT_EXP, "os") 742 743 if not self.op.disks: 744 disks = [] 745 # TODO: import the disk iv_name too 746 for idx in range(constants.MAX_DISKS): 747 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx): 748 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx) 749 disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx) 750 disk = { 751 constants.IDISK_SIZE: disk_sz, 752 constants.IDISK_NAME: disk_name 753 } 754 disks.append(disk) 755 self.op.disks = disks 756 if not disks and self.op.disk_template != constants.DT_DISKLESS: 757 raise errors.OpPrereqError("No disk info specified and the export" 758 " is missing the disk information", 759 errors.ECODE_INVAL) 760 761 if not self.op.nics: 762 nics = [] 763 for idx in range(constants.MAX_NICS): 764 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx): 765 ndict = {} 766 for name in [constants.INIC_IP, 767 constants.INIC_MAC, constants.INIC_NAME]: 768 nic_param_name = "nic%d_%s" % (idx, name) 769 if einfo.has_option(constants.INISECT_INS, nic_param_name): 770 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name)) 771 ndict[name] = v 772 network = einfo.get(constants.INISECT_INS, 773 "nic%d_%s" % (idx, constants.INIC_NETWORK)) 774 # in case network is given link and mode are inherited 775 # from nodegroup's netparams and thus should not be passed here 776 if network: 777 ndict[constants.INIC_NETWORK] = network 778 else: 779 for name in list(constants.NICS_PARAMETERS): 780 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name)) 781 ndict[name] = v 782 nics.append(ndict) 783 else: 784 break 785 self.op.nics = nics 786 787 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"): 788 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split() 789 790 if (self.op.hypervisor is None and 791 einfo.has_option(constants.INISECT_INS, "hypervisor")): 792 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor") 793 794 if einfo.has_section(constants.INISECT_HYP): 795 # use the export parameters but do not override the ones 796 # specified by the user 797 for name, value in einfo.items(constants.INISECT_HYP): 798 if name not in self.op.hvparams: 799 self.op.hvparams[name] = value 800 801 if einfo.has_section(constants.INISECT_BEP): 802 # use the parameters, without overriding 803 for name, value in einfo.items(constants.INISECT_BEP): 804 if name not in self.op.beparams: 805 self.op.beparams[name] = value 806 # Compatibility for the old "memory" be param 807 if name == constants.BE_MEMORY: 808 if constants.BE_MAXMEM not in self.op.beparams: 809 self.op.beparams[constants.BE_MAXMEM] = value 810 if constants.BE_MINMEM not in self.op.beparams: 811 self.op.beparams[constants.BE_MINMEM] = value 812 else: 813 # try to read the parameters old style, from the main section 814 for name in constants.BES_PARAMETERS: 815 if (name not in self.op.beparams and 816 einfo.has_option(constants.INISECT_INS, name)): 817 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name) 818 819 if einfo.has_section(constants.INISECT_OSP): 820 # use the parameters, without overriding 821 for name, value in einfo.items(constants.INISECT_OSP): 822 if name not in self.op.osparams: 823 self.op.osparams[name] = value
824
825 - def _RevertToDefaults(self, cluster):
826 """Revert the instance parameters to the default values. 827 828 """ 829 # hvparams 830 hv_defs = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, {}) 831 for name in self.op.hvparams.keys(): 832 if name in hv_defs and hv_defs[name] == self.op.hvparams[name]: 833 del self.op.hvparams[name] 834 # beparams 835 be_defs = cluster.SimpleFillBE({}) 836 for name in self.op.beparams.keys(): 837 if name in be_defs and be_defs[name] == self.op.beparams[name]: 838 del self.op.beparams[name] 839 # nic params 840 nic_defs = cluster.SimpleFillNIC({}) 841 for nic in self.op.nics: 842 for name in constants.NICS_PARAMETERS: 843 if name in nic and name in nic_defs and nic[name] == nic_defs[name]: 844 del nic[name] 845 # osparams 846 os_defs = cluster.SimpleFillOS(self.op.os_type, {}) 847 for name in self.op.osparams.keys(): 848 if name in os_defs and os_defs[name] == self.op.osparams[name]: 849 del self.op.osparams[name]
850
851 - def _CalculateFileStorageDir(self):
852 """Calculate final instance file storage dir. 853 854 """ 855 # file storage dir calculation/check 856 self.instance_file_storage_dir = None 857 if self.op.disk_template in constants.DTS_FILEBASED: 858 # build the full file storage dir path 859 joinargs = [] 860 861 if self.op.disk_template == constants.DT_SHARED_FILE: 862 get_fsd_fn = self.cfg.GetSharedFileStorageDir 863 else: 864 get_fsd_fn = self.cfg.GetFileStorageDir 865 866 cfg_storagedir = get_fsd_fn() 867 if not cfg_storagedir: 868 raise errors.OpPrereqError("Cluster file storage dir not defined", 869 errors.ECODE_STATE) 870 joinargs.append(cfg_storagedir) 871 872 if self.op.file_storage_dir is not None: 873 joinargs.append(self.op.file_storage_dir) 874 875 joinargs.append(self.op.instance_name) 876 877 # pylint: disable=W0142 878 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
879
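As a sketch with hypothetical paths, the final directory is built by joining the cluster-wide storage dir, the optional opcode sub-directory and the instance name:

# cluster file storage dir : /srv/ganeti/file-storage
# op.file_storage_dir      : "group-a"            (optional)
# op.instance_name         : "web1.example.com"
#
# -> /srv/ganeti/file-storage/group-a/web1.example.com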
880 - def CheckPrereq(self): # pylint: disable=R0914
881 """Check prerequisites. 882 883 """ 884 # Check that the optimistically acquired groups are correct wrt the 885 # acquired nodes 886 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) 887 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE)) 888 cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes)) 889 if not owned_groups.issuperset(cur_groups): 890 raise errors.OpPrereqError("New instance %s's node groups changed since" 891 " locks were acquired, current groups are" 892 " are '%s', owning groups '%s'; retry the" 893 " operation" % 894 (self.op.instance_name, 895 utils.CommaJoin(cur_groups), 896 utils.CommaJoin(owned_groups)), 897 errors.ECODE_STATE) 898 899 self._CalculateFileStorageDir() 900 901 if self.op.mode == constants.INSTANCE_IMPORT: 902 export_info = self._ReadExportInfo() 903 self._ReadExportParams(export_info) 904 self._old_instance_name = export_info.get(constants.INISECT_INS, "name") 905 else: 906 self._old_instance_name = None 907 908 if (not self.cfg.GetVGName() and 909 self.op.disk_template not in constants.DTS_NOT_LVM): 910 raise errors.OpPrereqError("Cluster does not support lvm-based" 911 " instances", errors.ECODE_STATE) 912 913 if (self.op.hypervisor is None or 914 self.op.hypervisor == constants.VALUE_AUTO): 915 self.op.hypervisor = self.cfg.GetHypervisorType() 916 917 cluster = self.cfg.GetClusterInfo() 918 enabled_hvs = cluster.enabled_hypervisors 919 if self.op.hypervisor not in enabled_hvs: 920 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the" 921 " cluster (%s)" % 922 (self.op.hypervisor, ",".join(enabled_hvs)), 923 errors.ECODE_STATE) 924 925 # Check tag validity 926 for tag in self.op.tags: 927 objects.TaggableObject.ValidateTag(tag) 928 929 # check hypervisor parameter syntax (locally) 930 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES) 931 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type, 932 self.op.hvparams) 933 hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor) 934 hv_type.CheckParameterSyntax(filled_hvp) 935 self.hv_full = filled_hvp 936 # check that we don't specify global parameters on an instance 937 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor", 938 "instance", "cluster") 939 940 # fill and remember the beparams dict 941 self.be_full = _ComputeFullBeParams(self.op, cluster) 942 943 # build os parameters 944 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams) 945 946 # now that hvp/bep are in final format, let's reset to defaults, 947 # if told to do so 948 if self.op.identify_defaults: 949 self._RevertToDefaults(cluster) 950 951 # NIC buildup 952 self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg, 953 self.proc.GetECId()) 954 955 # disk checks/pre-build 956 default_vg = self.cfg.GetVGName() 957 self.disks = ComputeDisks(self.op, default_vg) 958 959 if self.op.mode == constants.INSTANCE_IMPORT: 960 disk_images = [] 961 for idx in range(len(self.disks)): 962 option = "disk%d_dump" % idx 963 if export_info.has_option(constants.INISECT_INS, option): 964 # FIXME: are the old os-es, disk sizes, etc. useful? 
965 export_name = export_info.get(constants.INISECT_INS, option) 966 image = utils.PathJoin(self.op.src_path, export_name) 967 disk_images.append(image) 968 else: 969 disk_images.append(False) 970 971 self.src_images = disk_images 972 973 if self.op.instance_name == self._old_instance_name: 974 for idx, nic in enumerate(self.nics): 975 if nic.mac == constants.VALUE_AUTO: 976 nic_mac_ini = "nic%d_mac" % idx 977 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini) 978 979 # ENDIF: self.op.mode == constants.INSTANCE_IMPORT 980 981 # ip ping checks (we use the same ip that was resolved in ExpandNames) 982 if self.op.ip_check: 983 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT): 984 raise errors.OpPrereqError("IP %s of instance %s already in use" % 985 (self.check_ip, self.op.instance_name), 986 errors.ECODE_NOTUNIQUE) 987 988 #### mac address generation 989 # By generating here the mac address both the allocator and the hooks get 990 # the real final mac address rather than the 'auto' or 'generate' value. 991 # There is a race condition between the generation and the instance object 992 # creation, which means that we know the mac is valid now, but we're not 993 # sure it will be when we actually add the instance. If things go bad 994 # adding the instance will abort because of a duplicate mac, and the 995 # creation job will fail. 996 for nic in self.nics: 997 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE): 998 nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId()) 999 1000 #### allocator run 1001 1002 if self.op.iallocator is not None: 1003 self._RunAllocator() 1004 1005 # Release all unneeded node locks 1006 keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid, 1007 self.op.src_node_uuid]) 1008 ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks) 1009 ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks) 1010 ReleaseLocks(self, locking.LEVEL_NODE_ALLOC) 1011 # Release all unneeded group locks 1012 ReleaseLocks(self, locking.LEVEL_NODEGROUP, 1013 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks)) 1014 1015 assert (self.owned_locks(locking.LEVEL_NODE) == 1016 self.owned_locks(locking.LEVEL_NODE_RES)), \ 1017 "Node locks differ from node resource locks" 1018 1019 #### node related checks 1020 1021 # check primary node 1022 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid) 1023 assert self.pnode is not None, \ 1024 "Cannot retrieve locked node %s" % self.op.pnode_uuid 1025 if pnode.offline: 1026 raise errors.OpPrereqError("Cannot use offline primary node '%s'" % 1027 pnode.name, errors.ECODE_STATE) 1028 if pnode.drained: 1029 raise errors.OpPrereqError("Cannot use drained primary node '%s'" % 1030 pnode.name, errors.ECODE_STATE) 1031 if not pnode.vm_capable: 1032 raise errors.OpPrereqError("Cannot use non-vm_capable primary node" 1033 " '%s'" % pnode.name, errors.ECODE_STATE) 1034 1035 self.secondaries = [] 1036 1037 # Fill in any IPs from IP pools. This must happen here, because we need to 1038 # know the nic's primary node, as specified by the iallocator 1039 for idx, nic in enumerate(self.nics): 1040 net_uuid = nic.network 1041 if net_uuid is not None: 1042 nobj = self.cfg.GetNetwork(net_uuid) 1043 netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid) 1044 if netparams is None: 1045 raise errors.OpPrereqError("No netparams found for network" 1046 " %s. 
Probably not connected to" 1047 " node's %s nodegroup" % 1048 (nobj.name, self.pnode.name), 1049 errors.ECODE_INVAL) 1050 self.LogInfo("NIC/%d inherits netparams %s" % 1051 (idx, netparams.values())) 1052 nic.nicparams = dict(netparams) 1053 if nic.ip is not None: 1054 if nic.ip.lower() == constants.NIC_IP_POOL: 1055 try: 1056 nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId()) 1057 except errors.ReservationError: 1058 raise errors.OpPrereqError("Unable to get a free IP for NIC %d" 1059 " from the address pool" % idx, 1060 errors.ECODE_STATE) 1061 self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name) 1062 else: 1063 try: 1064 self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId()) 1065 except errors.ReservationError: 1066 raise errors.OpPrereqError("IP address %s already in use" 1067 " or does not belong to network %s" % 1068 (nic.ip, nobj.name), 1069 errors.ECODE_NOTUNIQUE) 1070 1071 # net is None, ip None or given 1072 elif self.op.conflicts_check: 1073 _CheckForConflictingIp(self, nic.ip, self.pnode.uuid) 1074 1075 # mirror node verification 1076 if self.op.disk_template in constants.DTS_INT_MIRROR: 1077 if self.op.snode_uuid == pnode.uuid: 1078 raise errors.OpPrereqError("The secondary node cannot be the" 1079 " primary node", errors.ECODE_INVAL) 1080 CheckNodeOnline(self, self.op.snode_uuid) 1081 CheckNodeNotDrained(self, self.op.snode_uuid) 1082 CheckNodeVmCapable(self, self.op.snode_uuid) 1083 self.secondaries.append(self.op.snode_uuid) 1084 1085 snode = self.cfg.GetNodeInfo(self.op.snode_uuid) 1086 if pnode.group != snode.group: 1087 self.LogWarning("The primary and secondary nodes are in two" 1088 " different node groups; the disk parameters" 1089 " from the first disk's node group will be" 1090 " used") 1091 1092 nodes = [pnode] 1093 if self.op.disk_template in constants.DTS_INT_MIRROR: 1094 nodes.append(snode) 1095 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n) 1096 excl_stor = compat.any(map(has_es, nodes)) 1097 if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE: 1098 raise errors.OpPrereqError("Disk template %s not supported with" 1099 " exclusive storage" % self.op.disk_template, 1100 errors.ECODE_STATE) 1101 for disk in self.disks: 1102 CheckSpindlesExclusiveStorage(disk, excl_stor, True) 1103 1104 node_uuids = [pnode.uuid] + self.secondaries 1105 1106 if not self.adopt_disks: 1107 if self.op.disk_template == constants.DT_RBD: 1108 # _CheckRADOSFreeSpace() is just a placeholder. 1109 # Any function that checks prerequisites can be placed here. 1110 # Check if there is enough space on the RADOS cluster. 
1111 CheckRADOSFreeSpace() 1112 elif self.op.disk_template == constants.DT_EXT: 1113 # FIXME: Function that checks prereqs if needed 1114 pass 1115 elif self.op.disk_template in utils.GetLvmDiskTemplates(): 1116 # Check lv size requirements, if not adopting 1117 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks) 1118 CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes) 1119 else: 1120 # FIXME: add checks for other, non-adopting, non-lvm disk templates 1121 pass 1122 1123 elif self.op.disk_template == constants.DT_PLAIN: # Check the adoption data 1124 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG], 1125 disk[constants.IDISK_ADOPT]) 1126 for disk in self.disks]) 1127 if len(all_lvs) != len(self.disks): 1128 raise errors.OpPrereqError("Duplicate volume names given for adoption", 1129 errors.ECODE_INVAL) 1130 for lv_name in all_lvs: 1131 try: 1132 # FIXME: lv_name here is "vg/lv" need to ensure that other calls 1133 # to ReserveLV uses the same syntax 1134 self.cfg.ReserveLV(lv_name, self.proc.GetECId()) 1135 except errors.ReservationError: 1136 raise errors.OpPrereqError("LV named %s used by another instance" % 1137 lv_name, errors.ECODE_NOTUNIQUE) 1138 1139 vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid] 1140 vg_names.Raise("Cannot get VG information from node %s" % pnode.name) 1141 1142 node_lvs = self.rpc.call_lv_list([pnode.uuid], 1143 vg_names.payload.keys())[pnode.uuid] 1144 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name) 1145 node_lvs = node_lvs.payload 1146 1147 delta = all_lvs.difference(node_lvs.keys()) 1148 if delta: 1149 raise errors.OpPrereqError("Missing logical volume(s): %s" % 1150 utils.CommaJoin(delta), 1151 errors.ECODE_INVAL) 1152 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]] 1153 if online_lvs: 1154 raise errors.OpPrereqError("Online logical volumes found, cannot" 1155 " adopt: %s" % utils.CommaJoin(online_lvs), 1156 errors.ECODE_STATE) 1157 # update the size of disk based on what is found 1158 for dsk in self.disks: 1159 dsk[constants.IDISK_SIZE] = \ 1160 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG], 1161 dsk[constants.IDISK_ADOPT])][0])) 1162 1163 elif self.op.disk_template == constants.DT_BLOCK: 1164 # Normalize and de-duplicate device paths 1165 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT]) 1166 for disk in self.disks]) 1167 if len(all_disks) != len(self.disks): 1168 raise errors.OpPrereqError("Duplicate disk names given for adoption", 1169 errors.ECODE_INVAL) 1170 baddisks = [d for d in all_disks 1171 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)] 1172 if baddisks: 1173 raise errors.OpPrereqError("Device node(s) %s lie outside %s and" 1174 " cannot be adopted" % 1175 (utils.CommaJoin(baddisks), 1176 constants.ADOPTABLE_BLOCKDEV_ROOT), 1177 errors.ECODE_INVAL) 1178 1179 node_disks = self.rpc.call_bdev_sizes([pnode.uuid], 1180 list(all_disks))[pnode.uuid] 1181 node_disks.Raise("Cannot get block device information from node %s" % 1182 pnode.name) 1183 node_disks = node_disks.payload 1184 delta = all_disks.difference(node_disks.keys()) 1185 if delta: 1186 raise errors.OpPrereqError("Missing block device(s): %s" % 1187 utils.CommaJoin(delta), 1188 errors.ECODE_INVAL) 1189 for dsk in self.disks: 1190 dsk[constants.IDISK_SIZE] = \ 1191 int(float(node_disks[dsk[constants.IDISK_ADOPT]])) 1192 1193 # Verify instance specs 1194 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None) 1195 ispec = { 1196 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None), 
1197 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None), 1198 constants.ISPEC_DISK_COUNT: len(self.disks), 1199 constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE] 1200 for disk in self.disks], 1201 constants.ISPEC_NIC_COUNT: len(self.nics), 1202 constants.ISPEC_SPINDLE_USE: spindle_use, 1203 } 1204 1205 group_info = self.cfg.GetNodeGroup(pnode.group) 1206 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info) 1207 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec, 1208 self.op.disk_template) 1209 if not self.op.ignore_ipolicy and res: 1210 msg = ("Instance allocation to group %s (%s) violates policy: %s" % 1211 (pnode.group, group_info.name, utils.CommaJoin(res))) 1212 raise errors.OpPrereqError(msg, errors.ECODE_INVAL) 1213 1214 CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams) 1215 1216 CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant) 1217 # check OS parameters (remotely) 1218 CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full) 1219 1220 CheckNicsBridgesExist(self, self.nics, self.pnode.uuid) 1221 1222 #TODO: _CheckExtParams (remotely) 1223 # Check parameters for extstorage 1224 1225 # memory check on primary node 1226 #TODO(dynmem): use MINMEM for checking 1227 if self.op.start: 1228 hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}), 1229 self.op.hvparams) 1230 CheckNodeFreeMemory(self, self.pnode.uuid, 1231 "creating instance %s" % self.op.instance_name, 1232 self.be_full[constants.BE_MAXMEM], 1233 self.op.hypervisor, hvfull) 1234 1235 self.dry_run_result = list(node_uuids)
1236
1237 - def Exec(self, feedback_fn):
1238 """Create and add the instance to the cluster. 1239 1240 """ 1241 assert not (self.owned_locks(locking.LEVEL_NODE_RES) - 1242 self.owned_locks(locking.LEVEL_NODE)), \ 1243 "Node locks differ from node resource locks" 1244 assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC) 1245 1246 ht_kind = self.op.hypervisor 1247 if ht_kind in constants.HTS_REQ_PORT: 1248 network_port = self.cfg.AllocatePort() 1249 else: 1250 network_port = None 1251 1252 instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId()) 1253 1254 # This is ugly but we got a chicken-egg problem here 1255 # We can only take the group disk parameters, as the instance 1256 # has no disks yet (we are generating them right here). 1257 nodegroup = self.cfg.GetNodeGroup(self.pnode.group) 1258 disks = GenerateDiskTemplate(self, 1259 self.op.disk_template, 1260 instance_uuid, self.pnode.uuid, 1261 self.secondaries, 1262 self.disks, 1263 self.instance_file_storage_dir, 1264 self.op.file_driver, 1265 0, 1266 feedback_fn, 1267 self.cfg.GetGroupDiskParams(nodegroup)) 1268 1269 iobj = objects.Instance(name=self.op.instance_name, 1270 uuid=instance_uuid, 1271 os=self.op.os_type, 1272 primary_node=self.pnode.uuid, 1273 nics=self.nics, disks=disks, 1274 disk_template=self.op.disk_template, 1275 disks_active=False, 1276 admin_state=constants.ADMINST_DOWN, 1277 network_port=network_port, 1278 beparams=self.op.beparams, 1279 hvparams=self.op.hvparams, 1280 hypervisor=self.op.hypervisor, 1281 osparams=self.op.osparams, 1282 ) 1283 1284 if self.op.tags: 1285 for tag in self.op.tags: 1286 iobj.AddTag(tag) 1287 1288 if self.adopt_disks: 1289 if self.op.disk_template == constants.DT_PLAIN: 1290 # rename LVs to the newly-generated names; we need to construct 1291 # 'fake' LV disks with the old data, plus the new unique_id 1292 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks] 1293 rename_to = [] 1294 for t_dsk, a_dsk in zip(tmp_disks, self.disks): 1295 rename_to.append(t_dsk.logical_id) 1296 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT]) 1297 self.cfg.SetDiskID(t_dsk, self.pnode.uuid) 1298 result = self.rpc.call_blockdev_rename(self.pnode.uuid, 1299 zip(tmp_disks, rename_to)) 1300 result.Raise("Failed to rename adoped LVs") 1301 else: 1302 feedback_fn("* creating instance disks...") 1303 try: 1304 CreateDisks(self, iobj) 1305 except errors.OpExecError: 1306 self.LogWarning("Device creation failed") 1307 self.cfg.ReleaseDRBDMinors(self.op.instance_name) 1308 raise 1309 1310 feedback_fn("adding instance %s to cluster config" % self.op.instance_name) 1311 1312 self.cfg.AddInstance(iobj, self.proc.GetECId()) 1313 1314 # Declare that we don't want to remove the instance lock anymore, as we've 1315 # added the instance to the config 1316 del self.remove_locks[locking.LEVEL_INSTANCE] 1317 1318 if self.op.mode == constants.INSTANCE_IMPORT: 1319 # Release unused nodes 1320 ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid]) 1321 else: 1322 # Release all nodes 1323 ReleaseLocks(self, locking.LEVEL_NODE) 1324 1325 disk_abort = False 1326 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks: 1327 feedback_fn("* wiping instance disks...") 1328 try: 1329 WipeDisks(self, iobj) 1330 except errors.OpExecError, err: 1331 logging.exception("Wiping disks failed") 1332 self.LogWarning("Wiping instance disks failed (%s)", err) 1333 disk_abort = True 1334 1335 if disk_abort: 1336 # Something is already wrong with the disks, don't do anything else 1337 pass 1338 elif self.op.wait_for_sync: 
1339 disk_abort = not WaitForSync(self, iobj) 1340 elif iobj.disk_template in constants.DTS_INT_MIRROR: 1341 # make sure the disks are not degraded (still sync-ing is ok) 1342 feedback_fn("* checking mirrors status") 1343 disk_abort = not WaitForSync(self, iobj, oneshot=True) 1344 else: 1345 disk_abort = False 1346 1347 if disk_abort: 1348 RemoveDisks(self, iobj) 1349 self.cfg.RemoveInstance(iobj.uuid) 1350 # Make sure the instance lock gets removed 1351 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name 1352 raise errors.OpExecError("There are some degraded disks for" 1353 " this instance") 1354 1355 # instance disks are now active 1356 iobj.disks_active = True 1357 1358 # Release all node resource locks 1359 ReleaseLocks(self, locking.LEVEL_NODE_RES) 1360 1361 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks: 1362 # we need to set the disks ID to the primary node, since the 1363 # preceding code might or might have not done it, depending on 1364 # disk template and other options 1365 for disk in iobj.disks: 1366 self.cfg.SetDiskID(disk, self.pnode.uuid) 1367 if self.op.mode == constants.INSTANCE_CREATE: 1368 if not self.op.no_install: 1369 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and 1370 not self.op.wait_for_sync) 1371 if pause_sync: 1372 feedback_fn("* pausing disk sync to install instance OS") 1373 result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid, 1374 (iobj.disks, 1375 iobj), True) 1376 for idx, success in enumerate(result.payload): 1377 if not success: 1378 logging.warn("pause-sync of instance %s for disk %d failed", 1379 self.op.instance_name, idx) 1380 1381 feedback_fn("* running the instance OS create scripts...") 1382 # FIXME: pass debug option from opcode to backend 1383 os_add_result = \ 1384 self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False, 1385 self.op.debug_level) 1386 if pause_sync: 1387 feedback_fn("* resuming disk sync") 1388 result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid, 1389 (iobj.disks, 1390 iobj), False) 1391 for idx, success in enumerate(result.payload): 1392 if not success: 1393 logging.warn("resume-sync of instance %s for disk %d failed", 1394 self.op.instance_name, idx) 1395 1396 os_add_result.Raise("Could not add os for instance %s" 1397 " on node %s" % (self.op.instance_name, 1398 self.pnode.name)) 1399 1400 else: 1401 if self.op.mode == constants.INSTANCE_IMPORT: 1402 feedback_fn("* running the instance OS import scripts...") 1403 1404 transfers = [] 1405 1406 for idx, image in enumerate(self.src_images): 1407 if not image: 1408 continue 1409 1410 # FIXME: pass debug option from opcode to backend 1411 dt = masterd.instance.DiskTransfer("disk/%s" % idx, 1412 constants.IEIO_FILE, (image, ), 1413 constants.IEIO_SCRIPT, 1414 (iobj.disks[idx], idx), 1415 None) 1416 transfers.append(dt) 1417 1418 import_result = \ 1419 masterd.instance.TransferInstanceData(self, feedback_fn, 1420 self.op.src_node_uuid, 1421 self.pnode.uuid, 1422 self.pnode.secondary_ip, 1423 iobj, transfers) 1424 if not compat.all(import_result): 1425 self.LogWarning("Some disks for instance %s on node %s were not" 1426 " imported successfully" % (self.op.instance_name, 1427 self.pnode.name)) 1428 1429 rename_from = self._old_instance_name 1430 1431 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT: 1432 feedback_fn("* preparing remote import...") 1433 # The source cluster will stop the instance before attempting to make 1434 # a connection. 
In some cases stopping an instance can take a long 1435 # time, hence the shutdown timeout is added to the connection 1436 # timeout. 1437 connect_timeout = (constants.RIE_CONNECT_TIMEOUT + 1438 self.op.source_shutdown_timeout) 1439 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout) 1440 1441 assert iobj.primary_node == self.pnode.uuid 1442 disk_results = \ 1443 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode, 1444 self.source_x509_ca, 1445 self._cds, timeouts) 1446 if not compat.all(disk_results): 1447 # TODO: Should the instance still be started, even if some disks 1448 # failed to import (valid for local imports, too)? 1449 self.LogWarning("Some disks for instance %s on node %s were not" 1450 " imported successfully" % (self.op.instance_name, 1451 self.pnode.name)) 1452 1453 rename_from = self.source_instance_name 1454 1455 else: 1456 # also checked in the prereq part 1457 raise errors.ProgrammerError("Unknown OS initialization mode '%s'" 1458 % self.op.mode) 1459 1460 # Run rename script on newly imported instance 1461 assert iobj.name == self.op.instance_name 1462 feedback_fn("Running rename script for %s" % self.op.instance_name) 1463 result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj, 1464 rename_from, 1465 self.op.debug_level) 1466 result.Warn("Failed to run rename script for %s on node %s" % 1467 (self.op.instance_name, self.pnode.name), self.LogWarning) 1468 1469 assert not self.owned_locks(locking.LEVEL_NODE_RES) 1470 1471 if self.op.start: 1472 iobj.admin_state = constants.ADMINST_UP 1473 self.cfg.Update(iobj, feedback_fn) 1474 logging.info("Starting instance %s on node %s", self.op.instance_name, 1475 self.pnode.name) 1476 feedback_fn("* starting instance...") 1477 result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None), 1478 False, self.op.reason) 1479 result.Raise("Could not start instance") 1480 1481 return self.cfg.GetNodeNames(list(iobj.all_nodes))
1482
1483 1484 -class LUInstanceRename(LogicalUnit):
1485 """Rename an instance. 1486 1487 """ 1488 HPATH = "instance-rename" 1489 HTYPE = constants.HTYPE_INSTANCE 1490
1491 - def CheckArguments(self):
1492 """Check arguments. 1493 1494 """ 1495 if self.op.ip_check and not self.op.name_check: 1496 # TODO: make the ip check more flexible and not depend on the name check 1497 raise errors.OpPrereqError("IP address check requires a name check", 1498 errors.ECODE_INVAL)
1499
1500 - def BuildHooksEnv(self):
1501 """Build hooks env. 1502 1503 This runs on master, primary and secondary nodes of the instance. 1504 1505 """ 1506 env = BuildInstanceHookEnvByObject(self, self.instance) 1507 env["INSTANCE_NEW_NAME"] = self.op.new_name 1508 return env
1509
1510 - def BuildHooksNodes(self):
1511 """Build hooks nodes. 1512 1513 """ 1514 nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes) 1515 return (nl, nl)
1516
1517 - def CheckPrereq(self):
1518 """Check prerequisites. 1519 1520 This checks that the instance is in the cluster and is not running. 1521 1522 """ 1523 (self.op.instance_uuid, self.op.instance_name) = \ 1524 ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid, 1525 self.op.instance_name) 1526 instance = self.cfg.GetInstanceInfo(self.op.instance_uuid) 1527 assert instance is not None 1528 1529 # It should actually not happen that an instance is running with a disabled 1530 # disk template, but in case it does, the renaming of file-based instances 1531 # will fail horribly. Thus, we test it before. 1532 if (instance.disk_template in constants.DTS_FILEBASED and 1533 self.op.new_name != instance.name): 1534 CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(), 1535 instance.disk_template) 1536 1537 CheckNodeOnline(self, instance.primary_node) 1538 CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING, 1539 msg="cannot rename") 1540 self.instance = instance 1541 1542 new_name = self.op.new_name 1543 if self.op.name_check: 1544 hostname = _CheckHostnameSane(self, new_name) 1545 new_name = self.op.new_name = hostname.name 1546 if (self.op.ip_check and 1547 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)): 1548 raise errors.OpPrereqError("IP %s of instance %s already in use" % 1549 (hostname.ip, new_name), 1550 errors.ECODE_NOTUNIQUE) 1551 1552 instance_names = [inst.name for 1553 inst in self.cfg.GetAllInstancesInfo().values()] 1554 if new_name in instance_names and new_name != instance.name: 1555 raise errors.OpPrereqError("Instance '%s' is already in the cluster" % 1556 new_name, errors.ECODE_EXISTS)
1557
1558 - def Exec(self, feedback_fn):
1559 """Rename the instance. 1560 1561 """ 1562 old_name = self.instance.name 1563 1564 rename_file_storage = False 1565 if (self.instance.disk_template in constants.DTS_FILEBASED and 1566 self.op.new_name != self.instance.name): 1567 old_file_storage_dir = os.path.dirname( 1568 self.instance.disks[0].logical_id[1]) 1569 rename_file_storage = True 1570 1571 self.cfg.RenameInstance(self.instance.uuid, self.op.new_name) 1572 # Change the instance lock. This is definitely safe while we hold the BGL. 1573 # Otherwise the new lock would have to be added in acquired mode. 1574 assert self.REQ_BGL 1575 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER) 1576 self.glm.remove(locking.LEVEL_INSTANCE, old_name) 1577 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name) 1578 1579 # re-read the instance from the configuration after rename 1580 renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid) 1581 1582 if rename_file_storage: 1583 new_file_storage_dir = os.path.dirname( 1584 renamed_inst.disks[0].logical_id[1]) 1585 result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node, 1586 old_file_storage_dir, 1587 new_file_storage_dir) 1588 result.Raise("Could not rename on node %s directory '%s' to '%s'" 1589 " (but the instance has been renamed in Ganeti)" % 1590 (self.cfg.GetNodeName(renamed_inst.primary_node), 1591 old_file_storage_dir, new_file_storage_dir)) 1592 1593 StartInstanceDisks(self, renamed_inst, None) 1594 # update info on disks 1595 info = GetInstanceInfoText(renamed_inst) 1596 for (idx, disk) in enumerate(renamed_inst.disks): 1597 for node_uuid in renamed_inst.all_nodes: 1598 self.cfg.SetDiskID(disk, node_uuid) 1599 result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info) 1600 result.Warn("Error setting info on node %s for disk %s" % 1601 (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning) 1602 try: 1603 result = self.rpc.call_instance_run_rename(renamed_inst.primary_node, 1604 renamed_inst, old_name, 1605 self.op.debug_level) 1606 result.Warn("Could not run OS rename script for instance %s on node %s" 1607 " (but the instance has been renamed in Ganeti)" % 1608 (renamed_inst.name, 1609 self.cfg.GetNodeName(renamed_inst.primary_node)), 1610 self.LogWarning) 1611 finally: 1612 ShutdownInstanceDisks(self, renamed_inst) 1613 1614 return renamed_inst.name
1615
1616 1617 -class LUInstanceRemove(LogicalUnit):
1618 """Remove an instance. 1619 1620 """ 1621 HPATH = "instance-remove" 1622 HTYPE = constants.HTYPE_INSTANCE 1623 REQ_BGL = False 1624
1625 - def ExpandNames(self):
1626 self._ExpandAndLockInstance() 1627 self.needed_locks[locking.LEVEL_NODE] = [] 1628 self.needed_locks[locking.LEVEL_NODE_RES] = [] 1629 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
1630
1631 - def DeclareLocks(self, level):
1632 if level == locking.LEVEL_NODE: 1633 self._LockInstancesNodes() 1634 elif level == locking.LEVEL_NODE_RES: 1635 # Copy node locks 1636 self.needed_locks[locking.LEVEL_NODE_RES] = \ 1637 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1638
1639 - def BuildHooksEnv(self):
1640 """Build hooks env. 1641 1642 This runs on master, primary and secondary nodes of the instance. 1643 1644 """ 1645 env = BuildInstanceHookEnvByObject(self, self.instance) 1646 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout 1647 return env
1648
1649 - def BuildHooksNodes(self):
1650 """Build hooks nodes. 1651 1652 """ 1653 nl = [self.cfg.GetMasterNode()] 1654 nl_post = list(self.instance.all_nodes) + nl 1655 return (nl, nl_post)
1656
1657 - def CheckPrereq(self):
1658 """Check prerequisites. 1659 1660 This checks that the instance is in the cluster. 1661 1662 """ 1663 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid) 1664 assert self.instance is not None, \ 1665 "Cannot retrieve locked instance %s" % self.op.instance_name
1666
1667 - def Exec(self, feedback_fn):
1668 """Remove the instance. 1669 1670 """ 1671 logging.info("Shutting down instance %s on node %s", self.instance.name, 1672 self.cfg.GetNodeName(self.instance.primary_node)) 1673 1674 result = self.rpc.call_instance_shutdown(self.instance.primary_node, 1675 self.instance, 1676 self.op.shutdown_timeout, 1677 self.op.reason) 1678 if self.op.ignore_failures: 1679 result.Warn("Warning: can't shutdown instance", feedback_fn) 1680 else: 1681 result.Raise("Could not shutdown instance %s on node %s" % 1682 (self.instance.name, 1683 self.cfg.GetNodeName(self.instance.primary_node))) 1684 1685 assert (self.owned_locks(locking.LEVEL_NODE) == 1686 self.owned_locks(locking.LEVEL_NODE_RES)) 1687 assert not (set(self.instance.all_nodes) - 1688 self.owned_locks(locking.LEVEL_NODE)), \ 1689 "Not owning correct locks" 1690 1691 RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1692
1693 1694 -class LUInstanceMove(LogicalUnit):
1695 """Move an instance by data-copying. 1696 1697 """ 1698 HPATH = "instance-move" 1699 HTYPE = constants.HTYPE_INSTANCE 1700 REQ_BGL = False 1701
1702 - def ExpandNames(self):
1703 self._ExpandAndLockInstance() 1704 (self.op.target_node_uuid, self.op.target_node) = \ 1705 ExpandNodeUuidAndName(self.cfg, self.op.target_node_uuid, 1706 self.op.target_node) 1707 self.needed_locks[locking.LEVEL_NODE] = [self.op.target_node_uuid] 1708 self.needed_locks[locking.LEVEL_NODE_RES] = [] 1709 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
1710
1711 - def DeclareLocks(self, level):
1712 if level == locking.LEVEL_NODE: 1713 self._LockInstancesNodes(primary_only=True) 1714 elif level == locking.LEVEL_NODE_RES: 1715 # Copy node locks 1716 self.needed_locks[locking.LEVEL_NODE_RES] = \ 1717 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1718
1719 - def BuildHooksEnv(self):
1720 """Build hooks env. 1721 1722 This runs on master, primary and secondary nodes of the instance. 1723 1724 """ 1725 env = { 1726 "TARGET_NODE": self.op.target_node, 1727 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout, 1728 } 1729 env.update(BuildInstanceHookEnvByObject(self, self.instance)) 1730 return env
1731
1732 - def BuildHooksNodes(self):
1733 """Build hooks nodes. 1734 1735 """ 1736 nl = [ 1737 self.cfg.GetMasterNode(), 1738 self.instance.primary_node, 1739 self.op.target_node_uuid, 1740 ] 1741 return (nl, nl)
1742
1743 - def CheckPrereq(self):
1744      """Check prerequisites.
1745  
1746      This checks that the instance is in the cluster.
1747  
1748      """
1749      self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1750      assert self.instance is not None, \
1751        "Cannot retrieve locked instance %s" % self.op.instance_name
1752  
1753      if self.instance.disk_template not in constants.DTS_COPYABLE:
1754        raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1755                                   self.instance.disk_template,
1756                                   errors.ECODE_STATE)
1757  
1758      target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1759      assert target_node is not None, \
1760        "Cannot retrieve locked node %s" % self.op.target_node
1761  
1762      self.target_node_uuid = target_node.uuid
1763      if target_node.uuid == self.instance.primary_node:
1764        raise errors.OpPrereqError("Instance %s is already on the node %s" %
1765                                   (self.instance.name, target_node.name),
1766                                   errors.ECODE_STATE)
1767  
1768      bep = self.cfg.GetClusterInfo().FillBE(self.instance)
1769  
1770      for idx, dsk in enumerate(self.instance.disks):
1771        if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1772                                constants.DT_SHARED_FILE):
1773          raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1774                                     " cannot copy" % idx, errors.ECODE_STATE)
1775  
1776      CheckNodeOnline(self, target_node.uuid)
1777      CheckNodeNotDrained(self, target_node.uuid)
1778      CheckNodeVmCapable(self, target_node.uuid)
1779      cluster = self.cfg.GetClusterInfo()
1780      group_info = self.cfg.GetNodeGroup(target_node.group)
1781      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1782      CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1783                             ignore=self.op.ignore_ipolicy)
1784  
1785      if self.instance.admin_state == constants.ADMINST_UP:
1786        # check memory requirements on the target node
1787        CheckNodeFreeMemory(
1788            self, target_node.uuid, "moving instance %s" %
1789            self.instance.name, bep[constants.BE_MAXMEM],
1790            self.instance.hypervisor,
1791            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1792      else:
1793        self.LogInfo("Not checking memory on the target node as"
1794                     " instance will not be started")
1795  
1796      # check bridge existence
1797      CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1798
1799 - def Exec(self, feedback_fn):
1800 """Move an instance. 1801 1802 The move is done by shutting it down on its present node, copying 1803 the data over (slow) and starting it on the new node. 1804 1805 """ 1806 source_node = self.cfg.GetNodeInfo(self.instance.primary_node) 1807 target_node = self.cfg.GetNodeInfo(self.target_node_uuid) 1808 1809 self.LogInfo("Shutting down instance %s on source node %s", 1810 self.instance.name, source_node.name) 1811 1812 assert (self.owned_locks(locking.LEVEL_NODE) == 1813 self.owned_locks(locking.LEVEL_NODE_RES)) 1814 1815 result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance, 1816 self.op.shutdown_timeout, 1817 self.op.reason) 1818 if self.op.ignore_consistency: 1819 result.Warn("Could not shutdown instance %s on node %s. Proceeding" 1820 " anyway. Please make sure node %s is down. Error details" % 1821 (self.instance.name, source_node.name, source_node.name), 1822 self.LogWarning) 1823 else: 1824 result.Raise("Could not shutdown instance %s on node %s" % 1825 (self.instance.name, source_node.name)) 1826 1827 # create the target disks 1828 try: 1829 CreateDisks(self, self.instance, target_node_uuid=target_node.uuid) 1830 except errors.OpExecError: 1831 self.LogWarning("Device creation failed") 1832 self.cfg.ReleaseDRBDMinors(self.instance.uuid) 1833 raise 1834 1835 cluster_name = self.cfg.GetClusterInfo().cluster_name 1836 1837 errs = [] 1838 # activate, get path, copy the data over 1839 for idx, disk in enumerate(self.instance.disks): 1840 self.LogInfo("Copying data for disk %d", idx) 1841 result = self.rpc.call_blockdev_assemble( 1842 target_node.uuid, (disk, self.instance), self.instance.name, 1843 True, idx) 1844 if result.fail_msg: 1845 self.LogWarning("Can't assemble newly created disk %d: %s", 1846 idx, result.fail_msg) 1847 errs.append(result.fail_msg) 1848 break 1849 dev_path = result.payload 1850 result = self.rpc.call_blockdev_export(source_node.uuid, (disk, 1851 self.instance), 1852 target_node.secondary_ip, 1853 dev_path, cluster_name) 1854 if result.fail_msg: 1855 self.LogWarning("Can't copy data over for disk %d: %s", 1856 idx, result.fail_msg) 1857 errs.append(result.fail_msg) 1858 break 1859 1860 if errs: 1861 self.LogWarning("Some disks failed to copy, aborting") 1862 try: 1863 RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid) 1864 finally: 1865 self.cfg.ReleaseDRBDMinors(self.instance.uuid) 1866 raise errors.OpExecError("Errors during disk copy: %s" % 1867 (",".join(errs),)) 1868 1869 self.instance.primary_node = target_node.uuid 1870 self.cfg.Update(self.instance, feedback_fn) 1871 1872 self.LogInfo("Removing the disks on the original node") 1873 RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid) 1874 1875 # Only start the instance if it's marked as up 1876 if self.instance.admin_state == constants.ADMINST_UP: 1877 self.LogInfo("Starting instance %s on node %s", 1878 self.instance.name, target_node.name) 1879 1880 disks_ok, _ = AssembleInstanceDisks(self, self.instance, 1881 ignore_secondaries=True) 1882 if not disks_ok: 1883 ShutdownInstanceDisks(self, self.instance) 1884 raise errors.OpExecError("Can't activate the instance's disks") 1885 1886 result = self.rpc.call_instance_start(target_node.uuid, 1887 (self.instance, None, None), False, 1888 self.op.reason) 1889 msg = result.fail_msg 1890 if msg: 1891 ShutdownInstanceDisks(self, self.instance) 1892 raise errors.OpExecError("Could not start instance %s on node %s: %s" % 1893 (self.instance.name, target_node.name, msg))
1894
1895 1896 -class LUInstanceMultiAlloc(NoHooksLU):
1897 """Allocates multiple instances at the same time. 1898 1899 """ 1900 REQ_BGL = False 1901
1902 - def CheckArguments(self):
1903      """Check arguments.
1904  
1905      """
1906      nodes = []
1907      for inst in self.op.instances:
1908        if inst.iallocator is not None:
1909          raise errors.OpPrereqError("iallocator is not allowed to be set on"
1910                                     " instance objects", errors.ECODE_INVAL)
1911        nodes.append(bool(inst.pnode))
1912        if inst.disk_template in constants.DTS_INT_MIRROR:
1913          nodes.append(bool(inst.snode))
1914  
1915      has_nodes = compat.any(nodes)
1916      if compat.all(nodes) ^ has_nodes:
1917        raise errors.OpPrereqError("Some instance objects provide"
1918                                   " pnode/snode while others do not",
1919                                   errors.ECODE_INVAL)
1920  
1921      if not has_nodes and self.op.iallocator is None:
1922        default_iallocator = self.cfg.GetDefaultIAllocator()
1923        if default_iallocator:
1924          self.op.iallocator = default_iallocator
1925        else:
1926          raise errors.OpPrereqError("No iallocator or nodes on the instances"
1927                                     " given and no cluster-wide default"
1928                                     " iallocator found; please specify either"
1929                                     " an iallocator or nodes on the instances"
1930                                     " or set a cluster-wide default iallocator",
1931                                     errors.ECODE_INVAL)
1932  
1933      _CheckOpportunisticLocking(self.op)
1934  
1935      dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1936      if dups:
1937        raise errors.OpPrereqError("There are duplicate instance names: %s" %
1938                                   utils.CommaJoin(dups), errors.ECODE_INVAL)
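# Editor's sketch, not part of the module: the all-or-none check above relies
# on all(xs) ^ any(xs) being true exactly when some, but not all, entries are
# true, i.e. when only a subset of the instances name their nodes.
mixed = [True, False]      # some instances specify pnode/snode, some do not
uniform = [True, True]     # every instance specifies its nodes
assert all(mixed) ^ any(mixed)            # rejected with OpPrereqError
assert not (all(uniform) ^ any(uniform))  # accepted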
1939
1940 - def ExpandNames(self):
1941      """Calculate the locks.
1942  
1943      """
1944      self.share_locks = ShareAll()
1945      self.needed_locks = {
1946        # iallocator will select nodes and even if no iallocator is used,
1947        # collisions with LUInstanceCreate should be avoided
1948        locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1949        }
1950  
1951      if self.op.iallocator:
1952        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1953        self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1954  
1955        if self.op.opportunistic_locking:
1956          self.opportunistic_locks[locking.LEVEL_NODE] = True
1957      else:
1958        nodeslist = []
1959        for inst in self.op.instances:
1960          (inst.pnode_uuid, inst.pnode) = \
1961            ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1962          nodeslist.append(inst.pnode_uuid)
1963          if inst.snode is not None:
1964            (inst.snode_uuid, inst.snode) = \
1965              ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1966            nodeslist.append(inst.snode_uuid)
1967  
1968        self.needed_locks[locking.LEVEL_NODE] = nodeslist
1969        # Lock resources of instance's primary and secondary nodes (copy to
1970        # prevent accidental modification)
1971        self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1972
1973 - def DeclareLocks(self, level):
1974 if level == locking.LEVEL_NODE_RES and \ 1975 self.opportunistic_locks[locking.LEVEL_NODE]: 1976 # Even when using opportunistic locking, we require the same set of 1977 # NODE_RES locks as we got NODE locks 1978 self.needed_locks[locking.LEVEL_NODE_RES] = \ 1979 self.owned_locks(locking.LEVEL_NODE)
1980
1981 - def CheckPrereq(self):
1982 """Check prerequisite. 1983 1984 """ 1985 if self.op.iallocator: 1986 cluster = self.cfg.GetClusterInfo() 1987 default_vg = self.cfg.GetVGName() 1988 ec_id = self.proc.GetECId() 1989 1990 if self.op.opportunistic_locking: 1991 # Only consider nodes for which a lock is held 1992 node_whitelist = self.cfg.GetNodeNames( 1993 list(self.owned_locks(locking.LEVEL_NODE))) 1994 else: 1995 node_whitelist = None 1996 1997 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg), 1998 _ComputeNics(op, cluster, None, 1999 self.cfg, ec_id), 2000 _ComputeFullBeParams(op, cluster), 2001 node_whitelist) 2002 for op in self.op.instances] 2003 2004 req = iallocator.IAReqMultiInstanceAlloc(instances=insts) 2005 ial = iallocator.IAllocator(self.cfg, self.rpc, req) 2006 2007 ial.Run(self.op.iallocator) 2008 2009 if not ial.success: 2010 raise errors.OpPrereqError("Can't compute nodes using" 2011 " iallocator '%s': %s" % 2012 (self.op.iallocator, ial.info), 2013 errors.ECODE_NORES) 2014 2015 self.ia_result = ial.result 2016 2017 if self.op.dry_run: 2018 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), { 2019 constants.JOB_IDS_KEY: [], 2020 })
2021
2022 - def _ConstructPartialResult(self):
2023      """Constructs the partial result.
2024  
2025      """
2026      if self.op.iallocator:
2027        (allocatable, failed_insts) = self.ia_result
2028        allocatable_insts = map(compat.fst, allocatable)
2029      else:
2030        allocatable_insts = [op.instance_name for op in self.op.instances]
2031        failed_insts = []
2032  
2033      return {
2034        opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY: allocatable_insts,
2035        opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed_insts,
2036        }
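# Editor's note, not part of the module: assuming the opcode constants render
# as "allocatable" and "failed", the partial result has roughly this shape
# (instance names are made up):
#   {
#     "allocatable": ["inst1.example.com", "inst2.example.com"],
#     "failed": ["inst3.example.com"],
#   }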
2037
2038 - def Exec(self, feedback_fn):
2039      """Executes the opcode.
2040  
2041      """
2042      jobs = []
2043      if self.op.iallocator:
2044        op2inst = dict((op.instance_name, op) for op in self.op.instances)
2045        (allocatable, failed) = self.ia_result
2046  
2047        for (name, node_names) in allocatable:
2048          op = op2inst.pop(name)
2049  
2050          (op.pnode_uuid, op.pnode) = \
2051            ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2052          if len(node_names) > 1:
2053            (op.snode_uuid, op.snode) = \
2054              ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2055  
2056          jobs.append([op])
2057  
2058        missing = set(op2inst.keys()) - set(failed)
2059        assert not missing, \
2060          "Iallocator returned an incomplete result: %s" % \
2061          utils.CommaJoin(missing)
2062      else:
2063        jobs.extend([op] for op in self.op.instances)
2064  
2065      return ResultWithJobs(jobs, **self._ConstructPartialResult())
2066
2067 2068 -class _InstNicModPrivate:
2069 """Data structure for network interface modifications. 2070 2071 Used by L{LUInstanceSetParams}. 2072 2073 """
2074 - def __init__(self):
2075 self.params = None 2076 self.filled = None
2077
2078 2079 -def _PrepareContainerMods(mods, private_fn):
2080 """Prepares a list of container modifications by adding a private data field. 2081 2082 @type mods: list of tuples; (operation, index, parameters) 2083 @param mods: List of modifications 2084 @type private_fn: callable or None 2085 @param private_fn: Callable for constructing a private data field for a 2086 modification 2087 @rtype: list 2088 2089 """ 2090 if private_fn is None: 2091 fn = lambda: None 2092 else: 2093 fn = private_fn 2094 2095 return [(op, idx, params, fn()) for (op, idx, params) in mods]
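# Editor's sketch, not part of the module: preparing NIC modifications with a
# per-modification private object; the parameter values are illustrative only.
mods = [(constants.DDM_ADD, -1, {constants.INIC_MAC: constants.VALUE_AUTO}),
        (constants.DDM_REMOVE, 0, {})]
prepared = _PrepareContainerMods(mods, _InstNicModPrivate)
# each entry is now (op, identifier, params, private), with a fresh
# _InstNicModPrivate instance attached to every modification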
2096
2097 2098 -def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2099    """Checks if nodes have enough physical CPUs.
2100  
2101    This function checks if all given nodes have the needed number of
2102    physical CPUs. In case any node has fewer CPUs or we cannot get the
2103    information from the node, this function raises an OpPrereqError
2104    exception.
2105  
2106    @type lu: C{LogicalUnit}
2107    @param lu: a logical unit from which we get configuration data
2108    @type node_uuids: C{list}
2109    @param node_uuids: the list of node UUIDs to check
2110    @type requested: C{int}
2111    @param requested: the minimum acceptable number of physical CPUs
2112    @type hypervisor_specs: list of pairs (string, dict of strings)
2113    @param hypervisor_specs: list of hypervisor specifications in
2114        pairs (hypervisor_name, hvparams)
2115    @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2116        or we cannot check the node
2117  
2118    """
2119    nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2120    for node_uuid in node_uuids:
2121      info = nodeinfo[node_uuid]
2122      node_name = lu.cfg.GetNodeName(node_uuid)
2123      info.Raise("Cannot get current information from node %s" % node_name,
2124                 prereq=True, ecode=errors.ECODE_ENVIRON)
2125      (_, _, (hv_info, )) = info.payload
2126      num_cpus = hv_info.get("cpu_total", None)
2127      if not isinstance(num_cpus, int):
2128        raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2129                                   " on node %s, result was '%s'" %
2130                                   (node_name, num_cpus), errors.ECODE_ENVIRON)
2131      if requested > num_cpus:
2132        raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2133                                   "required" % (node_name, num_cpus, requested),
2134                                   errors.ECODE_NORES)
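# Editor's sketch, not part of the module: hypervisor_specs pairs a hypervisor
# name with its parameters, typically taken from the cluster configuration:
#   hv_name = instance.hypervisor
#   hv_specs = [(hv_name, cluster.hvparams[hv_name])]
#   _CheckNodesPhysicalCPUs(lu, node_uuids, requested_cpus, hv_specs)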
2135
2136 2137 -def GetItemFromContainer(identifier, kind, container):
2138    """Return the item referred to by the identifier.
2139  
2140    @type identifier: string
2141    @param identifier: Item index or name or UUID
2142    @type kind: string
2143    @param kind: One-word item description
2144    @type container: list
2145    @param container: Container to get the item from
2146  
2147    """
2148    # Index
2149    try:
2150      idx = int(identifier)
2151      if idx == -1:
2152        # Append
2153        absidx = len(container) - 1
2154      elif idx < 0:
2155        raise IndexError("Not accepting negative indices other than -1")
2156      elif idx > len(container):
2157        raise IndexError("Got %s index %s, but there are only %s" %
2158                         (kind, idx, len(container)))
2159      else:
2160        absidx = idx
2161      return (absidx, container[idx])
2162    except ValueError:
2163      pass
2164  
2165    for idx, item in enumerate(container):
2166      if item.uuid == identifier or item.name == identifier:
2167        return (idx, item)
2168  
2169    raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2170                               (kind, identifier), errors.ECODE_NOENT)
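# Editor's sketch, not part of the module: the identifier may be a numeric
# index (int or string), -1 for the last item, or an item name/UUID, e.g.:
#   GetItemFromContainer(0, "disk", instance.disks)             # first disk
#   GetItemFromContainer("-1", "disk", instance.disks)          # last disk
#   GetItemFromContainer("data-disk", "disk", instance.disks)   # by name
# An unknown identifier raises OpPrereqError with ECODE_NOENT.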
2171
2172 2173 -def _ApplyContainerMods(kind, container, chgdesc, mods, 2174 create_fn, modify_fn, remove_fn):
2175    """Applies descriptions in C{mods} to C{container}.
2176  
2177    @type kind: string
2178    @param kind: One-word item description
2179    @type container: list
2180    @param container: Container to modify
2181    @type chgdesc: None or list
2182    @param chgdesc: List of applied changes
2183    @type mods: list
2184    @param mods: Modifications as returned by L{_PrepareContainerMods}
2185    @type create_fn: callable
2186    @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2187      receives absolute item index, parameters and private data object as added
2188      by L{_PrepareContainerMods}, returns tuple containing new item and changes
2189      as list
2190    @type modify_fn: callable
2191    @param modify_fn: Callback for modifying an existing item
2192      (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2193      and private data object as added by L{_PrepareContainerMods}, returns
2194      changes as list
2195    @type remove_fn: callable
2196    @param remove_fn: Callback on removing item; receives absolute item index,
2197      item and private data object as added by L{_PrepareContainerMods}
2198  
2199    """
2200    for (op, identifier, params, private) in mods:
2201      changes = None
2202  
2203      if op == constants.DDM_ADD:
2204        # Calculate where item will be added
2205        # When adding an item, identifier can only be an index
2206        try:
2207          idx = int(identifier)
2208        except ValueError:
2209          raise errors.OpPrereqError("Only a non-negative integer or -1 is accepted as"
2210                                     " identifier for %s" % constants.DDM_ADD,
2211                                     errors.ECODE_INVAL)
2212        if idx == -1:
2213          addidx = len(container)
2214        else:
2215          if idx < 0:
2216            raise IndexError("Not accepting negative indices other than -1")
2217          elif idx > len(container):
2218            raise IndexError("Got %s index %s, but there are only %s" %
2219                             (kind, idx, len(container)))
2220          addidx = idx
2221  
2222        if create_fn is None:
2223          item = params
2224        else:
2225          (item, changes) = create_fn(addidx, params, private)
2226  
2227        if idx == -1:
2228          container.append(item)
2229        else:
2230          assert idx >= 0
2231          assert idx <= len(container)
2232          # list.insert does so before the specified index
2233          container.insert(idx, item)
2234      else:
2235        # Retrieve existing item
2236        (absidx, item) = GetItemFromContainer(identifier, kind, container)
2237  
2238        if op == constants.DDM_REMOVE:
2239          assert not params
2240  
2241          if remove_fn is not None:
2242            remove_fn(absidx, item, private)
2243  
2244          changes = [("%s/%s" % (kind, absidx), "remove")]
2245  
2246          assert container[absidx] == item
2247          del container[absidx]
2248        elif op == constants.DDM_MODIFY:
2249          if modify_fn is not None:
2250            changes = modify_fn(absidx, item, params, private)
2251        else:
2252          raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2253  
2254      assert _TApplyContModsCbChanges(changes)
2255  
2256      if not (chgdesc is None or changes is None):
2257        chgdesc.extend(changes)
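# Editor's sketch, not part of the module: a minimal round trip through the two
# helpers above, with no callbacks so the parameters themselves become the new
# items; values are illustrative only.
container = ["item0"]
chgdesc = []
mods = _PrepareContainerMods([(constants.DDM_ADD, -1, "item1")], None)
_ApplyContainerMods("demo", container, chgdesc, mods, None, None, None)
# container == ["item0", "item1"]; chgdesc stays empty because no callback
# reported any changes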
2258
2259 2260 -def _UpdateIvNames(base_index, disks):
2261 """Updates the C{iv_name} attribute of disks. 2262 2263 @type disks: list of L{objects.Disk} 2264 2265 """ 2266 for (idx, disk) in enumerate(disks): 2267 disk.iv_name = "disk/%s" % (base_index + idx, )
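# Editor's note, not part of the module: after disks have been added or
# removed, e.g. _UpdateIvNames(0, instance.disks) renumbers the visible names
# to "disk/0", "disk/1", ... in container order.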
2268
2269 2270 -class LUInstanceSetParams(LogicalUnit):
2271    """Modifies an instance's parameters.
2272  
2273    """
2274    HPATH = "instance-modify"
2275    HTYPE = constants.HTYPE_INSTANCE
2276    REQ_BGL = False
2277  
2278    @staticmethod
2279 - def _UpgradeDiskNicMods(kind, mods, verify_fn):
2280 assert ht.TList(mods) 2281 assert not mods or len(mods[0]) in (2, 3) 2282 2283 if mods and len(mods[0]) == 2: 2284 result = [] 2285 2286 addremove = 0 2287 for op, params in mods: 2288 if op in (constants.DDM_ADD, constants.DDM_REMOVE): 2289 result.append((op, -1, params)) 2290 addremove += 1 2291 2292 if addremove > 1: 2293 raise errors.OpPrereqError("Only one %s add or remove operation is" 2294 " supported at a time" % kind, 2295 errors.ECODE_INVAL) 2296 else: 2297 result.append((constants.DDM_MODIFY, op, params)) 2298 2299 assert verify_fn(result) 2300 else: 2301 result = mods 2302 2303 return result
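# Editor's sketch, not part of the module: the upgrade turns the legacy
# two-element form into the (op, identifier, params) form used internally;
# parameter dicts are illustrative only.
#   [(constants.DDM_ADD, {...}), ("1", {...})]
# becomes
#   [(constants.DDM_ADD, -1, {...}), (constants.DDM_MODIFY, "1", {...})]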
2304 2305 @staticmethod
2306 - def _CheckMods(kind, mods, key_types, item_fn):
2307 """Ensures requested disk/NIC modifications are valid. 2308 2309 """ 2310 for (op, _, params) in mods: 2311 assert ht.TDict(params) 2312 2313 # If 'key_types' is an empty dict, we assume we have an 2314 # 'ext' template and thus do not ForceDictType 2315 if key_types: 2316 utils.ForceDictType(params, key_types) 2317 2318 if op == constants.DDM_REMOVE: 2319 if params: 2320 raise errors.OpPrereqError("No settings should be passed when" 2321 " removing a %s" % kind, 2322 errors.ECODE_INVAL) 2323 elif op in (constants.DDM_ADD, constants.DDM_MODIFY): 2324 item_fn(op, params) 2325 else: 2326 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2327
2328 - def _VerifyDiskModification(self, op, params, excl_stor):
2329      """Verifies a disk modification.
2330  
2331      """
2332      if op == constants.DDM_ADD:
2333        mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2334        if mode not in constants.DISK_ACCESS_SET:
2335          raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2336                                     errors.ECODE_INVAL)
2337  
2338        size = params.get(constants.IDISK_SIZE, None)
2339        if size is None:
2340          raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2341                                     constants.IDISK_SIZE, errors.ECODE_INVAL)
2342  
2343        try:
2344          size = int(size)
2345        except (TypeError, ValueError), err:
2346          raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2347                                     errors.ECODE_INVAL)
2348  
2349        params[constants.IDISK_SIZE] = size
2350        name = params.get(constants.IDISK_NAME, None)
2351        if name is not None and name.lower() == constants.VALUE_NONE:
2352          params[constants.IDISK_NAME] = None
2353  
2354        CheckSpindlesExclusiveStorage(params, excl_stor, True)
2355  
2356      elif op == constants.DDM_MODIFY:
2357        if constants.IDISK_SIZE in params:
2358          raise errors.OpPrereqError("Disk size change not possible, use"
2359                                     " grow-disk", errors.ECODE_INVAL)
2360  
2361        # Disk modification supports changing only the disk name and mode.
2362        # Changing arbitrary parameters is allowed only for the ext disk template.
2363        if self.instance.disk_template != constants.DT_EXT:
2364          utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2365  
2366        name = params.get(constants.IDISK_NAME, None)
2367        if name is not None and name.lower() == constants.VALUE_NONE:
2368          params[constants.IDISK_NAME] = None
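# Editor's sketch, not part of the module: a well-formed "add" disk
# modification as accepted by the check above (sizes are in MiB, values
# illustrative only).
add_disk_params = {
  constants.IDISK_SIZE: 10240,
  constants.IDISK_MODE: constants.DISK_RDWR,
}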
2369 2370 @staticmethod
2371 - def _VerifyNicModification(op, params):
2372      """Verifies a network interface modification.
2373  
2374      """
2375      if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2376        ip = params.get(constants.INIC_IP, None)
2377        name = params.get(constants.INIC_NAME, None)
2378        req_net = params.get(constants.INIC_NETWORK, None)
2379        link = params.get(constants.NIC_LINK, None)
2380        mode = params.get(constants.NIC_MODE, None)
2381        if name is not None and name.lower() == constants.VALUE_NONE:
2382          params[constants.INIC_NAME] = None
2383        if req_net is not None:
2384          if req_net.lower() == constants.VALUE_NONE:
2385            params[constants.INIC_NETWORK] = None
2386            req_net = None
2387          elif link is not None or mode is not None:
2388            raise errors.OpPrereqError("If a network is given, mode or link"
2389                                       " should not be set",
2390                                       errors.ECODE_INVAL)
2391  
2392        if op == constants.DDM_ADD:
2393          macaddr = params.get(constants.INIC_MAC, None)
2394          if macaddr is None:
2395            params[constants.INIC_MAC] = constants.VALUE_AUTO
2396  
2397        if ip is not None:
2398          if ip.lower() == constants.VALUE_NONE:
2399            params[constants.INIC_IP] = None
2400          else:
2401            if ip.lower() == constants.NIC_IP_POOL:
2402              if op == constants.DDM_ADD and req_net is None:
2403                raise errors.OpPrereqError("If ip=pool, parameter network"
2404                                           " cannot be none",
2405                                           errors.ECODE_INVAL)
2406            else:
2407              if not netutils.IPAddress.IsValid(ip):
2408                raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2409                                           errors.ECODE_INVAL)
2410  
2411        if constants.INIC_MAC in params:
2412          macaddr = params[constants.INIC_MAC]
2413          if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2414            macaddr = utils.NormalizeAndValidateMac(macaddr)
2415  
2416          if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2417            raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2418                                       " modifying an existing NIC",
2419                                       errors.ECODE_INVAL)
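# Editor's sketch, not part of the module: a NIC "add" that draws its address
# from an IP pool, which is why a network must be named; values illustrative.
add_nic_params = {
  constants.INIC_IP: constants.NIC_IP_POOL,
  constants.INIC_NETWORK: "example-net",
  constants.INIC_MAC: constants.VALUE_AUTO,
}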
2420
2421 - def CheckArguments(self):
2422 if not (self.op.nics or self.op.disks or self.op.disk_template or 2423 self.op.hvparams or self.op.beparams or self.op.os_name or 2424 self.op.osparams or self.op.offline is not None or 2425 self.op.runtime_mem or self.op.pnode): 2426 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL) 2427 2428 if self.op.hvparams: 2429 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, 2430 "hypervisor", "instance", "cluster") 2431 2432 self.op.disks = self._UpgradeDiskNicMods( 2433 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications) 2434 self.op.nics = self._UpgradeDiskNicMods( 2435 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications) 2436 2437 if self.op.disks and self.op.disk_template is not None: 2438 raise errors.OpPrereqError("Disk template conversion and other disk" 2439 " changes not supported at the same time", 2440 errors.ECODE_INVAL) 2441 2442 if (self.op.disk_template and 2443 self.op.disk_template in constants.DTS_INT_MIRROR and 2444 self.op.remote_node is None): 2445 raise errors.OpPrereqError("Changing the disk template to a mirrored" 2446 " one requires specifying a secondary node", 2447 errors.ECODE_INVAL) 2448 2449 # Check NIC modifications 2450 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES, 2451 self._VerifyNicModification) 2452 2453 if self.op.pnode: 2454 (self.op.pnode_uuid, self.op.pnode) = \ 2455 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2456
2457 - def ExpandNames(self):
2458      self._ExpandAndLockInstance()
2459      self.needed_locks[locking.LEVEL_NODEGROUP] = []
2460      # Can't even acquire node locks in shared mode as upcoming changes in
2461      # Ganeti 2.6 will start to modify the node object on disk conversion
2462      self.needed_locks[locking.LEVEL_NODE] = []
2463      self.needed_locks[locking.LEVEL_NODE_RES] = []
2464      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE
2465      # Lock the node group (shared) to look up the ipolicy
2466      self.share_locks[locking.LEVEL_NODEGROUP] = 1
2467
2468 - def DeclareLocks(self, level):
2469 if level == locking.LEVEL_NODEGROUP: 2470 assert not self.needed_locks[locking.LEVEL_NODEGROUP] 2471 # Acquire locks for the instance's nodegroups optimistically. Needs 2472 # to be verified in CheckPrereq 2473 self.needed_locks[locking.LEVEL_NODEGROUP] = \ 2474 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid) 2475 elif level == locking.LEVEL_NODE: 2476 self._LockInstancesNodes() 2477 if self.op.disk_template and self.op.remote_node: 2478 (self.op.remote_node_uuid, self.op.remote_node) = \ 2479 ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid, 2480 self.op.remote_node) 2481 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid) 2482 elif level == locking.LEVEL_NODE_RES and self.op.disk_template: 2483 # Copy node locks 2484 self.needed_locks[locking.LEVEL_NODE_RES] = \ 2485 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2486
2487 - def BuildHooksEnv(self):
2488 """Build hooks env. 2489 2490 This runs on the master, primary and secondaries. 2491 2492 """ 2493 args = {} 2494 if constants.BE_MINMEM in self.be_new: 2495 args["minmem"] = self.be_new[constants.BE_MINMEM] 2496 if constants.BE_MAXMEM in self.be_new: 2497 args[