22 """Logical units dealing with instances."""
23
24 import OpenSSL
25 import copy
26 import logging
27 import os
28
29 from ganeti import compat
30 from ganeti import constants
31 from ganeti import errors
32 from ganeti import ht
33 from ganeti import hypervisor
34 from ganeti import locking
35 from ganeti.masterd import iallocator
36 from ganeti import masterd
37 from ganeti import netutils
38 from ganeti import objects
39 from ganeti import opcodes
40 from ganeti import pathutils
41 from ganeti import rpc
42 from ganeti import utils
43
44 from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
45
46 from ganeti.cmdlib.common import INSTANCE_DOWN, \
47 INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
48 ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
49 LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
50 IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
51 AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
52 ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
53 from ganeti.cmdlib.instance_storage import CreateDisks, \
54 CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
55 IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
56 CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57 StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
58 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
59 GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
60 NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
61 ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
62 GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
63 CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
64
65 import ganeti.masterd.instance
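

# Accepts either None or a list of two-element (name, new-value) tuples
# describing changes reported by container-modification callbacks, e.g.
# [("mode", "routed"), ("link", "br0")] (illustrative values)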
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
78 """Ensures that a given hostname resolves to a 'sane' name.
79
80 The given name is required to be a prefix of the resolved hostname,
81 to prevent accidental mismatches.
82
83 @param lu: the logical unit on behalf of which we're checking
84 @param name: the name we should resolve and check
85 @return: the resolved hostname object
86
87 """
88 hostname = netutils.GetHostname(name=name)
89 if hostname.name != name:
90 lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
91 if not utils.MatchNameComponent(name, [hostname.name]):
92 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
93 " same as given hostname '%s'") %
94 (hostname.name, name), errors.ECODE_INVAL)
95 return hostname


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The full filled beparams
  @param node_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}
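
  The returned request is typically fed to an L{iallocator.IAllocator}
  and run, as done in L{LUInstanceCreate._RunAllocator} below.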

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_whitelist)
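

# NB: the original file defines _ComputeFullBeParams(op, cluster) here (it is
# called by LUInstanceCreate and LUInstanceMultiAlloc below); a minimal
# reconstruction sketch, assuming the standard parameter-fill pattern and the
# objects.UpgradeBeParams / constants.BES_PARAMETER_COMPAT helpers:
def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams (reconstruction sketch, see note above)."""
  # replace "auto" values with the cluster-level defaults
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.items():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  # upgrade any legacy "memory" setting to minmem/maxmem, then type-check
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_COMPAT)
  return cluster.SimpleFillBE(op.beparams)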


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The list of built-up NIC objects
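
  Each entry of C{op.nics} is a dict of NIC parameters, e.g.
  C{{constants.INIC_IP: "pool", constants.INIC_NETWORK: "net1"}}
  (values illustrative).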

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)
      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics


def _CheckForConflictingIp(lu, ip, node):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node: string
  @param node: node name
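
  @return: C{(None, None)} if the IP does not conflict with any connected
    network; otherwise an L{errors.OpPrereqError} is raised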

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meet the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}
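
  Spec values missing from C{instance_spec} default to C{None} (memory,
  CPU and spindle use) or to an empty count/list (disks and NICs) before
  being handed to C{_compute_fn}.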

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity
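
  For example, the name "debootstrap+default" selects variant "default"
  of the "debootstrap" OS (names illustrative).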

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False

    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)

    utils.ValidateDeviceNames("NIC", self.op.nics)

    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template not in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)))

    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    if (self.op.file_driver and
        self.op.file_driver not in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_DEFAULT

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name

    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
       self.opportunistic_locks[locking.LEVEL_NODE]:
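      # Even with opportunistic locking, the resource locks must match the
      # node locks that were actually acquired, so copy the owned set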
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)
585
587 """Run the allocator based on input opcode.
588
589 """
590 if self.op.opportunistic_locking:
591
592 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
593 else:
594 node_whitelist = None
595
596
597
598 req = _CreateInstanceAllocRequest(self.op, self.disks,
599 self.nics, self.be_full,
600 node_whitelist)
601 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
602
603 ial.Run(self.op.iallocator)
604
605 if not ial.success:
606
607 if self.op.opportunistic_locking:
608 ecode = errors.ECODE_TEMP_NORES
609 else:
610 ecode = errors.ECODE_NORES
611
612 raise errors.OpPrereqError("Can't compute nodes using"
613 " iallocator '%s': %s" %
614 (self.op.iallocator, ial.info),
615 ecode)
616
617 self.op.pnode = ial.result[0]
618 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
619 self.op.instance_name, self.op.iallocator,
620 utils.CommaJoin(ial.result))
621
622 assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
623
624 if req.RequiredNodes() == 2:
625 self.op.snode = ial.result[1]
626
628 """Build hooks env.
629
630 This runs on master, primary and secondary nodes of the instance.
631
632 """
633 env = {
634 "ADD_MODE": self.op.mode,
635 }
636 if self.op.mode == constants.INSTANCE_IMPORT:
637 env["SRC_NODE"] = self.op.src_node
638 env["SRC_PATH"] = self.op.src_path
639 env["SRC_IMAGES"] = self.src_images
640
641 env.update(BuildInstanceHookEnv(
642 name=self.op.instance_name,
643 primary_node=self.op.pnode,
644 secondary_nodes=self.secondaries,
645 status=self.op.start,
646 os_type=self.op.os_type,
647 minmem=self.be_full[constants.BE_MINMEM],
648 maxmem=self.be_full[constants.BE_MAXMEM],
649 vcpus=self.be_full[constants.BE_VCPUS],
650 nics=NICListToTuple(self, self.nics),
651 disk_template=self.op.disk_template,
652 disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
653 d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
654 for d in self.disks],
655 bep=self.be_full,
656 hvp=self.hv_full,
657 hypervisor_name=self.op.hypervisor,
658 tags=self.op.tags,
659 ))
660
661 return env
662
664 """Build hooks nodes.
665
666 """
667 nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
668 return nl, nl

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if self.op.disk_template is None:
      if einfo.has_option(constants.INISECT_INS, "disk_template"):
        self.op.disk_template = einfo.get(constants.INISECT_INS,
                                          "disk_template")
        if self.op.disk_template not in constants.DISK_TEMPLATES:
          raise errors.OpPrereqError("Disk template specified in configuration"
                                     " file is not one of the allowed values:"
                                     " %s" %
                                     " ".join(constants.DISK_TEMPLATES),
                                     errors.ECODE_INVAL)
      else:
        raise errors.OpPrereqError("No disk template specified and the export"
                                   " is missing the disk_template information",
                                   errors.ECODE_INVAL)

    if not self.op.disks:
      disks = []

      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
          disk = {
            constants.IDISK_SIZE: disk_sz,
            constants.IDISK_NAME: disk_name
            }
          disks.append(disk)
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in [constants.INIC_IP,
                       constants.INIC_MAC, constants.INIC_NAME]:
            v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
            ndict[name] = v
          network = einfo.get(constants.INISECT_INS,
                              "nic%d_%s" % (idx, constants.INIC_NETWORK))

          if network:
            ndict[constants.INIC_NETWORK] = network
          else:
            for name in list(constants.NICS_PARAMETERS):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value

        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)
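
      # the resulting directory is
      # <cluster storage dir>/<optional subdir>/<instance name>
      # (layout illustrative)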
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp

    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    self.be_full = _ComputeFullBeParams(self.op, cluster)

    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)
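
    # Generate the MAC addresses here, so that the allocator and the hooks
    # see the final address rather than the 'auto'/'generate' placeholder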
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    if self.op.iallocator is not None:
      self._RunAllocator()

    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)

    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)

    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode)
      CheckNodeNotDrained(self, self.op.snode)
      CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

      snode = self.cfg.GetNodeInfo(self.op.snode)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      nodes = [pnode]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(snode)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        raise errors.OpPrereqError("Disk template %s not supported with"
                                   " exclusive storage" % self.op.disk_template,
                                   errors.ECODE_STATE)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        pass
      else:
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)

    elif self.op.disk_template == constants.DT_PLAIN:
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)

      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.name],
                                            list(all_disks))[pnode.name]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
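
    # Gather the computed instance spec for policy validation below, e.g.
    # {ISPEC_MEM_SIZE: 1024, ISPEC_DISK_SIZE: [10240], ...} (illustrative)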
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)

    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    if self.op.start:
      CheckNodeFreeMemory(self, self.pnode.name,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor)

    self.dry_run_result = list(nodenames)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    node = self.cfg.GetNodeInfo(pnode_name)
    nodegroup = self.cfg.GetNodeGroup(node.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance, pnode_name,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
    else:
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)

      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    iobj.disks_active = True

    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, pnode_name)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             instance, idx)

          feedback_fn("* running the instance OS create scripts...")

          os_add_result = \
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             instance, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (instance, pnode_name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node, pnode_name,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
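          # The source cluster shuts the instance down before exporting, which
          # can take a while, so the shutdown timeout is added on top of the
          # connection timeout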
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.name
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self.source_instance_name

        else:
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        assert iobj.name == instance
        feedback_fn("Running rename script for %s" % instance)
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        if result.fail_msg:
          self.LogWarning("Failed to run rename script for %s on node"
                          " %s: %s" % (instance, pnode_name, result.fail_msg))

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    self.op.instance_name = ExpandInstanceName(self.cfg,
                                               self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert instance is not None
    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_list = self.cfg.GetInstanceList()
    if new_name in instance_list and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)

    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    StartInstanceDisks(self, inst, None)

    info = GetInstanceInfoText(inst)
    for (idx, disk) in enumerate(inst.disks):
      for node in inst.all_nodes:
        self.cfg.SetDiskID(disk, node)
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
        if result.fail_msg:
          self.LogWarning("Error setting info on node %s for disk %s: %s",
                          node, idx, result.fail_msg)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.LogWarning(msg)
    finally:
      ShutdownInstanceDisks(self, inst)

    return inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 instance.disk_template, errors.ECODE_STATE)

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node)
    CheckNodeNotDrained(self, target_node)
    CheckNodeVmCapable(self, target_node)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if instance.admin_state == constants.ADMINST_UP:
      CheckNodeFreeMemory(self, target_node,
                          "failing over instance %s" %
                          instance.name, bep[constants.BE_MAXMEM],
                          instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.LogWarning("Could not shutdown instance %s on node %s."
                        " Proceeding anyway. Please make sure node"
                        " %s is down. Error details: %s",
                        instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    try:
      CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []

    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
                                               instance.name, True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, instance, target_node=source_node)

    if instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = AssembleInstanceDisks(self, instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node,
                                            (instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " individual instance objects",
                                   errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if not has_nodes and self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
        nodeslist.append(inst.pnode)
        if inst.snode is not None:
          inst.snode = ExpandNodeName(self.cfg, inst.snode)
          nodeslist.append(inst.snode)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
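      # Also lock resources of the instances' nodes; copy the node list so
      # later modifications of one lock set don't leak into the other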
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
       self.opportunistic_locks[locking.LEVEL_NODE]:
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)
1954
1955 def CheckPrereq(self):
1956 """Check prerequisite.
1957
1958 """
1959 if self.op.iallocator:
1960 cluster = self.cfg.GetClusterInfo()
1961 default_vg = self.cfg.GetVGName()
1962 ec_id = self.proc.GetECId()
1963
1964 if self.op.opportunistic_locking:
1965 # Only consider nodes for which a lock is held
1966 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
1967 else:
1968 node_whitelist = None
1969
1970 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1971 _ComputeNics(op, cluster, None,
1972 self.cfg, ec_id),
1973 _ComputeFullBeParams(op, cluster),
1974 node_whitelist)
1975 for op in self.op.instances]
1976
1977 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
1978 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1979
1980 ial.Run(self.op.iallocator)
1981
1982 if not ial.success:
1983 raise errors.OpPrereqError("Can't compute nodes using"
1984 " iallocator '%s': %s" %
1985 (self.op.iallocator, ial.info),
1986 errors.ECODE_NORES)
1987
1988 self.ia_result = ial.result
1989
1990 if self.op.dry_run:
1991 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
1992 constants.JOB_IDS_KEY: [],
1993 })
1994
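# For reference (illustrative values, not from the original source),
# self.ia_result as consumed by Exec() below is a pair
# (allocatable, failed), e.g.:
#   allocatable = [("inst1.example.com", ["node1"]),
#                  ("inst2.example.com", ["node2", "node3"])]
#   failed = ["inst3.example.com"]
# A single node means a non-mirrored allocation; two nodes carry
# (pnode, snode).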
2010
2011 def Exec(self, feedback_fn):
2012 """Executes the opcode.
2013
2014 """
2015 jobs = []
2016 if self.op.iallocator:
2017 op2inst = dict((op.instance_name, op) for op in self.op.instances)
2018 (allocatable, failed) = self.ia_result
2019
2020 for (name, nodes) in allocatable:
2021 op = op2inst.pop(name)
2022
2023 if len(nodes) > 1:
2024 (op.pnode, op.snode) = nodes
2025 else:
2026 (op.pnode,) = nodes
2027
2028 jobs.append([op])
2029
2030 missing = set(op2inst.keys()) - set(failed)
2031 assert not missing, \
2032 "Iallocator did return incomplete result: %s" % \
2033 utils.CommaJoin(missing)
2034 else:
2035 jobs.extend([op] for op in self.op.instances)
2036
2037 return ResultWithJobs(jobs, **self._ConstructPartialResult())
2038
2040 class _InstNicModPrivate:
2041 """Data structure for network interface modifications.
2042
2043 Used by L{LUInstanceSetParams}.
2044
2045 """
2047 self.params = None
2048 self.filled = None
2049
2051 def _PrepareContainerMods(mods, private_fn):
2052 """Prepares a list of container modifications by adding a private data field.
2053
2054 @type mods: list of tuples; (operation, index, parameters)
2055 @param mods: List of modifications
2056 @type private_fn: callable or None
2057 @param private_fn: Callable for constructing a private data field for a
2058 modification
2059 @rtype: list
2060
2061 """
2062 if private_fn is None:
2063 fn = lambda: None
2064 else:
2065 fn = private_fn
2066
2067 return [(op, idx, params, fn()) for (op, idx, params) in mods]
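# Illustrative example (hypothetical values): given
#   mods = [(constants.DDM_ADD, -1, {"size": 1024})]
# and private_fn=_InstNicModPrivate, this would return
#   [(constants.DDM_ADD, -1, {"size": 1024}, <_InstNicModPrivate>)]
# i.e. every modification is extended with a fresh private data object.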
2068
2070 def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
2071 """Checks if nodes have enough physical CPUs
2072
2073 This function checks if all given nodes have the needed number of
2074 physical CPUs. In case any node has fewer CPUs or we cannot get the
2075 information from the node, this function raises an OpPrereqError
2076 exception.
2077
2078 @type lu: C{LogicalUnit}
2079 @param lu: a logical unit from which we get configuration data
2080 @type nodenames: C{list}
2081 @param nodenames: the list of node names to check
2082 @type requested: C{int}
2083 @param requested: the minimum acceptable number of physical CPUs
2084 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2085 or we cannot check the node
2086
2087 """
2088 nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
2089 for node in nodenames:
2090 info = nodeinfo[node]
2091 info.Raise("Cannot get current information from node %s" % node,
2092 prereq=True, ecode=errors.ECODE_ENVIRON)
2093 (_, _, (hv_info, )) = info.payload
2094 num_cpus = hv_info.get("cpu_total", None)
2095 if not isinstance(num_cpus, int):
2096 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2097 " on node %s, result was '%s'" %
2098 (node, num_cpus), errors.ECODE_ENVIRON)
2099 if requested > num_cpus:
2100 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2101 "required" % (node, num_cpus, requested),
2102 errors.ECODE_NORES)
2103
2105 def GetItemFromContainer(identifier, kind, container):
2106 """Return the item refered by the identifier.
2107
2108 @type identifier: string
2109 @param identifier: Item index or name or UUID
2110 @type kind: string
2111 @param kind: One-word item description
2112 @type container: list
2113 @param container: Container to get the item from
2114
2115 """
2116
2117 try:
2118 idx = int(identifier)
2119 if idx == -1:
2120 # -1 addresses the last item
2121 absidx = len(container) - 1
2122 elif idx < 0:
2123 raise IndexError("Not accepting negative indices other than -1")
2124 elif idx >= len(container):
2125 raise IndexError("Got %s index %s, but there are only %s" %
2126 (kind, idx, len(container)))
2127 else:
2128 absidx = idx
2129 return (absidx, container[idx])
2130 except ValueError:
2131 pass
2132
2133 for idx, item in enumerate(container):
2134 if item.uuid == identifier or item.name == identifier:
2135 return (idx, item)
2136
2137 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2138 (kind, identifier), errors.ECODE_NOENT)
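# Illustrative example (hypothetical three-item container "disks"):
#   GetItemFromContainer("1", "disk", disks)  -> (1, disks[1])
#   GetItemFromContainer("-1", "disk", disks) -> (2, disks[2])
# and a UUID or name argument is looked up among the items; anything
# unknown raises OpPrereqError with ECODE_NOENT.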
2139
2140
2141 def _ApplyContainerMods(kind, container, chgdesc, mods,
2142 create_fn, modify_fn, remove_fn):
2143 """Applies descriptions in C{mods} to C{container}.
2144
2145 @type kind: string
2146 @param kind: One-word item description
2147 @type container: list
2148 @param container: Container to modify
2149 @type chgdesc: None or list
2150 @param chgdesc: List of applied changes
2151 @type mods: list
2152 @param mods: Modifications as returned by L{_PrepareContainerMods}
2153 @type create_fn: callable or None
2154 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2155 receives absolute item index, parameters and private data object as added
2156 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2157 as list
2158 @type modify_fn: callable or None
2159 @param modify_fn: Callback for modifying an existing item
2160 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2161 and private data object as added by L{_PrepareContainerMods}, returns
2162 changes as list
2163 @type remove_fn: callable or None
2164 @param remove_fn: Callback on removing item; receives absolute item index,
2165 item and private data object as added by L{_PrepareContainerMods}
2166
2167 """
2168 for (op, identifier, params, private) in mods:
2169 changes = None
2170
2171 if op == constants.DDM_ADD:
2172 # Calculate where the item will be added
2173 # When adding an item, the identifier can only be an index
2174 try:
2175 idx = int(identifier)
2176 except ValueError:
2177 raise errors.OpPrereqError("Only a positive integer or -1 is accepted as"
2178 " identifier for %s" % constants.DDM_ADD,
2179 errors.ECODE_INVAL)
2180 if idx == -1:
2181 addidx = len(container)
2182 else:
2183 if idx < 0:
2184 raise IndexError("Not accepting negative indices other than -1")
2185 elif idx > len(container):
2186 raise IndexError("Got %s index %s, but there are only %s" %
2187 (kind, idx, len(container)))
2188 addidx = idx
2189
2190 if create_fn is None:
2191 item = params
2192 else:
2193 (item, changes) = create_fn(addidx, params, private)
2194
2195 if idx == -1:
2196 container.append(item)
2197 else:
2198 assert idx >= 0
2199 assert idx <= len(container)
2200
2201 container.insert(idx, item)
2202 else:
2203
2204 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2205
2206 if op == constants.DDM_REMOVE:
2207 assert not params
2208
2209 if remove_fn is not None:
2210 remove_fn(absidx, item, private)
2211
2212 changes = [("%s/%s" % (kind, absidx), "remove")]
2213
2214 assert container[absidx] == item
2215 del container[absidx]
2216 elif op == constants.DDM_MODIFY:
2217 if modify_fn is not None:
2218 changes = modify_fn(absidx, item, params, private)
2219 else:
2220 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2221
2222 assert _TApplyContModsCbChanges(changes)
2223
2224 if not (chgdesc is None or changes is None):
2225 chgdesc.extend(changes)
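# Illustrative example (hypothetical values): applying
#   mods = _PrepareContainerMods([(constants.DDM_REMOVE, "2", {})], None)
# to a five-item disk container deletes the item at index 2 and, when
# chgdesc is a list, appends ("disk/2", "remove") to it; DDM_ADD and
# DDM_MODIFY are routed through create_fn/modify_fn analogously.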
2226
2228 def _UpdateIvNames(base_index, disks):
2229 """Updates the C{iv_name} attribute of disks.
2230
2231 @type disks: list of L{objects.Disk}
2232
2233 """
2234 for (idx, disk) in enumerate(disks):
2235 disk.iv_name = "disk/%s" % (base_index + idx, )
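# Example: _UpdateIvNames(0, disks) names a three-disk list "disk/0",
# "disk/1" and "disk/2"; a non-zero base_index shifts the numbering.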
2236
2238 class LUInstanceSetParams(LogicalUnit):
2239 """Modifies an instances's parameters.
2240
2241 """
2242 HPATH = "instance-modify"
2243 HTYPE = constants.HTYPE_INSTANCE
2244 REQ_BGL = False
2245
2246 @staticmethod
2247 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2248 assert ht.TList(mods)
2249 assert not mods or len(mods[0]) in (2, 3)
2250
2251 if mods and len(mods[0]) == 2:
2252 result = []
2253
2254 addremove = 0
2255 for op, params in mods:
2256 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2257 result.append((op, -1, params))
2258 addremove += 1
2259
2260 if addremove > 1:
2261 raise errors.OpPrereqError("Only one %s add or remove operation is"
2262 " supported at a time" % kind,
2263 errors.ECODE_INVAL)
2264 else:
2265 result.append((constants.DDM_MODIFY, op, params))
2266
2267 assert verify_fn(result)
2268 else:
2269 result = mods
2270
2271 return result
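# Illustrative example (hypothetical parameters): the legacy 2-tuple form
#   [(constants.DDM_ADD, {"size": 1024}), ("2", {"mode": "ro"})]
# is upgraded to the 3-tuple form
#   [(constants.DDM_ADD, -1, {"size": 1024}),
#    (constants.DDM_MODIFY, "2", {"mode": "ro"})]
# while input that is already in 3-tuple form is passed through unchanged.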
2272
2273 @staticmethod
2295
2296 def _VerifyDiskModification(self, op, params):
2297 """Verifies a disk modification.
2298
2299 """
2300 if op == constants.DDM_ADD:
2301 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2302 if mode not in constants.DISK_ACCESS_SET:
2303 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2304 errors.ECODE_INVAL)
2305
2306 size = params.get(constants.IDISK_SIZE, None)
2307 if size is None:
2308 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2309 constants.IDISK_SIZE, errors.ECODE_INVAL)
2310
2311 try:
2312 size = int(size)
2313 except (TypeError, ValueError), err:
2314 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2315 errors.ECODE_INVAL)
2316
2317 params[constants.IDISK_SIZE] = size
2318 name = params.get(constants.IDISK_NAME, None)
2319 if name is not None and name.lower() == constants.VALUE_NONE:
2320 params[constants.IDISK_NAME] = None
2321
2322 elif op == constants.DDM_MODIFY:
2323 if constants.IDISK_SIZE in params:
2324 raise errors.OpPrereqError("Disk size change not possible, use"
2325 " grow-disk", errors.ECODE_INVAL)
2326
2327 # Disk modification supports changing only the disk name and mode;
2328 # changing arbitrary parameters is allowed only for the ext disk template
2329 if self.instance.disk_template != constants.DT_EXT:
2330 utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2331
2332 name = params.get(constants.IDISK_NAME, None)
2333 if name is not None and name.lower() == constants.VALUE_NONE:
2334 params[constants.IDISK_NAME] = None
2335
2336 @staticmethod
2337 def _VerifyNicModification(op, params):
2338 """Verifies a network interface modification.
2339
2340 """
2341 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2342 ip = params.get(constants.INIC_IP, None)
2343 name = params.get(constants.INIC_NAME, None)
2344 req_net = params.get(constants.INIC_NETWORK, None)
2345 link = params.get(constants.NIC_LINK, None)
2346 mode = params.get(constants.NIC_MODE, None)
2347 if name is not None and name.lower() == constants.VALUE_NONE:
2348 params[constants.INIC_NAME] = None
2349 if req_net is not None:
2350 if req_net.lower() == constants.VALUE_NONE:
2351 params[constants.INIC_NETWORK] = None
2352 req_net = None
2353 elif link is not None or mode is not None:
2354 raise errors.OpPrereqError("If a network is given, mode or link"
2355 " must not be given",
2356 errors.ECODE_INVAL)
2357
2358 if op == constants.DDM_ADD:
2359 macaddr = params.get(constants.INIC_MAC, None)
2360 if macaddr is None:
2361 params[constants.INIC_MAC] = constants.VALUE_AUTO
2362
2363 if ip is not None:
2364 if ip.lower() == constants.VALUE_NONE:
2365 params[constants.INIC_IP] = None
2366 else:
2367 if ip.lower() == constants.NIC_IP_POOL:
2368 if op == constants.DDM_ADD and req_net is None:
2369 raise errors.OpPrereqError("If ip=pool, parameter network"
2370 " cannot be none",
2371 errors.ECODE_INVAL)
2372 else:
2373 if not netutils.IPAddress.IsValid(ip):
2374 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2375 errors.ECODE_INVAL)
2376
2377 if constants.INIC_MAC in params:
2378 macaddr = params[constants.INIC_MAC]
2379 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2380 macaddr = utils.NormalizeAndValidateMac(macaddr)
2381
2382 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2383 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2384 " modifying an existing NIC",
2385 errors.ECODE_INVAL)
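# Illustrative example (hypothetical values): for DDM_ADD the parameters
#   {constants.INIC_IP: "pool", constants.INIC_NETWORK: "net1"}
# are accepted (the IP is allocated from the network's pool later on),
# whereas ip="pool" without a network, or mac="auto" on DDM_MODIFY,
# raises OpPrereqError.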
2386
2387 def CheckArguments(self):
2388 if not (self.op.nics or self.op.disks or self.op.disk_template or
2389 self.op.hvparams or self.op.beparams or self.op.os_name or
2390 self.op.osparams or self.op.offline is not None or
2391 self.op.runtime_mem or self.op.pnode):
2392 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2393
2394 if self.op.hvparams:
2395 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2396 "hypervisor", "instance", "cluster")
2397
2398 self.op.disks = self._UpgradeDiskNicMods(
2399 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2400 self.op.nics = self._UpgradeDiskNicMods(
2401 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2402
2403 if self.op.disks and self.op.disk_template is not None:
2404 raise errors.OpPrereqError("Disk template conversion and other disk"
2405 " changes not supported at the same time",
2406 errors.ECODE_INVAL)
2407
2408 if (self.op.disk_template and
2409 self.op.disk_template in constants.DTS_INT_MIRROR and
2410 self.op.remote_node is None):
2411 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2412 " one requires specifying a secondary node",
2413 errors.ECODE_INVAL)
2414
2415 # Check NIC modifications
2416 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2417 self._VerifyNicModification)
2418
2419 if self.op.pnode:
2420 self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
2421
2432
2433 def DeclareLocks(self, level):
2434 if level == locking.LEVEL_NODEGROUP:
2435 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2436 # Acquire locks for the instance's nodegroups optimistically; needs
2437 # to be verified in CheckPrereq
2438 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2439 self.cfg.GetInstanceNodeGroups(self.op.instance_name)
2440 elif level == locking.LEVEL_NODE:
2441 self._LockInstancesNodes()
2442 if self.op.disk_template and self.op.remote_node:
2443 self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
2444 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
2445 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2446
2447 self.needed_locks[locking.LEVEL_NODE_RES] = \
2448 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2449
2484
2491
2492 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2493 old_params, cluster, pnode):
2494
2495 update_params_dict = dict([(key, params[key])
2496 for key in constants.NICS_PARAMETERS
2497 if key in params])
2498
2499 req_link = update_params_dict.get(constants.NIC_LINK, None)
2500 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2501
2502 new_net_uuid = None
2503 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2504 if new_net_uuid_or_name:
2505 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2506 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2507
2508 if old_net_uuid:
2509 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2510
2511 if new_net_uuid:
2512 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode)
2513 if not netparams:
2514 raise errors.OpPrereqError("No netparams found for the network"
2515 " %s, probably not connected" %
2516 new_net_obj.name, errors.ECODE_INVAL)
2517 new_params = dict(netparams)
2518 else:
2519 new_params = GetUpdatedParams(old_params, update_params_dict)
2520
2521 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2522
2523 new_filled_params = cluster.SimpleFillNIC(new_params)
2524 objects.NIC.CheckParameterSyntax(new_filled_params)
2525
2526 new_mode = new_filled_params[constants.NIC_MODE]
2527 if new_mode == constants.NIC_MODE_BRIDGED:
2528 bridge = new_filled_params[constants.NIC_LINK]
2529 msg = self.rpc.call_bridges_exist(pnode, [bridge]).fail_msg
2530 if msg:
2531 msg = "Error checking bridges on node '%s': %s" % (pnode, msg)
2532 if self.op.force:
2533 self.warn.append(msg)
2534 else:
2535 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2536
2537 elif new_mode == constants.NIC_MODE_ROUTED:
2538 ip = params.get(constants.INIC_IP, old_ip)
2539 if ip is None:
2540 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2541 " on a routed NIC", errors.ECODE_INVAL)
2542
2543 elif new_mode == constants.NIC_MODE_OVS:
2544 # TODO: check OVS link
2545 self.LogInfo("OVS links are currently not checked for correctness")
2546
2547 if constants.INIC_MAC in params:
2548 mac = params[constants.INIC_MAC]
2549 if mac is None:
2550 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2551 errors.ECODE_INVAL)
2552 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2553
2554 params[constants.INIC_MAC] = \
2555 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2556 else:
2557 # or validate/reserve the currently configured one
2558 try:
2559 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2560 except errors.ReservationError:
2561 raise errors.OpPrereqError("MAC address '%s' already in use"
2562 " in cluster" % mac,
2563 errors.ECODE_NOTUNIQUE)
2564 elif new_net_uuid != old_net_uuid:
2565
2566 def get_net_prefix(net_uuid):
2567 mac_prefix = None
2568 if net_uuid:
2569 nobj = self.cfg.GetNetwork(net_uuid)
2570 mac_prefix = nobj.mac_prefix
2571
2572 return mac_prefix
2573
2574 new_prefix = get_net_prefix(new_net_uuid)
2575 old_prefix = get_net_prefix(old_net_uuid)
2576 if old_prefix != new_prefix:
2577 params[constants.INIC_MAC] = \
2578 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2579
2580 # if there is a change in the (ip, network) tuple
2581 new_ip = params.get(constants.INIC_IP, old_ip)
2582 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2583 if new_ip:
2584 # if IP is pool then require a network and generate one IP
2585 if new_ip.lower() == constants.NIC_IP_POOL:
2586 if new_net_uuid:
2587 try:
2588 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2589 except errors.ReservationError:
2590 raise errors.OpPrereqError("Unable to get a free IP"
2591 " from the address pool",
2592 errors.ECODE_STATE)
2593 self.LogInfo("Chose IP %s from network %s",
2594 new_ip,
2595 new_net_obj.name)
2596 params[constants.INIC_IP] = new_ip
2597 else:
2598 raise errors.OpPrereqError("ip=pool, but no network found",
2599 errors.ECODE_INVAL)
2600
2601 elif new_net_uuid:
2602 try:
2603 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2604 self.LogInfo("Reserving IP %s in network %s",
2605 new_ip, new_net_obj.name)
2606 except errors.ReservationError:
2607 raise errors.OpPrereqError("IP %s not available in network %s" %
2608 (new_ip, new_net_obj.name),
2609 errors.ECODE_NOTUNIQUE)
2610
2611 elif self.op.conflicts_check:
2612 _CheckForConflictingIp(self, new_ip, pnode)
2613
2614 # release the old IP if the old network is set
2615 if old_ip and old_net_uuid:
2616 try:
2617 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2618 except errors.AddressPoolError:
2619 logging.warning("Could not release IP %s: not contained in network %s",
2620 old_ip, old_net_obj.name)
2621
2622 # no (ip, network) change, but the NIC is still attached to a network
2623 elif (old_net_uuid is not None and
2624 (req_link is not None or req_mode is not None)):
2625 raise errors.OpPrereqError("Not allowed to change link or mode of"
2626 " a NIC that is connected to a network",
2627 errors.ECODE_INVAL)
2628
2629 private.params = new_params
2630 private.filled = new_filled_params
2631
2632 def _PreCheckDiskTemplate(self, pnode_info):
2633 """CheckPrereq checks related to a new disk template."""
2634
2635 instance = self.instance
2636 pnode = instance.primary_node
2637 cluster = self.cluster
2638 if instance.disk_template == self.op.disk_template:
2639 raise errors.OpPrereqError("Instance already has disk template %s" %
2640 instance.disk_template, errors.ECODE_INVAL)
2641
2642 if (instance.disk_template,
2643 self.op.disk_template) not in self._DISK_CONVERSIONS:
2644 raise errors.OpPrereqError("Unsupported disk template conversion from"
2645 " %s to %s" % (instance.disk_template,
2646 self.op.disk_template),
2647 errors.ECODE_INVAL)
2648 CheckInstanceState(self, instance, INSTANCE_DOWN,
2649 msg="cannot change disk template")
2650 if self.op.disk_template in constants.DTS_INT_MIRROR:
2651 if self.op.remote_node == pnode:
2652 raise errors.OpPrereqError("Given new secondary node %s is the same"
2653 " as the primary node of the instance" %
2654 self.op.remote_node, errors.ECODE_STATE)
2655 CheckNodeOnline(self, self.op.remote_node)
2656 CheckNodeNotDrained(self, self.op.remote_node)
2657 # FIXME: here we assume that the old instance type is DT_PLAIN
2658 assert instance.disk_template == constants.DT_PLAIN
2659 disks = [{constants.IDISK_SIZE: d.size,
2660 constants.IDISK_VG: d.logical_id[0]}
2661 for d in instance.disks]
2662 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2663 CheckNodesFreeDiskPerVG(self, [self.op.remote_node], required)
2664
2665 snode_info = self.cfg.GetNodeInfo(self.op.remote_node)
2666 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2667 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
2668 snode_group)
2669 CheckTargetNodeIPolicy(self, ipolicy, instance, snode_info, self.cfg,
2670 ignore=self.op.ignore_ipolicy)
2671 if pnode_info.group != snode_info.group:
2672 self.LogWarning("The primary and secondary nodes are in two"
2673 " different node groups; the disk parameters"
2674 " from the first disk's node group will be"
2675 " used")
2676
2677 if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2678 # Make sure none of the nodes require exclusive storage
2679 nodes = [pnode_info]
2680 if self.op.disk_template in constants.DTS_INT_MIRROR:
2681 assert snode_info
2682 nodes.append(snode_info)
2683 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2684 if compat.any(map(has_es, nodes)):
2685 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2686 " storage is enabled" % (instance.disk_template,
2687 self.op.disk_template))
2688 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2689
2690 def CheckPrereq(self):
2691 """Check prerequisites.
2692
2693 This checks the requested parameter changes against the instance's
2693 current configuration.
2694
2695 """
2696 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2697 instance = self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
2698
2699 cluster = self.cluster = self.cfg.GetClusterInfo()
2700 assert self.instance is not None, \
2701 "Cannot retrieve locked instance %s" % self.op.instance_name
2702
2703 pnode = instance.primary_node
2704
2705 self.warn = []
2706
2707 if (self.op.pnode is not None and self.op.pnode != pnode and
2708 not self.op.force):
2709 # verify that the instance is not up
2710 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2711 instance.hypervisor)
2712 if instance_info.fail_msg:
2713 self.warn.append("Can't get instance runtime information: %s" %
2714 instance_info.fail_msg)
2715 elif instance_info.payload:
2716 raise errors.OpPrereqError("Instance is still running on %s" % pnode,
2717 errors.ECODE_STATE)
2718
2719 assert pnode in self.owned_locks(locking.LEVEL_NODE)
2720 nodelist = list(instance.all_nodes)
2721 pnode_info = self.cfg.GetNodeInfo(pnode)
2722 self.diskparams = self.cfg.GetInstanceDiskParams(instance)
2723
2724
2725 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2726 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2727
2728 # dictionary with instance information after the modification
2729 ispec = {}
2730
2731 # Check disk modifications. This is done here and not in CheckArguments
2732 # (as with NICs) because we need to know the instance's disk template
2733 if instance.disk_template == constants.DT_EXT:
2734 self._CheckMods("disk", self.op.disks, {},
2735 self._VerifyDiskModification)
2736 else:
2737 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2738 self._VerifyDiskModification)
2739
2740
2741 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2742 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2743
2744 # Check the validity of the 'provider' parameter
2745 if instance.disk_template == constants.DT_EXT:
2746 for mod in self.diskmod:
2747 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2748 if mod[0] == constants.DDM_ADD:
2749 if ext_provider is None:
2750 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2751 " '%s' missing, during disk add" %
2752 (constants.DT_EXT,
2753 constants.IDISK_PROVIDER),
2754 errors.ECODE_NOENT)
2755 elif mod[0] == constants.DDM_MODIFY:
2756 if ext_provider:
2757 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2758 " modification" %
2759 constants.IDISK_PROVIDER,
2760 errors.ECODE_INVAL)
2761 else:
2762 for mod in self.diskmod:
2763 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2764 if ext_provider is not None:
2765 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2766 " instances of type '%s'" %
2767 (constants.IDISK_PROVIDER,
2768 constants.DT_EXT),
2769 errors.ECODE_INVAL)
2770
2771
2772 if self.op.os_name and not self.op.force:
2773 CheckNodeHasOS(self, instance.primary_node, self.op.os_name,
2774 self.op.force_variant)
2775 instance_os = self.op.os_name
2776 else:
2777 instance_os = instance.os
2778
2779 assert not (self.op.disk_template and self.op.disks), \
2780 "Can't modify disk template and apply disk changes at the same time"
2781
2782 if self.op.disk_template:
2783 self._PreCheckDiskTemplate(pnode_info)
2784
2785
2786 if self.op.hvparams:
2787 hv_type = instance.hypervisor
2788 i_hvdict = GetUpdatedParams(instance.hvparams, self.op.hvparams)
2789 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2790 hv_new = cluster.SimpleFillHV(hv_type, instance.os, i_hvdict)
2791
2792
2793 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2794 CheckHVParams(self, nodelist, instance.hypervisor, hv_new)
2795 self.hv_proposed = self.hv_new = hv_new
2796 self.hv_inst = i_hvdict
2797 else:
2798 self.hv_proposed = cluster.SimpleFillHV(instance.hypervisor, instance.os,
2799 instance.hvparams)
2800 self.hv_new = self.hv_inst = {}
2801
2802
2803 if self.op.beparams:
2804 i_bedict = GetUpdatedParams(instance.beparams, self.op.beparams,
2805 use_none=True)
2806 objects.UpgradeBeParams(i_bedict)
2807 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2808 be_new = cluster.SimpleFillBE(i_bedict)
2809 self.be_proposed = self.be_new = be_new
2810 self.be_inst = i_bedict
2811 else:
2812 self.be_new = self.be_inst = {}
2813 self.be_proposed = cluster.SimpleFillBE(instance.beparams)
2814 be_old = cluster.FillBE(instance)
2815
2816
2817 # CPU param validation: checked every time a parameter is changed to
2818 # cover all cases where either the CPU mask or the vcpus have changed
2819 if (constants.BE_VCPUS in self.be_proposed and
2820 constants.HV_CPU_MASK in self.hv_proposed):
2821 cpu_list = \
2822 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2823
2824 # Verify the mask is consistent with the number of vCPUs; can skip the
2825 # test if only one entry is in the mask (it applies to all vCPUs)
2826 if (len(cpu_list) > 1 and
2827 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2828 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2829 " CPU mask [%s]" %
2830 (self.be_proposed[constants.BE_VCPUS],
2831 self.hv_proposed[constants.HV_CPU_MASK]),
2832 errors.ECODE_INVAL)
2833
2834 # Only perform this test if a new CPU mask is given
2835 if constants.HV_CPU_MASK in self.hv_new:
2836 # Calculate the largest CPU number requested
2837 max_requested_cpu = max(map(max, cpu_list))
2838 # Check that all of the instance's nodes have enough physical CPUs to
2839 # satisfy the requested CPU mask (CPU IDs are zero-based, hence the +1)
2840 _CheckNodesPhysicalCPUs(self, instance.all_nodes,
2841 max_requested_cpu + 1, instance.hypervisor)
2842
2843
2844 if self.op.osparams:
2845 i_osdict = GetUpdatedParams(instance.osparams, self.op.osparams)
2846 CheckOSParams(self, True, nodelist, instance_os, i_osdict)
2847 self.os_inst = i_osdict
2848 else:
2849 self.os_inst = {}
2850
2851
2852 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2853 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2854 mem_check_list = [pnode]
2855 if be_new[constants.BE_AUTO_BALANCE]:
2856 # either we changed auto_balance to yes or it was set before
2857 mem_check_list.extend(instance.secondary_nodes)
2858 instance_info = self.rpc.call_instance_info(pnode, instance.name,
2859 instance.hypervisor)
2860 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2861 [instance.hypervisor], False)
2862 pninfo = nodeinfo[pnode]
2863 msg = pninfo.fail_msg
2864 if msg:
2865 # Assume the primary node is unreachable and go ahead
2866 self.warn.append("Can't get info from primary node %s: %s" %
2867 (pnode, msg))
2868 else:
2869 (_, _, (pnhvinfo, )) = pninfo.payload
2870 if not isinstance(pnhvinfo.get("memory_free", None), int):
2871 self.warn.append("Node data from primary node %s doesn't contain"
2872 " free memory information" % pnode)
2873 elif instance_info.fail_msg:
2874 self.warn.append("Can't get instance runtime information: %s" %
2875 instance_info.fail_msg)
2876 else:
2877 if instance_info.payload:
2878 current_mem = int(instance_info.payload["memory"])
2879 else:
2880 # Assume the instance is not running
2881 # (there is a slight race condition here, but it's not very probable
2882 # and we have no other way to check)
2883 # TODO: Describe the race condition
2884 current_mem = 0
2885
2886 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2887 pnhvinfo["memory_free"])
2888 if miss_mem > 0:
2889 raise errors.OpPrereqError("This change will prevent the instance"
2890 " from starting, due to %d MB of memory"
2891 " missing on its primary node" %
2892 miss_mem, errors.ECODE_NORES)
2893
2894 if be_new[constants.BE_AUTO_BALANCE]:
2895 for node, nres in nodeinfo.items():
2896 if node not in instance.secondary_nodes:
2897 continue
2898 nres.Raise("Can't get info from secondary node %s" % node,
2899 prereq=True, ecode=errors.ECODE_STATE)
2900 (_, _, (nhvinfo, )) = nres.payload
2901 if not isinstance(nhvinfo.get("memory_free", None), int):
2902 raise errors.OpPrereqError("Secondary node %s didn't return free"
2903 " memory information" % node,
2904 errors.ECODE_STATE)
2905
2906 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
2907 raise errors.OpPrereqError("This change will prevent the instance"
2908 " from failover to its secondary node"
2909 " %s, due to not enough memory" % node,
2910 errors.ECODE_STATE)
2911
2912 if self.op.runtime_mem:
2913 remote_info = self.rpc.call_instance_info(instance.primary_node,
2914 instance.name,
2915 instance.hypervisor)
2916 remote_info.Raise("Error checking node %s" % instance.primary_node)
2917 if not remote_info.payload:
2918 raise errors.OpPrereqError("Instance %s is not running" %
2919 instance.name, errors.ECODE_STATE)
2920
2921 current_memory = remote_info.payload["memory"]
2922 if (not self.op.force and
2923 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
2924 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
2925 raise errors.OpPrereqError("Instance %s must have between %d"
2926 " and %d MB of memory unless --force is"
2927 " given" %
2928 (instance.name,
2929 self.be_proposed[constants.BE_MINMEM],
2930 self.be_proposed[constants.BE_MAXMEM]),
2931 errors.ECODE_INVAL)
2932
2933 delta = self.op.runtime_mem - current_memory
2934 if delta > 0:
2935 CheckNodeFreeMemory(self, instance.primary_node,
2936 "ballooning memory for instance %s" %
2937 instance.name, delta, instance.hypervisor)
2938
2939 if self.op.disks and instance.disk_template == constants.DT_DISKLESS:
2940 raise errors.OpPrereqError("Disk operations not supported for"
2941 " diskless instances", errors.ECODE_INVAL)
2942
2943 def _PrepareNicCreate(_, params, private):
2944 self._PrepareNicModification(params, private, None, None,
2945 {}, cluster, pnode)
2946 return (None, None)
2947
2948 def _PrepareNicMod(_, nic, params, private):
2949 self._PrepareNicModification(params, private, nic.ip, nic.network,
2950 nic.nicparams, cluster, pnode)
2951 return None
2952
2953 def _PrepareNicRemove(_, params, __):
2954 ip = params.ip
2955 net = params.network
2956 if net is not None and ip is not None:
2957 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
2958
2959 # Verify NIC changes (operating on a copy without the private fields)
2960 nics = instance.nics[:]
2961 _ApplyContainerMods("NIC", nics, None, self.nicmod,
2962 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
2963 if len(nics) > constants.MAX_NICS:
2964 raise errors.OpPrereqError("Instance has too many network interfaces"
2965 " (%d), cannot add more" % constants.MAX_NICS,
2966 errors.ECODE_STATE)
2967
2968 def _PrepareDiskMod(_, disk, params, __):
2969 disk.name = params.get(constants.IDISK_NAME, None)
2970
2971 # Verify disk changes (operating on a copy)
2972 disks = copy.deepcopy(instance.disks)
2973 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2974 _PrepareDiskMod, None)
2975 utils.ValidateDeviceNames("disk", disks)
2976 if len(disks) > constants.MAX_DISKS:
2977 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2978 " more" % constants.MAX_DISKS,
2979 errors.ECODE_STATE)
2980 disk_sizes = [disk.size for disk in instance.disks]
2981 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2982 self.diskmod if op == constants.DDM_ADD)
2983 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2984 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2985
2986 if self.op.offline is not None and self.op.offline:
2987 CheckInstanceState(self, instance, CAN_CHANGE_INSTANCE_OFFLINE,
2988 msg="can't change to offline")
2989
2990 # Pre-compute NIC changes (necessary to use the result in hooks)
2991 self._nic_chgdesc = []
2992 if self.nicmod:
2993 # Operate on copies as this is still in CheckPrereq
2994 nics = [nic.Copy() for nic in instance.nics]
2995 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
2996 self._CreateNewNic, self._ApplyNicMods, None)
2997
2998 utils.ValidateDeviceNames("NIC", nics)
2999 self._new_nics = nics
3000 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3001 else:
3002 self._new_nics = None
3003 ispec[constants.ISPEC_NIC_COUNT] = len(instance.nics)
3004
3005 if not self.op.ignore_ipolicy:
3006 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
3007 group_info)
3008
3009 # Fill ispec with backend parameters
3010 ispec[constants.ISPEC_SPINDLE_USE] = \
3011 self.be_new.get(constants.BE_SPINDLE_USE, None)
3012 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3013 None)
3014
3015 # Copy ispec to verify parameters with min/max values separately
3016 if self.op.disk_template:
3017 new_disk_template = self.op.disk_template
3018 else:
3019 new_disk_template = instance.disk_template
3020 ispec_max = ispec.copy()
3021 ispec_max[constants.ISPEC_MEM_SIZE] = \
3022 self.be_new.get(constants.BE_MAXMEM, None)
3023 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3024 new_disk_template)
3025 ispec_min = ispec.copy()
3026 ispec_min[constants.ISPEC_MEM_SIZE] = \
3027 self.be_new.get(constants.BE_MINMEM, None)
3028 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3029 new_disk_template)
3030
3031 if (res_max or res_min):
3032 # FIXME: Improve the error message by telling whether the upper or the
3033 # lower limit of the parameter fails the ipolicy
3034 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3035 (group_info, group_info.name,
3036 utils.CommaJoin(set(res_max + res_min))))
3037 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3038
3039 def _ConvertPlainToDrbd(self, feedback_fn):
3040 """Converts an instance from plain to drbd.
3041
3042 """
3043 feedback_fn("Converting template to drbd")
3044 instance = self.instance
3045 pnode = instance.primary_node
3046 snode = self.op.remote_node
3047
3048 assert instance.disk_template == constants.DT_PLAIN
3049
3050 # create a fake disk info for GenerateDiskTemplate
3051 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3052 constants.IDISK_VG: d.logical_id[0],
3053 constants.IDISK_NAME: d.name}
3054 for d in instance.disks]
3055 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3056 instance.name, pnode, [snode],
3057 disk_info, None, None, 0, feedback_fn,
3058 self.diskparams)
3059 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
3060 self.diskparams)
3061 p_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, pnode)
3062 s_excl_stor = IsExclusiveStorageEnabledNodeName(self.cfg, snode)
3063 info = GetInstanceInfoText(instance)
3064 feedback_fn("Creating additional volumes...")
3065 # first, create the missing data and meta devices
3066 for disk in anno_disks:
3067 # the data device already exists on the primary; only the meta is new there
3068 CreateSingleBlockDev(self, pnode, instance, disk.children[1],
3069 info, True, p_excl_stor)
3070 for child in disk.children:
3071 CreateSingleBlockDev(self, snode, instance, child, info, True,
3072 s_excl_stor)
3073
3074
3075 feedback_fn("Renaming original volumes...")
3076 rename_list = [(o, n.children[0].logical_id)
3077 for (o, n) in zip(instance.disks, new_disks)]
3078 result = self.rpc.call_blockdev_rename(pnode, rename_list)
3079 result.Raise("Failed to rename original LVs")
3080
3081 feedback_fn("Initializing DRBD devices...")
3082 # all child devices are in place, we can now create the DRBD devices
3083 try:
3084 for disk in anno_disks:
3085 for (node, excl_stor) in [(pnode, p_excl_stor), (snode, s_excl_stor)]:
3086 f_create = node == pnode
3087 CreateSingleBlockDev(self, node, instance, disk, info, f_create,
3088 excl_stor)
3089 except errors.GenericError, e:
3090 feedback_fn("Initializing of DRBD devices failed;"
3091 " renaming back original volumes...")
3092 for disk in new_disks:
3093 self.cfg.SetDiskID(disk, pnode)
3094 rename_back_list = [(n.children[0], o.logical_id)
3095 for (n, o) in zip(new_disks, instance.disks)]
3096 result = self.rpc.call_blockdev_rename(pnode, rename_back_list)
3097 result.Raise("Failed to rename LVs back after error %s" % str(e))
3098 raise
3099
3100 # at this point, the instance has been modified
3101 instance.disk_template = constants.DT_DRBD8
3102 instance.disks = new_disks
3103 self.cfg.Update(instance, feedback_fn)
3104
3105 # Release node locks while waiting for sync
3106 ReleaseLocks(self, locking.LEVEL_NODE)
3107
3108
3109 disk_abort = not WaitForSync(self, instance,
3110 oneshot=not self.op.wait_for_sync)
3111 if disk_abort:
3112 raise errors.OpExecError("There are some degraded disks for"
3113 " this instance, please cleanup manually")
3114
3115 # Node resource locks will be released by the caller
3116
3117 def _ConvertDrbdToPlain(self, feedback_fn):
3118 """Converts an instance from drbd to plain.
3119
3120 """
3121 instance = self.instance
3122
3123 assert len(instance.secondary_nodes) == 1
3124 assert instance.disk_template == constants.DT_DRBD8
3125
3126 pnode = instance.primary_node
3127 snode = instance.secondary_nodes[0]
3128 feedback_fn("Converting template to plain")
3129
3130 old_disks = AnnotateDiskParams(instance, instance.disks, self.cfg)
3131 new_disks = [d.children[0] for d in instance.disks]
3132
3133
3134 for parent, child in zip(old_disks, new_disks):
3135 child.size = parent.size
3136 child.mode = parent.mode
3137 child.name = parent.name
3138
3139 # these are DRBD disks, return their ports to the pool
3140 # NOTE: this must be done right before the call to cfg.Update!
3141 for disk in old_disks:
3142 tcp_port = disk.logical_id[2]
3143 self.cfg.AddTcpUdpPort(tcp_port)
3144
3145
3146 instance.disks = new_disks
3147 instance.disk_template = constants.DT_PLAIN
3148 _UpdateIvNames(0, instance.disks)
3149 self.cfg.Update(instance, feedback_fn)
3150
3151 # Release locks in case removing the disks takes a while
3152 ReleaseLocks(self, locking.LEVEL_NODE)
3153
3154 feedback_fn("Removing volumes on the secondary node...")
3155 for disk in old_disks:
3156 self.cfg.SetDiskID(disk, snode)
3157 msg = self.rpc.call_blockdev_remove(snode, disk).fail_msg
3158 if msg:
3159 self.LogWarning("Could not remove block device %s on node %s,"
3160 " continuing anyway: %s", disk.iv_name, snode, msg)
3161
3162 feedback_fn("Removing unneeded volumes on the primary node...")
3163 for idx, disk in enumerate(old_disks):
3164 meta = disk.children[1]
3165 self.cfg.SetDiskID(meta, pnode)
3166 msg = self.rpc.call_blockdev_remove(pnode, meta).fail_msg
3167 if msg:
3168 self.LogWarning("Could not remove metadata for disk %d on node %s,"
3169 " continuing anyway: %s", idx, pnode, msg)
3170
3171 def _CreateNewDisk(self, idx, params, _):
3172 """Creates a new disk.
3173
3174 """
3175 instance = self.instance
3176
3177
3178 if instance.disk_template in constants.DTS_FILEBASED:
3179 (file_driver, file_path) = instance.disks[0].logical_id
3180 file_path = os.path.dirname(file_path)
3181 else:
3182 file_driver = file_path = None
3183
3184 disk = \
3185 GenerateDiskTemplate(self, instance.disk_template, instance.name,
3186 instance.primary_node, instance.secondary_nodes,
3187 [params], file_path, file_driver, idx,
3188 self.Log, self.diskparams)[0]
3189
3190 new_disks = CreateDisks(self, instance, disks=[disk])
3191
3192 if self.cluster.prealloc_wipe_disks:
3193
3194 WipeOrCleanupDisks(self, instance,
3195 disks=[(idx, disk, 0)],
3196 cleanup=new_disks)
3197
3198 return (disk, [
3199 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3200 ])
3201
3230
3231 def _RemoveDisk(self, idx, root, _):
3232 """Removes a disk.
3233
3234 """
3235 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3236 for node, disk in anno_disk.ComputeNodeTree(self.instance.primary_node):
3237 self.cfg.SetDiskID(disk, node)
3238 msg = self.rpc.call_blockdev_remove(node, disk).fail_msg
3239 if msg:
3240 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3241 " continuing anyway", idx, node, msg)
3242
3243 # if this is a DRBD disk, return its port to the pool
3244 if root.dev_type in constants.LDS_DRBD:
3245 self.cfg.AddTcpUdpPort(root.logical_id[2])
3246
3247 def _CreateNewNic(self, idx, params, private):
3248 """Creates data structure for a new network interface.
3249
3250 """
3251 mac = params[constants.INIC_MAC]
3252 ip = params.get(constants.INIC_IP, None)
3253 net = params.get(constants.INIC_NETWORK, None)
3254 name = params.get(constants.INIC_NAME, None)
3255 net_uuid = self.cfg.LookupNetwork(net)
3256
3257 nicparams = private.filled
3258 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3259 nicparams=nicparams)
3260 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3261
3262 return (nobj, [
3263 ("nic.%d" % idx,
3264 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3265 (mac, ip, private.filled[constants.NIC_MODE],
3266 private.filled[constants.NIC_LINK],
3267 net)),
3268 ])
3269
3270 def _ApplyNicMods(self, idx, nic, params, private):
3271 """Modifies a network interface.
3272
3273 """
3274 changes = []
3275
3276 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3277 if key in params:
3278 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3279 setattr(nic, key, params[key])
3280
3281 new_net = params.get(constants.INIC_NETWORK, nic.network)
3282 new_net_uuid = self.cfg.LookupNetwork(new_net)
3283 if new_net_uuid != nic.network:
3284 changes.append(("nic.network/%d" % idx, new_net))
3285 nic.network = new_net_uuid
3286
3287 if private.filled:
3288 nic.nicparams = private.filled
3289
3290 for (key, val) in nic.nicparams.items():
3291 changes.append(("nic.%s/%d" % (key, idx), val))
3292
3293 return changes
3294
3295 def Exec(self, feedback_fn):
3296 """Modifies an instance.
3297
3298 All parameters take effect only at the next restart of the instance.
3299
3300 """
3301 # Process the warnings from CheckPrereq here, as we don't have a
3302 # feedback_fn there
3303 # TODO: Replace with self.LogWarning
3304 for warn in self.warn:
3305 feedback_fn("WARNING: %s" % warn)
3306
3307 assert ((self.op.disk_template is None) ^
3308 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3309 "Not owning any node resource locks"
3310
3311 result = []
3312 instance = self.instance
3313
3314 # New primary node
3315 if self.op.pnode:
3316 instance.primary_node = self.op.pnode
3317
3318 # runtime memory ballooning
3319 if self.op.runtime_mem:
3320 rpcres = self.rpc.call_instance_balloon_memory(instance.primary_node,
3321 instance,
3322 self.op.runtime_mem)
3323 rpcres.Raise("Cannot modify instance runtime memory")
3324 result.append(("runtime_memory", self.op.runtime_mem))
3325
3326 # Apply disk changes
3327 _ApplyContainerMods("disk", instance.disks, result, self.diskmod,
3328 self._CreateNewDisk, self._ModifyDisk,
3329 self._RemoveDisk)
3330 _UpdateIvNames(0, instance.disks)
3331
3332 if self.op.disk_template:
3333 if __debug__:
3334 check_nodes = set(instance.all_nodes)
3335 if self.op.remote_node:
3336 check_nodes.add(self.op.remote_node)
3337 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3338 owned = self.owned_locks(level)
3339 assert not (check_nodes - owned), \
3340 ("Not owning the correct locks, owning %r, expected at least %r" %
3341 (owned, check_nodes))
3342
3343 r_shut = ShutdownInstanceDisks(self, instance)
3344 if not r_shut:
3345 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3346 " proceed with disk template conversion")
3347 mode = (instance.disk_template, self.op.disk_template)
3348 try:
3349 self._DISK_CONVERSIONS[mode](self, feedback_fn)
3350 except:
3351 self.cfg.ReleaseDRBDMinors(instance.name)
3352 raise
3353 result.append(("disk_template", self.op.disk_template))
3354
3355 assert instance.disk_template == self.op.disk_template, \
3356 ("Expected disk template '%s', found '%s'" %
3357 (self.op.disk_template, instance.disk_template))
3358
3359 # Release node and resource locks if there are any (they might already
3360 # have been released during disk conversion)
3361 ReleaseLocks(self, locking.LEVEL_NODE)
3362 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3363
3364 # Apply NIC changes
3365 if self._new_nics is not None:
3366 instance.nics = self._new_nics
3367 result.extend(self._nic_chgdesc)
3368
3369
3370 if self.op.hvparams:
3371 instance.hvparams = self.hv_inst
3372 for key, val in self.op.hvparams.iteritems():
3373 result.append(("hv/%s" % key, val))
3374
3375
3376 if self.op.beparams:
3377 instance.beparams = self.be_inst
3378 for key, val in self.op.beparams.iteritems():
3379 result.append(("be/%s" % key, val))
3380
3381
3382 if self.op.os_name:
3383 instance.os = self.op.os_name
3384
3385
3386 if self.op.osparams:
3387 instance.osparams = self.os_inst
3388 for key, val in self.op.osparams.iteritems():
3389 result.append(("os/%s" % key, val))
3390
3391 if self.op.offline is None:
3392 # offline parameter not changed, nothing to do
3393 pass
3394 elif self.op.offline:
3395 # Mark instance as offline
3396 self.cfg.MarkInstanceOffline(instance.name)
3397 result.append(("admin_state", constants.ADMINST_OFFLINE))
3398 else:
3399 # Mark instance as down (no longer offline)
3400 self.cfg.MarkInstanceDown(instance.name)
3401 result.append(("admin_state", constants.ADMINST_DOWN))
3402
3403 self.cfg.Update(instance, feedback_fn, self.proc.GetECId())
3404
3405 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3406 self.owned_locks(locking.LEVEL_NODE)), \
3407 "All node locks should have been released by now"
3408
3409 return result
3410
3411 _DISK_CONVERSIONS = {
3412 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3413 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3414 }
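# For reference (illustrative values, not from the original source), the
# change list returned by Exec() is a sequence of (parameter, new-value)
# pairs, e.g.:
#   [("runtime_memory", 2048), ("disk_template", "drbd"),
#    ("be/maxmem", 4096), ("admin_state", "down")]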
3415
3417 class LUInstanceChangeGroup(LogicalUnit):
3418 HPATH = "instance-change-group"
3419 HTYPE = constants.HTYPE_INSTANCE
3420 REQ_BGL = False
3421
3440
3441 def DeclareLocks(self, level):
3442 if level == locking.LEVEL_NODEGROUP:
3443 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3444
3445 if self.req_target_uuids:
3446 lock_groups = set(self.req_target_uuids)
3447
3448 # Lock all groups used by the instance optimistically; this requires
3449 # going via the node before it's locked, requiring verification later on
3450 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_name)
3451 lock_groups.update(instance_groups)
3452 else:
3453 # No target groups, need to lock all of them
3454 lock_groups = locking.ALL_SET
3455
3456 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3457
3458 elif level == locking.LEVEL_NODE:
3459 if self.req_target_uuids:
3460 # Lock all nodes used by instances
3461 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3462 self._LockInstancesNodes()
3463
3464 # Lock all nodes in all potential target groups
3465 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3466 self.cfg.GetInstanceNodeGroups(self.op.instance_name))
3467 member_nodes = [node_name
3468 for group in lock_groups
3469 for node_name in self.cfg.GetNodeGroup(group).members]
3470 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3471 else:
3472 # Lock all nodes as all groups are potential targets
3473 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3474
3475 def CheckPrereq(self):
3476 owned_instances = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3477 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3478 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3479
3480 assert (self.req_target_uuids is None or
3481 owned_groups.issuperset(self.req_target_uuids))
3482 assert owned_instances == set([self.op.instance_name])
3483
3484 # Get instance information
3485 self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
3486
3487 # Check if node groups for the locked instance are still correct
3488 assert owned_nodes.issuperset(self.instance.all_nodes), \
3489 ("Instance %s's nodes changed while we kept the lock" %
3490 self.op.instance_name)
3491
3492 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_name,
3493 owned_groups)
3494
3495 if self.req_target_uuids:
3496 # User requested specific target groups
3497 self.target_uuids = frozenset(self.req_target_uuids)
3498 else:
3499 # All groups except those used by the instance are potential targets
3500 self.target_uuids = owned_groups - inst_groups
3501
3502 conflicting_groups = self.target_uuids & inst_groups
3503 if conflicting_groups:
3504 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3505 " used by the instance '%s'" %
3506 (utils.CommaJoin(conflicting_groups),
3507 self.op.instance_name),
3508 errors.ECODE_INVAL)
3509
3510 if not self.target_uuids:
3511 raise errors.OpPrereqError("There are no possible target groups",
3512 errors.ECODE_INVAL)
3513
3514 def BuildHooksEnv(self):
3515 """Build hooks env.
3516
3517 """
3518 assert self.target_uuids
3519
3520 env = {
3521 "TARGET_GROUPS": " ".join(self.target_uuids),
3522 }
3523
3524 env.update(BuildInstanceHookEnvByObject(self, self.instance))
3525
3526 return env
3527
3528 def BuildHooksNodes(self):
3529 """Build hooks nodes.
3530
3531 """
3532 mn = self.cfg.GetMasterNode()
3533 return ([mn], [mn])
3534
3535 def Exec(self, feedback_fn):
3536 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3537
3538 assert instances == [self.op.instance_name], "Instance not locked"
3539
3540 req = iallocator.IAReqGroupChange(instances=instances,
3541 target_groups=list(self.target_uuids))
3542 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3543
3544 ial.Run(self.op.iallocator)
3545
3546 if not ial.success:
3547 raise errors.OpPrereqError("Can't compute solution for changing group of"
3548 " instance '%s' using iallocator '%s': %s" %
3549 (self.op.instance_name, self.op.iallocator,
3550 ial.info), errors.ECODE_NORES)
3551
3552 jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3553
3554 self.LogInfo("Iallocator returned %s job(s) for changing group of"
3555 " instance '%s'", len(jobs), self.op.instance_name)
3556
3557 return ResultWithJobs(jobs)
3558