22 """Logical units dealing with instances."""
23
24 import OpenSSL
25 import copy
26 import logging
27 import os
28
29 from ganeti import compat
30 from ganeti import constants
31 from ganeti import errors
32 from ganeti import ht
33 from ganeti import hypervisor
34 from ganeti import locking
35 from ganeti.masterd import iallocator
36 from ganeti import masterd
37 from ganeti import netutils
38 from ganeti import objects
39 from ganeti import opcodes
40 from ganeti import pathutils
41 from ganeti import rpc
42 from ganeti import utils
43
44 from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
45
46 from ganeti.cmdlib.common import INSTANCE_DOWN, \
47 INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
48 ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
49 LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
50 IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
51 AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
52 ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
53 CheckDiskTemplateEnabled
54 from ganeti.cmdlib.instance_storage import CreateDisks, \
55 CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
56 IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
57 CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
58 StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
59 CheckSpindlesExclusiveStorage
60 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
61 GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
62 NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
63 ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
64 GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
65 CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
66
67 import ganeti.masterd.instance
68
69
70
71
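

#: Type description for changes as returned by the container-modification
#: callbacks in this module; a maybe-list of (name, value) pairs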
_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))
80 """Ensures that a given hostname resolves to a 'sane' name.
81
82 The given name is required to be a prefix of the resolved hostname,
83 to prevent accidental mismatches.
84
85 @param lu: the logical unit on behalf of which we're checking
86 @param name: the name we should resolve and check
87 @return: the resolved hostname object
88
89 """
90 hostname = netutils.GetHostname(name=name)
91 if hostname.name != name:
92 lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
93 if not utils.MatchNameComponent(name, [hostname.name]):
94 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
95 " same as given hostname '%s'") %
96 (hostname.name, name), errors.ECODE_INVAL)
97 return hostname
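
# Example (sketch, hypothetical names): _CheckHostnameSane(lu, "inst1")
# returns the resolved hostname object when "inst1" resolves to e.g.
# "inst1.example.com", and raises OpPrereqError when the resolved name
# does not match the given prefix.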


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)
111 """Wrapper around IAReqInstanceAlloc.
112
113 @param op: The instance opcode
114 @param disks: The computed disks
115 @param nics: The computed nics
116 @param beparams: The full filled beparams
117 @param node_name_whitelist: List of nodes which should appear as online to the
118 allocator (unless the node is already marked offline)
119
120 @returns: A filled L{iallocator.IAReqInstanceAlloc}
121
122 """
123 spindle_use = beparams[constants.BE_SPINDLE_USE]
124 return iallocator.IAReqInstanceAlloc(name=op.instance_name,
125 disk_template=op.disk_template,
126 tags=op.tags,
127 os=op.os_type,
128 vcpus=beparams[constants.BE_VCPUS],
129 memory=beparams[constants.BE_MAXMEM],
130 spindle_use=spindle_use,
131 disks=disks,
132 nics=[n.ToDict() for n in nics],
133 hypervisor=op.hypervisor,
134 node_whitelist=node_name_whitelist)
135
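

def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  Minimal sketch of this helper (its body is not shown in this extract);
  the behaviour assumed from the call sites below is: backend parameters
  requested as VALUE_AUTO are taken from the cluster defaults, after
  which the fully filled dict is computed.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.items():
    if value == constants.VALUE_AUTO:
      # replace "auto" with the cluster default for this parameter
      op.beparams[param] = default_beparams[param]
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)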


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built-up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)
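
      # register the MAC in the config's temporary reservation pool, so
      # that an address already in use in the cluster is refused here,
      # at submission time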
      try:
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
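
# Example (sketch) of the op.nics input consumed above: each entry is a
# dict of INIC_* values, e.g.
#   {constants.INIC_MODE: constants.VALUE_AUTO,
#    constants.INIC_IP: "pool",
#    constants.INIC_NETWORK: "net1"}
# where "net1" is a hypothetical network name.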


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)
271 """Compute if instance specs meets the specs of ipolicy.
272
273 @type ipolicy: dict
274 @param ipolicy: The ipolicy to verify against
275 @param instance_spec: dict
276 @param instance_spec: The instance spec to verify
277 @type disk_template: string
278 @param disk_template: the disk template of the instance
279 @param _compute_fn: The function to verify ipolicy (unittest only)
280 @see: L{ComputeIPolicySpecViolation}
281
282 """
283 mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
284 cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
285 disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
286 disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
287 nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
288 spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
289
290 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
291 disk_sizes, spindle_use, disk_template)
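
# Example (sketch) of using the helper above: given a node group's ipolicy
# and a prospective instance spec, a non-empty result lists the violated
# policy rules (see the use in LUInstanceCreate.CheckPrereq below):
#   res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
#                                              constants.DT_PLAIN)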


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
319 """Create an instance.
320
321 """
322 HPATH = "instance-add"
323 HTYPE = constants.HTYPE_INSTANCE
324 REQ_BGL = False
325
339
381
383 """Check arguments.
384
385 """
386
387
388 if self.op.no_install and self.op.start:
389 self.LogInfo("No-installation mode selected, disabling startup")
390 self.op.start = False
391
392 self.op.instance_name = \
393 netutils.Hostname.GetNormalizedName(self.op.instance_name)
394
395 if self.op.ip_check and not self.op.name_check:
396
397 raise errors.OpPrereqError("Cannot do IP address check without a name"
398 " check", errors.ECODE_INVAL)
399
400
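
    # check nics' parameter names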
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)

    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckDiskArguments()

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_DEFAULT

    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
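      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!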
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}
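
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time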
    if self.op.instance_name in \
        [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])
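
    # Optimistically acquire shared group locks (we're reading the
    # configuration). We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.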
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
       self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)
601 """Run the allocator based on input opcode.
602
603 """
604 if self.op.opportunistic_locking:
605
606 node_name_whitelist = self.cfg.GetNodeNames(
607 self.owned_locks(locking.LEVEL_NODE))
608 else:
609 node_name_whitelist = None
610
611
612
613 req = _CreateInstanceAllocRequest(self.op, self.disks,
614 self.nics, self.be_full,
615 node_name_whitelist)
616 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
617
618 ial.Run(self.op.iallocator)
619
620 if not ial.success:
621
622 if self.op.opportunistic_locking:
623 ecode = errors.ECODE_TEMP_NORES
624 else:
625 ecode = errors.ECODE_NORES
626
627 raise errors.OpPrereqError("Can't compute nodes using"
628 " iallocator '%s': %s" %
629 (self.op.iallocator, ial.info),
630 ecode)
631
632 (self.op.pnode_uuid, self.op.pnode) = \
633 ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
634 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
635 self.op.instance_name, self.op.iallocator,
636 utils.CommaJoin(ial.result))
637
638 assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
639
640 if req.RequiredNodes() == 2:
641 (self.op.snode_uuid, self.op.snode) = \
642 ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
643
645 """Build hooks env.
646
647 This runs on master, primary and secondary nodes of the instance.
648
649 """
650 env = {
651 "ADD_MODE": self.op.mode,
652 }
653 if self.op.mode == constants.INSTANCE_IMPORT:
654 env["SRC_NODE"] = self.op.src_node
655 env["SRC_PATH"] = self.op.src_path
656 env["SRC_IMAGES"] = self.src_images
657
658 env.update(BuildInstanceHookEnv(
659 name=self.op.instance_name,
660 primary_node_name=self.op.pnode,
661 secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
662 status=self.op.start,
663 os_type=self.op.os_type,
664 minmem=self.be_full[constants.BE_MINMEM],
665 maxmem=self.be_full[constants.BE_MAXMEM],
666 vcpus=self.be_full[constants.BE_VCPUS],
667 nics=NICListToTuple(self, self.nics),
668 disk_template=self.op.disk_template,
669 disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
670 d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
671 for d in self.disks],
672 bep=self.be_full,
673 hvp=self.hv_full,
674 hypervisor_name=self.op.hypervisor,
675 tags=self.op.tags,
676 ))
677
678 return env
679
681 """Build hooks nodes.
682
683 """
684 nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
685 return nl, nl
686
732
734 """Use export parameters as defaults.
735
736 In case the opcode doesn't specify (as in override) some instance
737 parameters, then try to use them from the export information, if
738 that declares them.
739
740 """
741 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
742
743 if not self.op.disks:
744 disks = []
745
746 for idx in range(constants.MAX_DISKS):
747 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
748 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
749 disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
750 disk = {
751 constants.IDISK_SIZE: disk_sz,
752 constants.IDISK_NAME: disk_name
753 }
754 disks.append(disk)
755 self.op.disks = disks
756 if not disks and self.op.disk_template != constants.DT_DISKLESS:
757 raise errors.OpPrereqError("No disk info specified and the export"
758 " is missing the disk information",
759 errors.ECODE_INVAL)
760
761 if not self.op.nics:
762 nics = []
763 for idx in range(constants.MAX_NICS):
764 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
765 ndict = {}
766 for name in [constants.INIC_IP,
767 constants.INIC_MAC, constants.INIC_NAME]:
768 nic_param_name = "nic%d_%s" % (idx, name)
769 if einfo.has_option(constants.INISECT_INS, nic_param_name):
770 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
771 ndict[name] = v
772 network = einfo.get(constants.INISECT_INS,
773 "nic%d_%s" % (idx, constants.INIC_NETWORK))
774
775
776 if network:
777 ndict[constants.INIC_NETWORK] = network
778 else:
779 for name in list(constants.NICS_PARAMETERS):
780 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
781 ndict[name] = v
782 nics.append(ndict)
783 else:
784 break
785 self.op.nics = nics
786
787 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
788 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
789
790 if (self.op.hypervisor is None and
791 einfo.has_option(constants.INISECT_INS, "hypervisor")):
792 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
793
794 if einfo.has_section(constants.INISECT_HYP):
795
796
797 for name, value in einfo.items(constants.INISECT_HYP):
798 if name not in self.op.hvparams:
799 self.op.hvparams[name] = value
800
801 if einfo.has_section(constants.INISECT_BEP):
802
803 for name, value in einfo.items(constants.INISECT_BEP):
804 if name not in self.op.beparams:
805 self.op.beparams[name] = value
806
807 if name == constants.BE_MEMORY:
808 if constants.BE_MAXMEM not in self.op.beparams:
809 self.op.beparams[constants.BE_MAXMEM] = value
810 if constants.BE_MINMEM not in self.op.beparams:
811 self.op.beparams[constants.BE_MINMEM] = value
812 else:
813
814 for name in constants.BES_PARAMETERS:
815 if (name not in self.op.beparams and
816 einfo.has_option(constants.INISECT_INS, name)):
817 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
818
819 if einfo.has_section(constants.INISECT_OSP):
820
821 for name, value in einfo.items(constants.INISECT_OSP):
822 if name not in self.op.osparams:
823 self.op.osparams[name] = value
824

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
881 """Check prerequisites.
882
883 """
884
885
886 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
887 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
888 cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
889 if not owned_groups.issuperset(cur_groups):
890 raise errors.OpPrereqError("New instance %s's node groups changed since"
891 " locks were acquired, current groups are"
892 " are '%s', owning groups '%s'; retry the"
893 " operation" %
894 (self.op.instance_name,
895 utils.CommaJoin(cur_groups),
896 utils.CommaJoin(owned_groups)),
897 errors.ECODE_STATE)
898
899 self._CalculateFileStorageDir()
900
901 if self.op.mode == constants.INSTANCE_IMPORT:
902 export_info = self._ReadExportInfo()
903 self._ReadExportParams(export_info)
904 self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
905 else:
906 self._old_instance_name = None
907
908 if (not self.cfg.GetVGName() and
909 self.op.disk_template not in constants.DTS_NOT_LVM):
910 raise errors.OpPrereqError("Cluster does not support lvm-based"
911 " instances", errors.ECODE_STATE)
912
913 if (self.op.hypervisor is None or
914 self.op.hypervisor == constants.VALUE_AUTO):
915 self.op.hypervisor = self.cfg.GetHypervisorType()
916
917 cluster = self.cfg.GetClusterInfo()
918 enabled_hvs = cluster.enabled_hypervisors
919 if self.op.hypervisor not in enabled_hvs:
920 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
921 " cluster (%s)" %
922 (self.op.hypervisor, ",".join(enabled_hvs)),
923 errors.ECODE_STATE)
924
925
926 for tag in self.op.tags:
927 objects.TaggableObject.ValidateTag(tag)
928
929
930 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
931 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
932 self.op.hvparams)
933 hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
934 hv_type.CheckParameterSyntax(filled_hvp)
935 self.hv_full = filled_hvp
936
937 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
938 "instance", "cluster")
939
940
941 self.be_full = _ComputeFullBeParams(self.op, cluster)
942
943
944 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
945
946
947
948 if self.op.identify_defaults:
949 self._RevertToDefaults(cluster)
950
951
952 self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
953 self.proc.GetECId())
954
955
956 default_vg = self.cfg.GetVGName()
957 self.disks = ComputeDisks(self.op, default_vg)
958
959 if self.op.mode == constants.INSTANCE_IMPORT:
960 disk_images = []
961 for idx in range(len(self.disks)):
962 option = "disk%d_dump" % idx
963 if export_info.has_option(constants.INISECT_INS, option):
964
965 export_name = export_info.get(constants.INISECT_INS, option)
966 image = utils.PathJoin(self.op.src_path, export_name)
967 disk_images.append(image)
968 else:
969 disk_images.append(False)
970
971 self.src_images = disk_images
972
973 if self.op.instance_name == self._old_instance_name:
974 for idx, nic in enumerate(self.nics):
975 if nic.mac == constants.VALUE_AUTO:
976 nic_mac_ini = "nic%d_mac" % idx
977 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
978
979
980
981
982 if self.op.ip_check:
983 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
984 raise errors.OpPrereqError("IP %s of instance %s already in use" %
985 (self.check_ip, self.op.instance_name),
986 errors.ECODE_NOTUNIQUE)
987
988
989
990
991
992
993
994
995
996 for nic in self.nics:
997 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
998 nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
999
1000
1001
1002 if self.op.iallocator is not None:
1003 self._RunAllocator()
1004
1005
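
    # Release all unneeded node locks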
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need
    # to know the NIC's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN:
      # Check the adoption data
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate the device paths given for adoption
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                            group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    # memory check on primary node
    # TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as
    # we've added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, self.pnode.uuid)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to
          # make a connection. In some cases stopping an instance can take a
          # long time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid,
                                            (iobj, None, None), False,
                                            self.op.reason)
      result.Raise("Could not start instance")

    return self.cfg.GetNodeNames(list(iobj.all_nodes))
1485 """Rename an instance.
1486
1487 """
1488 HPATH = "instance-rename"
1489 HTYPE = constants.HTYPE_INSTANCE
1490
1492 """Check arguments.
1493
1494 """
1495 if self.op.ip_check and not self.op.name_check:
1496
1497 raise errors.OpPrereqError("IP address check requires a name check",
1498 errors.ECODE_INVAL)
1499
1501 """Build hooks env.
1502
1503 This runs on master, primary and secondary nodes of the instance.
1504
1505 """
1506 env = BuildInstanceHookEnvByObject(self, self.instance)
1507 env["INSTANCE_NEW_NAME"] = self.op.new_name
1508 return env
1509
1516
1518 """Check prerequisites.
1519
1520 This checks that the instance is in the cluster and is not running.
1521
1522 """
1523 (self.op.instance_uuid, self.op.instance_name) = \
1524 ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1525 self.op.instance_name)
1526 instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1527 assert instance is not None
1528
1529
1530
1531
1532 if (instance.disk_template in constants.DTS_FILEBASED and
1533 self.op.new_name != instance.name):
1534 CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1535 instance.disk_template)
1536
1537 CheckNodeOnline(self, instance.primary_node)
1538 CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1539 msg="cannot rename")
1540 self.instance = instance
1541
1542 new_name = self.op.new_name
1543 if self.op.name_check:
1544 hostname = _CheckHostnameSane(self, new_name)
1545 new_name = self.op.new_name = hostname.name
1546 if (self.op.ip_check and
1547 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1548 raise errors.OpPrereqError("IP %s of instance %s already in use" %
1549 (hostname.ip, new_name),
1550 errors.ECODE_NOTUNIQUE)
1551
1552 instance_names = [inst.name for
1553 inst in self.cfg.GetAllInstancesInfo().values()]
1554 if new_name in instance_names and new_name != instance.name:
1555 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1556 new_name, errors.ECODE_EXISTS)
1557

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
        self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)

    # Change the instance lock; this is definitely safe while we hold the BGL
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
        renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)

    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        self.cfg.SetDiskID(disk, node_uuid)
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name
1618 """Remove an instance.
1619
1620 """
1621 HPATH = "instance-remove"
1622 HTYPE = constants.HTYPE_INSTANCE
1623 REQ_BGL = False
1624
1630
1638
1640 """Build hooks env.
1641
1642 This runs on master, primary and secondary nodes of the instance.
1643
1644 """
1645 env = BuildInstanceHookEnvByObject(self, self.instance)
1646 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1647 return env
1648
1650 """Build hooks nodes.
1651
1652 """
1653 nl = [self.cfg.GetMasterNode()]
1654 nl_post = list(self.instance.all_nodes) + nl
1655 return (nl, nl_post)
1656
1658 """Check prerequisites.
1659
1660 This checks that the instance is in the cluster.
1661
1662 """
1663 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1664 assert self.instance is not None, \
1665 "Cannot retrieve locked instance %s" % self.op.instance_name
1666

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1695 """Move an instance by data-copying.
1696
1697 """
1698 HPATH = "instance-move"
1699 HTYPE = constants.HTYPE_INSTANCE
1700 REQ_BGL = False
1701
1710
1718
1720 """Build hooks env.
1721
1722 This runs on master, primary and secondary nodes of the instance.
1723
1724 """
1725 env = {
1726 "TARGET_NODE": self.op.target_node,
1727 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1728 }
1729 env.update(BuildInstanceHookEnvByObject(self, self.instance))
1730 return env
1731
1733 """Build hooks nodes.
1734
1735 """
1736 nl = [
1737 self.cfg.GetMasterNode(),
1738 self.instance.primary_node,
1739 self.op.target_node_uuid,
1740 ]
1741 return (nl, nl)
1742
1744 """Check prerequisites.
1745
1746 This checks that the instance is in the cluster.
1747
1748 """
1749 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1750 assert self.instance is not None, \
1751 "Cannot retrieve locked instance %s" % self.op.instance_name
1752
1753 if self.instance.disk_template not in constants.DTS_COPYABLE:
1754 raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1755 self.instance.disk_template,
1756 errors.ECODE_STATE)
1757
1758 target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1759 assert target_node is not None, \
1760 "Cannot retrieve locked node %s" % self.op.target_node
1761
1762 self.target_node_uuid = target_node.uuid
1763 if target_node.uuid == self.instance.primary_node:
1764 raise errors.OpPrereqError("Instance %s is already on the node %s" %
1765 (self.instance.name, target_node.name),
1766 errors.ECODE_STATE)
1767
1768 bep = self.cfg.GetClusterInfo().FillBE(self.instance)
1769
1770 for idx, dsk in enumerate(self.instance.disks):
1771 if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1772 constants.DT_SHARED_FILE):
1773 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1774 " cannot copy" % idx, errors.ECODE_STATE)
1775
1776 CheckNodeOnline(self, target_node.uuid)
1777 CheckNodeNotDrained(self, target_node.uuid)
1778 CheckNodeVmCapable(self, target_node.uuid)
1779 cluster = self.cfg.GetClusterInfo()
1780 group_info = self.cfg.GetNodeGroup(target_node.group)
1781 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1782 CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1783 ignore=self.op.ignore_ipolicy)
1784
1785 if self.instance.admin_state == constants.ADMINST_UP:
1786
1787 CheckNodeFreeMemory(
1788 self, target_node.uuid, "failing over instance %s" %
1789 self.instance.name, bep[constants.BE_MAXMEM],
1790 self.instance.hypervisor,
1791 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1792 else:
1793 self.LogInfo("Not checking memory on the secondary node as"
1794 " instance will not be started")
1795
1796
1797 CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1798

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(self.instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(
                 target_node.uuid, (disk, self.instance), self.instance.name,
                 True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
                                                                self.instance),
                                             target_node.secondary_ip,
                                             dev_path, cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))
1897 """Allocates multiple instances at the same time.
1898
1899 """
1900 REQ_BGL = False
1901
1903 """Check arguments.
1904
1905 """
1906 nodes = []
1907 for inst in self.op.instances:
1908 if inst.iallocator is not None:
1909 raise errors.OpPrereqError("iallocator are not allowed to be set on"
1910 " instance objects", errors.ECODE_INVAL)
1911 nodes.append(bool(inst.pnode))
1912 if inst.disk_template in constants.DTS_INT_MIRROR:
1913 nodes.append(bool(inst.snode))
1914
1915 has_nodes = compat.any(nodes)
1916 if compat.all(nodes) ^ has_nodes:
1917 raise errors.OpPrereqError("There are instance objects providing"
1918 " pnode/snode while others do not",
1919 errors.ECODE_INVAL)
1920
1921 if not has_nodes and self.op.iallocator is None:
1922 default_iallocator = self.cfg.GetDefaultIAllocator()
1923 if default_iallocator:
1924 self.op.iallocator = default_iallocator
1925 else:
1926 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1927 " given and no cluster-wide default"
1928 " iallocator found; please specify either"
1929 " an iallocator or nodes on the instances"
1930 " or set a cluster-wide default iallocator",
1931 errors.ECODE_INVAL)
1932
1933 _CheckOpportunisticLocking(self.op)
1934
1935 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1936 if dups:
1937 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1938 utils.CommaJoin(dups), errors.ECODE_INVAL)
1939
1941 """Calculate the locks.
1942
1943 """
1944 self.share_locks = ShareAll()
1945 self.needed_locks = {
1946
1947
1948 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1949 }
1950
1951 if self.op.iallocator:
1952 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1953 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1954
1955 if self.op.opportunistic_locking:
1956 self.opportunistic_locks[locking.LEVEL_NODE] = True
1957 else:
1958 nodeslist = []
1959 for inst in self.op.instances:
1960 (inst.pnode_uuid, inst.pnode) = \
1961 ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1962 nodeslist.append(inst.pnode_uuid)
1963 if inst.snode is not None:
1964 (inst.snode_uuid, inst.snode) = \
1965 ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1966 nodeslist.append(inst.snode_uuid)
1967
1968 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1969
1970
1971 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1972

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
       self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)
1982 """Check prerequisite.
1983
1984 """
1985 if self.op.iallocator:
1986 cluster = self.cfg.GetClusterInfo()
1987 default_vg = self.cfg.GetVGName()
1988 ec_id = self.proc.GetECId()
1989
1990 if self.op.opportunistic_locking:
1991 # Only consider nodes for which a lock is held
1992 node_whitelist = self.cfg.GetNodeNames(
1993 list(self.owned_locks(locking.LEVEL_NODE)))
1994 else:
1995 node_whitelist = None
1996
1997 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1998 _ComputeNics(op, cluster, None,
1999 self.cfg, ec_id),
2000 _ComputeFullBeParams(op, cluster),
2001 node_whitelist)
2002 for op in self.op.instances]
2003
2004 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
2005 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
2006
2007 ial.Run(self.op.iallocator)
2008
2009 if not ial.success:
2010 raise errors.OpPrereqError("Can't compute nodes using"
2011 " iallocator '%s': %s" %
2012 (self.op.iallocator, ial.info),
2013 errors.ECODE_NORES)
2014
2015 self.ia_result = ial.result
2016
2017 if self.op.dry_run:
2018 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2019 constants.JOB_IDS_KEY: [],
2020 })
2021
2037
2038 def Exec(self, feedback_fn):
2039 """Executes the opcode.
2040
2041 """
2042 jobs = []
2043 if self.op.iallocator:
2044 op2inst = dict((op.instance_name, op) for op in self.op.instances)
2045 (allocatable, failed) = self.ia_result
2046
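# Each allocatable entry pairs an instance name with the chosen node names:
# the first name is the primary node, an optional second one the secondary.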
2047 for (name, node_names) in allocatable:
2048 op = op2inst.pop(name)
2049
2050 (op.pnode_uuid, op.pnode) = \
2051 ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2052 if len(node_names) > 1:
2053 (op.snode_uuid, op.snode) = \
2054 ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2055
2056 jobs.append([op])
2057
2058 missing = set(op2inst.keys()) - set(failed)
2059 assert not missing, \
2060 "Iallocator did return incomplete result: %s" % \
2061 utils.CommaJoin(missing)
2062 else:
2063 jobs.extend([op] for op in self.op.instances)
2064
2065 return ResultWithJobs(jobs, **self._ConstructPartialResult())
2066
2068 class _InstNicModPrivate:
2069 """Data structure for network interface modifications.
2070
2071 Used by L{LUInstanceSetParams}.
2072
2073 """
2075 self.params = None
2076 self.filled = None
2077
2079 def _PrepareContainerMods(mods, private_fn):
2080 """Prepares a list of container modifications by adding a private data field.
2081
2082 @type mods: list of tuples; (operation, index, parameters)
2083 @param mods: List of modifications
2084 @type private_fn: callable or None
2085 @param private_fn: Callable for constructing a private data field for a
2086 modification
2087 @rtype: list
2088
2089 """
2090 if private_fn is None:
2091 fn = lambda: None
2092 else:
2093 fn = private_fn
2094
2095 return [(op, idx, params, fn()) for (op, idx, params) in mods]
2096
2098 def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
2099 """Checks if nodes have enough physical CPUs
2100
2101 This function checks if all given nodes have the needed number of
2102 physical CPUs. In case any node has less CPUs or we cannot get the
2103 information from the node, this function raises an OpPrereqError
2104 exception.
2105
2106 @type lu: C{LogicalUnit}
2107 @param lu: a logical unit from which we get configuration data
2108 @type node_uuids: C{list}
2109 @param node_uuids: the list of node UUIDs to check
2110 @type requested: C{int}
2111 @param requested: the minimum acceptable number of physical CPUs
2112 @type hypervisor_specs: list of pairs (string, dict of strings)
2113 @param hypervisor_specs: list of hypervisor specifications in
2114 pairs (hypervisor_name, hvparams)
2115 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2116 or we cannot check the node
2117
2118 """
2119 nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2120 for node_uuid in node_uuids:
2121 info = nodeinfo[node_uuid]
2122 node_name = lu.cfg.GetNodeName(node_uuid)
2123 info.Raise("Cannot get current information from node %s" % node_name,
2124 prereq=True, ecode=errors.ECODE_ENVIRON)
2125 (_, _, (hv_info, )) = info.payload
2126 num_cpus = hv_info.get("cpu_total", None)
2127 if not isinstance(num_cpus, int):
2128 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2129 " on node %s, result was '%s'" %
2130 (node_name, num_cpus), errors.ECODE_ENVIRON)
2131 if requested > num_cpus:
2132 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2133 "required" % (node_name, num_cpus, requested),
2134 errors.ECODE_NORES)
2135
2137 def GetItemFromContainer(identifier, kind, container):
2138 """Return the item refered by the identifier.
2139
2140 @type identifier: string
2141 @param identifier: Item index or name or UUID
2142 @type kind: string
2143 @param kind: One-word item description
2144 @type container: list
2145 @param container: Container to get the item from
2146
2147 """
2148
2149 try:
2150 idx = int(identifier)
2151 if idx == -1:
2152 # -1 denotes the last item
2153 absidx = len(container) - 1
2154 elif idx < 0:
2155 raise IndexError("Not accepting negative indices other than -1")
2156 elif idx >= len(container):
2157 raise IndexError("Got %s index %s, but there are only %s" %
2158 (kind, idx, len(container)))
2159 else:
2160 absidx = idx
2161 return (absidx, container[idx])
2162 except ValueError:
2163 pass
2164
2165 for idx, item in enumerate(container):
2166 if item.uuid == identifier or item.name == identifier:
2167 return (idx, item)
2168
2169 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2170 (kind, identifier), errors.ECODE_NOENT)
2171
2172
2173 def _ApplyContainerMods(kind, container, chgdesc, mods,
2174 create_fn, modify_fn, remove_fn):
2175 """Applies descriptions in C{mods} to C{container}.
2176
2177 @type kind: string
2178 @param kind: One-word item description
2179 @type container: list
2180 @param container: Container to modify
2181 @type chgdesc: None or list
2182 @param chgdesc: List of applied changes
2183 @type mods: list
2184 @param mods: Modifications as returned by L{_PrepareContainerMods}
2185 @type create_fn: callable
2186 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2187 receives absolute item index, parameters and private data object as added
2188 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2189 as list
2190 @type modify_fn: callable
2191 @param modify_fn: Callback for modifying an existing item
2192 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2193 and private data object as added by L{_PrepareContainerMods}, returns
2194 changes as list
2195 @type remove_fn: callable
2196 @param remove_fn: Callback on removing item; receives absolute item index,
2197 item and private data object as added by L{_PrepareContainerMods}
2198
2199 """
2200 for (op, identifier, params, private) in mods:
2201 changes = None
2202
2203 if op == constants.DDM_ADD:
2204 # Calculate where item will be added
2205 # When adding an item, identifier can only be an index
2206 try:
2207 idx = int(identifier)
2208 except ValueError:
2209 raise errors.OpPrereqError("Only a non-negative integer or -1 is accepted as"
2210 " identifier for %s" % constants.DDM_ADD,
2211 errors.ECODE_INVAL)
2212 if idx == -1:
2213 addidx = len(container)
2214 else:
2215 if idx < 0:
2216 raise IndexError("Not accepting negative indices other than -1")
2217 elif idx > len(container):
2218 raise IndexError("Got %s index %s, but there are only %s" %
2219 (kind, idx, len(container)))
2220 addidx = idx
2221
2222 if create_fn is None:
2223 item = params
2224 else:
2225 (item, changes) = create_fn(addidx, params, private)
2226
2227 if idx == -1:
2228 container.append(item)
2229 else:
2230 assert idx >= 0
2231 assert idx <= len(container)
2232
2233 container.insert(idx, item)
2234 else:
2235 # Retrieve existing item
2236 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2237
2238 if op == constants.DDM_REMOVE:
2239 assert not params
2240
2241 if remove_fn is not None:
2242 remove_fn(absidx, item, private)
2243
2244 changes = [("%s/%s" % (kind, absidx), "remove")]
2245
2246 assert container[absidx] == item
2247 del container[absidx]
2248 elif op == constants.DDM_MODIFY:
2249 if modify_fn is not None:
2250 changes = modify_fn(absidx, item, params, private)
2251 else:
2252 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2253
2254 assert _TApplyContModsCbChanges(changes)
2255
2256 if not (chgdesc is None or changes is None):
2257 chgdesc.extend(changes)
2258
2260 def _UpdateIvNames(base_index, disks):
2261 """Updates the C{iv_name} attribute of disks.
2262
2263 @type disks: list of L{objects.Disk}
2264
2265 """
2266 for (idx, disk) in enumerate(disks):
2267 disk.iv_name = "disk/%s" % (base_index + idx, )
2268
2270 class LUInstanceSetParams(LogicalUnit):
2271 """Modifies an instances's parameters.
2272
2273 """
2274 HPATH = "instance-modify"
2275 HTYPE = constants.HTYPE_INSTANCE
2276 REQ_BGL = False
2277
2278 @staticmethod
2279 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2280 assert ht.TList(mods)
2281 assert not mods or len(mods[0]) in (2, 3)
2282
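# Old-style two-element modifications are upgraded here: a lone add or
# remove entry is given index -1 (the end of the list), while anything else
# becomes an explicit DDM_MODIFY of the named item.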
2283 if mods and len(mods[0]) == 2:
2284 result = []
2285
2286 addremove = 0
2287 for op, params in mods:
2288 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2289 result.append((op, -1, params))
2290 addremove += 1
2291
2292 if addremove > 1:
2293 raise errors.OpPrereqError("Only one %s add or remove operation is"
2294 " supported at a time" % kind,
2295 errors.ECODE_INVAL)
2296 else:
2297 result.append((constants.DDM_MODIFY, op, params))
2298
2299 assert verify_fn(result)
2300 else:
2301 result = mods
2302
2303 return result
2304
2305 @staticmethod
2306 def _CheckMods(kind, mods, key_types, item_fn):
2307 """Ensures requested disk/NIC modifications are valid.
2308
2309 """
2310 for (op, _, params) in mods:
2311 assert ht.TDict(params)
2312
2313 # If 'key_types' is an empty dict, we assume we have an
2314 # 'ext' template and thus do not ForceDictType
2315 if key_types:
2316 utils.ForceDictType(params, key_types)
2317
2318 if op == constants.DDM_REMOVE:
2319 if params:
2320 raise errors.OpPrereqError("No settings should be passed when"
2321 " removing a %s" % kind,
2322 errors.ECODE_INVAL)
2323 elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
2324 item_fn(op, params)
2325 else:
2326 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2327
2328 def _VerifyDiskModification(self, op, params, excl_stor):
2329 """Verifies a disk modification.
2330
2331 """
2332 if op == constants.DDM_ADD:
2333 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2334 if mode not in constants.DISK_ACCESS_SET:
2335 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2336 errors.ECODE_INVAL)
2337
2338 size = params.get(constants.IDISK_SIZE, None)
2339 if size is None:
2340 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2341 constants.IDISK_SIZE, errors.ECODE_INVAL)
2342
2343 try:
2344 size = int(size)
2345 except (TypeError, ValueError), err:
2346 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2347 errors.ECODE_INVAL)
2348
2349 params[constants.IDISK_SIZE] = size
2350 name = params.get(constants.IDISK_NAME, None)
2351 if name is not None and name.lower() == constants.VALUE_NONE:
2352 params[constants.IDISK_NAME] = None
2353
2354 CheckSpindlesExclusiveStorage(params, excl_stor, True)
2355
2356 elif op == constants.DDM_MODIFY:
2357 if constants.IDISK_SIZE in params:
2358 raise errors.OpPrereqError("Disk size change not possible, use"
2359 " grow-disk", errors.ECODE_INVAL)
2360
2361 # Changing arbitrary parameters is only allowed for 'ext' disk
2362 # templates; all others accept just the modifiable disk parameters
2363 if self.instance.disk_template != constants.DT_EXT:
2364 utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2365
2366 name = params.get(constants.IDISK_NAME, None)
2367 if name is not None and name.lower() == constants.VALUE_NONE:
2368 params[constants.IDISK_NAME] = None
2369
2370 @staticmethod
2371 def _VerifyNicModification(op, params):
2372 """Verifies a network interface modification.
2373
2374 """
2375 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2376 ip = params.get(constants.INIC_IP, None)
2377 name = params.get(constants.INIC_NAME, None)
2378 req_net = params.get(constants.INIC_NETWORK, None)
2379 link = params.get(constants.NIC_LINK, None)
2380 mode = params.get(constants.NIC_MODE, None)
2381 if name is not None and name.lower() == constants.VALUE_NONE:
2382 params[constants.INIC_NAME] = None
2383 if req_net is not None:
2384 if req_net.lower() == constants.VALUE_NONE:
2385 params[constants.INIC_NETWORK] = None
2386 req_net = None
2387 elif link is not None or mode is not None:
2388 raise errors.OpPrereqError("If network is given, mode or link"
2389 " should not be given",
2390 errors.ECODE_INVAL)
2391
2392 if op == constants.DDM_ADD:
2393 macaddr = params.get(constants.INIC_MAC, None)
2394 if macaddr is None:
2395 params[constants.INIC_MAC] = constants.VALUE_AUTO
2396
2397 if ip is not None:
2398 if ip.lower() == constants.VALUE_NONE:
2399 params[constants.INIC_IP] = None
2400 else:
2401 if ip.lower() == constants.NIC_IP_POOL:
2402 if op == constants.DDM_ADD and req_net is None:
2403 raise errors.OpPrereqError("If ip=pool, parameter network"
2404 " cannot be none",
2405 errors.ECODE_INVAL)
2406 else:
2407 if not netutils.IPAddress.IsValid(ip):
2408 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2409 errors.ECODE_INVAL)
2410
2411 if constants.INIC_MAC in params:
2412 macaddr = params[constants.INIC_MAC]
2413 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2414 macaddr = utils.NormalizeAndValidateMac(macaddr)
2415
2416 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2417 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2418 " modifying an existing NIC",
2419 errors.ECODE_INVAL)
2420
2421 def CheckArguments(self):
2422 if not (self.op.nics or self.op.disks or self.op.disk_template or
2423 self.op.hvparams or self.op.beparams or self.op.os_name or
2424 self.op.osparams or self.op.offline is not None or
2425 self.op.runtime_mem or self.op.pnode):
2426 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2427
2428 if self.op.hvparams:
2429 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2430 "hypervisor", "instance", "cluster")
2431
2432 self.op.disks = self._UpgradeDiskNicMods(
2433 "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
2434 self.op.nics = self._UpgradeDiskNicMods(
2435 "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)
2436
2437 if self.op.disks and self.op.disk_template is not None:
2438 raise errors.OpPrereqError("Disk template conversion and other disk"
2439 " changes not supported at the same time",
2440 errors.ECODE_INVAL)
2441
2442 if (self.op.disk_template and
2443 self.op.disk_template in constants.DTS_INT_MIRROR and
2444 self.op.remote_node is None):
2445 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2446 " one requires specifying a secondary node",
2447 errors.ECODE_INVAL)
2448
2449 # Check NIC modifications
2450 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2451 self._VerifyNicModification)
2452
2453 if self.op.pnode:
2454 (self.op.pnode_uuid, self.op.pnode) = \
2455 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2456
2467
2468 def DeclareLocks(self, level):
2469 if level == locking.LEVEL_NODEGROUP:
2470 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2471 # Acquire locks for the instance's nodegroups optimistically; needs
2472 # to be verified in CheckPrereq
2473 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2474 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2475 elif level == locking.LEVEL_NODE:
2476 self._LockInstancesNodes()
2477 if self.op.disk_template and self.op.remote_node:
2478 (self.op.remote_node_uuid, self.op.remote_node) = \
2479 ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2480 self.op.remote_node)
2481 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2482 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2483 # Copy node locks
2484 self.needed_locks[locking.LEVEL_NODE_RES] = \
2485 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2486
2521
2528
2529 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2530 old_params, cluster, pnode_uuid):
2531
2532 update_params_dict = dict([(key, params[key])
2533 for key in constants.NICS_PARAMETERS
2534 if key in params])
2535
2536 req_link = update_params_dict.get(constants.NIC_LINK, None)
2537 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2538
2539 new_net_uuid = None
2540 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2541 if new_net_uuid_or_name:
2542 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2543 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2544
2545 if old_net_uuid:
2546 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2547
2548 if new_net_uuid:
2549 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2550 if not netparams:
2551 raise errors.OpPrereqError("No netparams found for the network"
2552 " %s, probably not connected" %
2553 new_net_obj.name, errors.ECODE_INVAL)
2554 new_params = dict(netparams)
2555 else:
2556 new_params = GetUpdatedParams(old_params, update_params_dict)
2557
2558 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2559
2560 new_filled_params = cluster.SimpleFillNIC(new_params)
2561 objects.NIC.CheckParameterSyntax(new_filled_params)
2562
2563 new_mode = new_filled_params[constants.NIC_MODE]
2564 if new_mode == constants.NIC_MODE_BRIDGED:
2565 bridge = new_filled_params[constants.NIC_LINK]
2566 msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2567 if msg:
2568 msg = "Error checking bridges on node '%s': %s" % \
2569 (self.cfg.GetNodeName(pnode_uuid), msg)
2570 if self.op.force:
2571 self.warn.append(msg)
2572 else:
2573 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2574
2575 elif new_mode == constants.NIC_MODE_ROUTED:
2576 ip = params.get(constants.INIC_IP, old_ip)
2577 if ip is None:
2578 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2579 " on a routed NIC", errors.ECODE_INVAL)
2580
2581 elif new_mode == constants.NIC_MODE_OVS:
2582
2583 self.LogInfo("OVS links are currently not checked for correctness")
2584
2585 if constants.INIC_MAC in params:
2586 mac = params[constants.INIC_MAC]
2587 if mac is None:
2588 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2589 errors.ECODE_INVAL)
2590 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2591 # Generate a new MAC address
2592 params[constants.INIC_MAC] = \
2593 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2594 else:
2595 # Or validate/reserve the user-supplied MAC address
2596 try:
2597 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2598 except errors.ReservationError:
2599 raise errors.OpPrereqError("MAC address '%s' already in use"
2600 " in cluster" % mac,
2601 errors.ECODE_NOTUNIQUE)
2602 elif new_net_uuid != old_net_uuid:
2603
2604 def get_net_prefix(net_uuid):
2605 mac_prefix = None
2606 if net_uuid:
2607 nobj = self.cfg.GetNetwork(net_uuid)
2608 mac_prefix = nobj.mac_prefix
2609
2610 return mac_prefix
2611
2612 new_prefix = get_net_prefix(new_net_uuid)
2613 old_prefix = get_net_prefix(old_net_uuid)
2614 if old_prefix != new_prefix:
2615 params[constants.INIC_MAC] = \
2616 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2617
2618 # If there is a change in the (ip, network) tuple
2619 new_ip = params.get(constants.INIC_IP, old_ip)
2620 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2621 if new_ip:
2622
2623 if new_ip.lower() == constants.NIC_IP_POOL:
2624 if new_net_uuid:
2625 try:
2626 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2627 except errors.ReservationError:
2628 raise errors.OpPrereqError("Unable to get a free IP"
2629 " from the address pool",
2630 errors.ECODE_STATE)
2631 self.LogInfo("Chose IP %s from network %s",
2632 new_ip,
2633 new_net_obj.name)
2634 params[constants.INIC_IP] = new_ip
2635 else:
2636 raise errors.OpPrereqError("ip=pool, but no network found",
2637 errors.ECODE_INVAL)
2638
2639 elif new_net_uuid:
2640 try:
2641 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId())
2642 self.LogInfo("Reserving IP %s in network %s",
2643 new_ip, new_net_obj.name)
2644 except errors.ReservationError:
2645 raise errors.OpPrereqError("IP %s not available in network %s" %
2646 (new_ip, new_net_obj.name),
2647 errors.ECODE_NOTUNIQUE)
2648
2649 elif self.op.conflicts_check:
2650 _CheckForConflictingIp(self, new_ip, pnode_uuid)
2651
2652
2653 if old_ip and old_net_uuid:
2654 try:
2655 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2656 except errors.AddressPoolError:
2657 logging.warning("Release IP %s not contained in network %s",
2658 old_ip, old_net_obj.name)
2659
2660
2661 elif (old_net_uuid is not None and
2662 (req_link is not None or req_mode is not None)):
2663 raise errors.OpPrereqError("Not allowed to change link or mode of"
2664 " a NIC that is connected to a network",
2665 errors.ECODE_INVAL)
2666
2667 private.params = new_params
2668 private.filled = new_filled_params
2669
2670 def _PreCheckDiskTemplate(self, pnode_info):
2671 """CheckPrereq checks related to a new disk template."""
2672
2673 pnode_uuid = self.instance.primary_node
2674 if self.instance.disk_template == self.op.disk_template:
2675 raise errors.OpPrereqError("Instance already has disk template %s" %
2676 self.instance.disk_template,
2677 errors.ECODE_INVAL)
2678
2679 if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
2680 raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
2681 " cluster." % self.op.disk_template)
2682
2683 if (self.instance.disk_template,
2684 self.op.disk_template) not in self._DISK_CONVERSIONS:
2685 raise errors.OpPrereqError("Unsupported disk template conversion from"
2686 " %s to %s" % (self.instance.disk_template,
2687 self.op.disk_template),
2688 errors.ECODE_INVAL)
2689 CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2690 msg="cannot change disk template")
2691 if self.op.disk_template in constants.DTS_INT_MIRROR:
2692 if self.op.remote_node_uuid == pnode_uuid:
2693 raise errors.OpPrereqError("Given new secondary node %s is the same"
2694 " as the primary node of the instance" %
2695 self.op.remote_node, errors.ECODE_STATE)
2696 CheckNodeOnline(self, self.op.remote_node_uuid)
2697 CheckNodeNotDrained(self, self.op.remote_node_uuid)
2698 # FIXME: here we assume that the old instance type is DT_PLAIN
2699 assert self.instance.disk_template == constants.DT_PLAIN
2700 disks = [{constants.IDISK_SIZE: d.size,
2701 constants.IDISK_VG: d.logical_id[0]}
2702 for d in self.instance.disks]
2703 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2704 CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2705
2706 snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2707 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2708 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2709 snode_group)
2710 CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2711 ignore=self.op.ignore_ipolicy)
2712 if pnode_info.group != snode_info.group:
2713 self.LogWarning("The primary and secondary nodes are in two"
2714 " different node groups; the disk parameters"
2715 " from the first disk's node group will be"
2716 " used")
2717
2718 if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2719 # Make sure none of the nodes require exclusive storage
2720 nodes = [pnode_info]
2721 if self.op.disk_template in constants.DTS_INT_MIRROR:
2722 assert snode_info
2723 nodes.append(snode_info)
2724 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2725 if compat.any(map(has_es, nodes)):
2726 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2727 " storage is enabled" % (self.instance.disk_template,
2728 self.op.disk_template))
2729 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2730
2731 def _PreCheckDisks(self, ispec):
2732 """CheckPrereq checks related to disk changes.
2733
2734 @type ispec: dict
2735 @param ispec: instance specs to be updated with the new disks
2736
2737 """
2738 self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2739
2740 excl_stor = compat.any(
2741 rpc.GetExclusiveStorageForNodes(self.cfg,
2742 self.instance.all_nodes).values()
2743 )
2744
2745 # Check disk modifications. This is done here and not in CheckArguments
2746 # (as with NICs), because we need to know the instance's disk template
2747 ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2748 if self.instance.disk_template == constants.DT_EXT:
2749 self._CheckMods("disk", self.op.disks, {}, ver_fn)
2750 else:
2751 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2752 ver_fn)
2753
2754 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2755
2756 # Check the validity of the 'provider' parameter
2757 if self.instance.disk_template == constants.DT_EXT:
2758 for mod in self.diskmod:
2759 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2760 if mod[0] == constants.DDM_ADD:
2761 if ext_provider is None:
2762 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2763 " '%s' missing, during disk add" %
2764 (constants.DT_EXT,
2765 constants.IDISK_PROVIDER),
2766 errors.ECODE_NOENT)
2767 elif mod[0] == constants.DDM_MODIFY:
2768 if ext_provider:
2769 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2770 " modification" %
2771 constants.IDISK_PROVIDER,
2772 errors.ECODE_INVAL)
2773 else:
2774 for mod in self.diskmod:
2775 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2776 if ext_provider is not None:
2777 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2778 " instances of type '%s'" %
2779 (constants.IDISK_PROVIDER,
2780 constants.DT_EXT),
2781 errors.ECODE_INVAL)
2782
2783 if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
2784 raise errors.OpPrereqError("Disk operations not supported for"
2785 " diskless instances", errors.ECODE_INVAL)
2786
2787 def _PrepareDiskMod(_, disk, params, __):
2788 disk.name = params.get(constants.IDISK_NAME, None)
2789
2790 # Verify disk changes (operating on a copy)
2791 disks = copy.deepcopy(self.instance.disks)
2792 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2793 _PrepareDiskMod, None)
2794 utils.ValidateDeviceNames("disk", disks)
2795 if len(disks) > constants.MAX_DISKS:
2796 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2797 " more" % constants.MAX_DISKS,
2798 errors.ECODE_STATE)
2799 disk_sizes = [disk.size for disk in self.instance.disks]
2800 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2801 self.diskmod if op == constants.DDM_ADD)
2802 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2803 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2804
2805 if self.op.offline is not None and self.op.offline:
2806 CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
2807 msg="can't change to offline")
2808
2809 def CheckPrereq(self):
2810 """Check prerequisites.
2811
2812 This only checks the instance list against the existing names.
2813
2814 """
2815 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2816 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
2817 self.cluster = self.cfg.GetClusterInfo()
2818
2819 assert self.instance is not None, \
2820 "Cannot retrieve locked instance %s" % self.op.instance_name
2821
2822 pnode_uuid = self.instance.primary_node
2823
2824 self.warn = []
2825
2826 if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
2827 not self.op.force):
2828 # Verify that the instance is not up
2829 instance_info = self.rpc.call_instance_info(
2830 pnode_uuid, self.instance.name, self.instance.hypervisor,
2831 self.instance.hvparams)
2832 if instance_info.fail_msg:
2833 self.warn.append("Can't get instance runtime information: %s" %
2834 instance_info.fail_msg)
2835 elif instance_info.payload:
2836 raise errors.OpPrereqError("Instance is still running on %s" %
2837 self.cfg.GetNodeName(pnode_uuid),
2838 errors.ECODE_STATE)
2839
2840 assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
2841 node_uuids = list(self.instance.all_nodes)
2842 pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
2843
2844
2845 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2846 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2847
2848 # Dictionary with instance information after the modification
2849 ispec = {}
2850
2851 # Prepare NIC modifications
2852 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2853
2854 # OS change
2855 if self.op.os_name and not self.op.force:
2856 CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
2857 self.op.force_variant)
2858 instance_os = self.op.os_name
2859 else:
2860 instance_os = self.instance.os
2861
2862 assert not (self.op.disk_template and self.op.disks), \
2863 "Can't modify disk template and apply disk changes at the same time"
2864
2865 if self.op.disk_template:
2866 self._PreCheckDiskTemplate(pnode_info)
2867
2868 self._PreCheckDisks(ispec)
2869
2870 # hvparams processing
2871 if self.op.hvparams:
2872 hv_type = self.instance.hypervisor
2873 i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
2874 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2875 hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
2876
2877 # Local syntax check of the new hypervisor parameters
2878 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2879 CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
2880 self.hv_proposed = self.hv_new = hv_new
2881 self.hv_inst = i_hvdict
2882 else:
2883 self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
2884 self.instance.os,
2885 self.instance.hvparams)
2886 self.hv_new = self.hv_inst = {}
2887
2888 # beparams processing
2889 if self.op.beparams:
2890 i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
2891 use_none=True)
2892 objects.UpgradeBeParams(i_bedict)
2893 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2894 be_new = self.cluster.SimpleFillBE(i_bedict)
2895 self.be_proposed = self.be_new = be_new
2896 self.be_inst = i_bedict
2897 else:
2898 self.be_new = self.be_inst = {}
2899 self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
2900 be_old = self.cluster.FillBE(self.instance)
2901
2902 # CPU param validation -- checking every time a parameter is
2903 # changed to cover all cases where either CPU mask or vcpus have
2904 # changed
2905 if (constants.BE_VCPUS in self.be_proposed and
2906 constants.HV_CPU_MASK in self.hv_proposed):
2907 cpu_list = \
2908 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2909 # Verify mask is consistent with number of vCPUs. Can skip this
2910 # test if only 1 entry in the CPU mask, which means same mask
2911 # is applied to all vCPUs
2912 if (len(cpu_list) > 1 and
2913 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2914 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2915 " CPU mask [%s]" %
2916 (self.be_proposed[constants.BE_VCPUS],
2917 self.hv_proposed[constants.HV_CPU_MASK]),
2918 errors.ECODE_INVAL)
2919
2920 # Only perform this test if a new CPU mask is given
2921 if constants.HV_CPU_MASK in self.hv_new:
2922 # Calculate the largest CPU number requested
2923 max_requested_cpu = max(map(max, cpu_list))
2924 # Check that all of the instance's nodes have enough physical CPUs to
2925 # satisfy the requested CPU mask
2926 hvspecs = [(self.instance.hypervisor,
2927 self.cfg.GetClusterInfo()
2928 .hvparams[self.instance.hypervisor])]
2929 _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
2930 max_requested_cpu + 1,
2931 hvspecs)
2932
2933 # osparams processing
2934 if self.op.osparams:
2935 i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
2936 CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
2937 self.os_inst = i_osdict
2938 else:
2939 self.os_inst = {}
2940
2941 # TODO(dynmem): do the appropriate check involving MINMEM
2942 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
2943 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
2944 mem_check_list = [pnode_uuid]
2945 if be_new[constants.BE_AUTO_BALANCE]:
2946 # Either we changed auto_balance to yes or it was set before
2947 mem_check_list.extend(self.instance.secondary_nodes)
2948 instance_info = self.rpc.call_instance_info(
2949 pnode_uuid, self.instance.name, self.instance.hypervisor,
2950 self.instance.hvparams)
2951 hvspecs = [(self.instance.hypervisor,
2952 self.cluster.hvparams[self.instance.hypervisor])]
2953 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
2954 hvspecs)
2955 pninfo = nodeinfo[pnode_uuid]
2956 msg = pninfo.fail_msg
2957 if msg:
2958 # Assume the primary node is unreachable and go ahead
2959 self.warn.append("Can't get info from primary node %s: %s" %
2960 (self.cfg.GetNodeName(pnode_uuid), msg))
2961 else:
2962 (_, _, (pnhvinfo, )) = pninfo.payload
2963 if not isinstance(pnhvinfo.get("memory_free", None), int):
2964 self.warn.append("Node data from primary node %s doesn't contain"
2965 " free memory information" %
2966 self.cfg.GetNodeName(pnode_uuid))
2967 elif instance_info.fail_msg:
2968 self.warn.append("Can't get instance runtime information: %s" %
2969 instance_info.fail_msg)
2970 else:
2971 if instance_info.payload:
2972 current_mem = int(instance_info.payload["memory"])
2973 else:
2974 # Assume instance not running
2975 # (there is a slight race condition here, but it's not very
2976 # probable, and we have no other way to check)
2977 # TODO: Describe race condition
2978 current_mem = 0
2979
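# Memory that would be missing on the primary node: the new maximum, less
# what the instance currently uses and what the node has free.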
2980 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
2981 pnhvinfo["memory_free"])
2982 if miss_mem > 0:
2983 raise errors.OpPrereqError("This change will prevent the instance"
2984 " from starting, due to %d MB of memory"
2985 " missing on its primary node" %
2986 miss_mem, errors.ECODE_NORES)
2987
2988 if be_new[constants.BE_AUTO_BALANCE]:
2989 for node_uuid, nres in nodeinfo.items():
2990 if node_uuid not in self.instance.secondary_nodes:
2991 continue
2992 nres.Raise("Can't get info from secondary node %s" %
2993 self.cfg.GetNodeName(node_uuid), prereq=True,
2994 ecode=errors.ECODE_STATE)
2995 (_, _, (nhvinfo, )) = nres.payload
2996 if not isinstance(nhvinfo.get("memory_free", None), int):
2997 raise errors.OpPrereqError("Secondary node %s didn't return free"
2998 " memory information" %
2999 self.cfg.GetNodeName(node_uuid),
3000 errors.ECODE_STATE)
3001 # TODO(dynmem): do the appropriate check involving MINMEM
3002 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
3003 raise errors.OpPrereqError("This change will prevent the instance"
3004 " from failover to its secondary node"
3005 " %s, due to not enough memory" %
3006 self.cfg.GetNodeName(node_uuid),
3007 errors.ECODE_STATE)
3008
3009 if self.op.runtime_mem:
3010 remote_info = self.rpc.call_instance_info(
3011 self.instance.primary_node, self.instance.name,
3012 self.instance.hypervisor,
3013 self.cluster.hvparams[self.instance.hypervisor])
3014 remote_info.Raise("Error checking node %s" %
3015 self.cfg.GetNodeName(self.instance.primary_node))
3016 if not remote_info.payload:
3017 raise errors.OpPrereqError("Instance %s is not running" %
3018 self.instance.name, errors.ECODE_STATE)
3019
3020 current_memory = remote_info.payload["memory"]
3021 if (not self.op.force and
3022 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
3023 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
3024 raise errors.OpPrereqError("Instance %s must have between %d and"
3025 " %d MB of memory unless --force is"
3026 " given" %
3027 (self.instance.name,
3028 self.be_proposed[constants.BE_MINMEM],
3029 self.be_proposed[constants.BE_MAXMEM]),
3030 errors.ECODE_INVAL)
3031
3032 delta = self.op.runtime_mem - current_memory
3033 if delta > 0:
3034 CheckNodeFreeMemory(
3035 self, self.instance.primary_node,
3036 "ballooning memory for instance %s" % self.instance.name, delta,
3037 self.instance.hypervisor,
3038 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
3039
3040
3041 cluster = self.cluster
3042
3043 def _PrepareNicCreate(_, params, private):
3044 self._PrepareNicModification(params, private, None, None,
3045 {}, cluster, pnode_uuid)
3046 return (None, None)
3047
3048 def _PrepareNicMod(_, nic, params, private):
3049 self._PrepareNicModification(params, private, nic.ip, nic.network,
3050 nic.nicparams, cluster, pnode_uuid)
3051 return None
3052
3053 def _PrepareNicRemove(_, params, __):
3054 ip = params.ip
3055 net = params.network
3056 if net is not None and ip is not None:
3057 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
3058
3059 # Verify NIC changes (operating on copy)
3060 nics = self.instance.nics[:]
3061 _ApplyContainerMods("NIC", nics, None, self.nicmod,
3062 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
3063 if len(nics) > constants.MAX_NICS:
3064 raise errors.OpPrereqError("Instance has too many network interfaces"
3065 " (%d), cannot add more" % constants.MAX_NICS,
3066 errors.ECODE_STATE)
3067
3068 # Pre-compute NIC changes (necessary to use result in hooks)
3069 self._nic_chgdesc = []
3070 if self.nicmod:
3071 # Operate on copies as this is still in prereq
3072 nics = [nic.Copy() for nic in self.instance.nics]
3073 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3074 self._CreateNewNic, self._ApplyNicMods, None)
3075 # Verify that NIC names are unique and valid
3076 utils.ValidateDeviceNames("NIC", nics)
3077 self._new_nics = nics
3078 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3079 else:
3080 self._new_nics = None
3081 ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
3082
3083 if not self.op.ignore_ipolicy:
3084 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
3085 group_info)
3086
3087 # Fill ispec with backend parameters
3088 ispec[constants.ISPEC_SPINDLE_USE] = \
3089 self.be_new.get(constants.BE_SPINDLE_USE, None)
3090 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3091 None)
3092
3093 # Copy ispec to verify parameters with min/max values separately
3094 if self.op.disk_template:
3095 new_disk_template = self.op.disk_template
3096 else:
3097 new_disk_template = self.instance.disk_template
3098 ispec_max = ispec.copy()
3099 ispec_max[constants.ISPEC_MEM_SIZE] = \
3100 self.be_new.get(constants.BE_MAXMEM, None)
3101 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3102 new_disk_template)
3103 ispec_min = ispec.copy()
3104 ispec_min[constants.ISPEC_MEM_SIZE] = \
3105 self.be_new.get(constants.BE_MINMEM, None)
3106 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3107 new_disk_template)
3108
3109 if (res_max or res_min):
3110 # FIXME: Improve error message by including information about whether
3111 # the upper or lower limit of the parameter is violated, or both
3112 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3113 (group_info, group_info.name,
3114 utils.CommaJoin(set(res_max + res_min))))
3115 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3116
3117 def _ConvertPlainToDrbd(self, feedback_fn):
3118 """Converts an instance from plain to drbd.
3119
3120 """
3121 feedback_fn("Converting template to drbd")
3122 pnode_uuid = self.instance.primary_node
3123 snode_uuid = self.op.remote_node_uuid
3124
3125 assert self.instance.disk_template == constants.DT_PLAIN
3126
3127 # Create a fake disk info for GenerateDiskTemplate
3128 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3129 constants.IDISK_VG: d.logical_id[0],
3130 constants.IDISK_NAME: d.name}
3131 for d in self.instance.disks]
3132 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3133 self.instance.uuid, pnode_uuid,
3134 [snode_uuid], disk_info, None, None, 0,
3135 feedback_fn, self.diskparams)
3136 anno_disks = rpc.AnnotateDiskParams(constants.DT_DRBD8, new_disks,
3137 self.diskparams)
3138 p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
3139 s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
3140 info = GetInstanceInfoText(self.instance)
3141 feedback_fn("Creating additional volumes...")
3142 # First, create the missing data and meta devices
3143 for disk in anno_disks:
3144
3145 CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
3146 info, True, p_excl_stor)
3147 for child in disk.children:
3148 CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
3149 s_excl_stor)
3150
3151
3152 feedback_fn("Renaming original volumes...")
3153 rename_list = [(o, n.children[0].logical_id)
3154 for (o, n) in zip(self.instance.disks, new_disks)]
3155 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
3156 result.Raise("Failed to rename original LVs")
3157
3158 feedback_fn("Initializing DRBD devices...")
3159
3160 try:
3161 for disk in anno_disks:
3162 for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
3163 (snode_uuid, s_excl_stor)]:
3164 f_create = node_uuid == pnode_uuid
3165 CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
3166 f_create, excl_stor)
3167 except errors.GenericError, e:
3168 feedback_fn("Initializing of DRBD devices failed;"
3169 " renaming back original volumes...")
3170 for disk in new_disks:
3171 self.cfg.SetDiskID(disk, pnode_uuid)
3172 rename_back_list = [(n.children[0], o.logical_id)
3173 for (n, o) in zip(new_disks, self.instance.disks)]
3174 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
3175 result.Raise("Failed to rename LVs back after error %s" % str(e))
3176 raise
3177
3178
3179 self.instance.disk_template = constants.DT_DRBD8
3180 self.instance.disks = new_disks
3181 self.cfg.Update(self.instance, feedback_fn)
3182
3183 # Release node locks while waiting for sync
3184 ReleaseLocks(self, locking.LEVEL_NODE)
3185
3186 # Disks are created, waiting for sync
3187 disk_abort = not WaitForSync(self, self.instance,
3188 oneshot=not self.op.wait_for_sync)
3189 if disk_abort:
3190 raise errors.OpExecError("There are some degraded disks for"
3191 " this instance, please cleanup manually")
3192
3193 # Node resource locks will be released by caller
3194
3195 def _ConvertDrbdToPlain(self, feedback_fn):
3196 """Converts an instance from drbd to plain.
3197
3198 """
3199 assert len(self.instance.secondary_nodes) == 1
3200 assert self.instance.disk_template == constants.DT_DRBD8
3201
3202 pnode_uuid = self.instance.primary_node
3203 snode_uuid = self.instance.secondary_nodes[0]
3204 feedback_fn("Converting template to plain")
3205
3206 old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
3207 new_disks = [d.children[0] for d in self.instance.disks]
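# The data child (children[0]) of each DRBD disk becomes the plain disk;
# the metadata child is removed from the primary node further down.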
3208
3209 # Copy over size, mode and name
3210 for parent, child in zip(old_disks, new_disks):
3211 child.size = parent.size
3212 child.mode = parent.mode
3213 child.name = parent.name
3214
3215
3216 # These are DRBD disks, return their ports to the pool
3217 for disk in old_disks:
3218 tcp_port = disk.logical_id[2]
3219 self.cfg.AddTcpUdpPort(tcp_port)
3220
3221 # Update instance structure
3222 self.instance.disks = new_disks
3223 self.instance.disk_template = constants.DT_PLAIN
3224 _UpdateIvNames(0, self.instance.disks)
3225 self.cfg.Update(self.instance, feedback_fn)
3226
3227 # Release locks in case removing disks takes a while
3228 ReleaseLocks(self, locking.LEVEL_NODE)
3229
3230 feedback_fn("Removing volumes on the secondary node...")
3231 for disk in old_disks:
3232 self.cfg.SetDiskID(disk, snode_uuid)
3233 msg = self.rpc.call_blockdev_remove(snode_uuid, disk).fail_msg
3234 if msg:
3235 self.LogWarning("Could not remove block device %s on node %s,"
3236 " continuing anyway: %s", disk.iv_name,
3237 self.cfg.GetNodeName(snode_uuid), msg)
3238
3239 feedback_fn("Removing unneeded volumes on the primary node...")
3240 for idx, disk in enumerate(old_disks):
3241 meta = disk.children[1]
3242 self.cfg.SetDiskID(meta, pnode_uuid)
3243 msg = self.rpc.call_blockdev_remove(pnode_uuid, meta).fail_msg
3244 if msg:
3245 self.LogWarning("Could not remove metadata for disk %d on node %s,"
3246 " continuing anyway: %s", idx,
3247 self.cfg.GetNodeName(pnode_uuid), msg)
3248
3249 def _CreateNewDisk(self, idx, params, _):
3250 """Creates a new disk.
3251
3252 """
3253 # Add a new disk
3254 if self.instance.disk_template in constants.DTS_FILEBASED:
3255 (file_driver, file_path) = self.instance.disks[0].logical_id
3256 file_path = os.path.dirname(file_path)
3257 else:
3258 file_driver = file_path = None
3259
3260 disk = \
3261 GenerateDiskTemplate(self, self.instance.disk_template,
3262 self.instance.uuid, self.instance.primary_node,
3263 self.instance.secondary_nodes, [params], file_path,
3264 file_driver, idx, self.Log, self.diskparams)[0]
3265
3266 new_disks = CreateDisks(self, self.instance, disks=[disk])
3267
3268 if self.cluster.prealloc_wipe_disks:
3269 # Wipe new disk
3270 WipeOrCleanupDisks(self, self.instance,
3271 disks=[(idx, disk, 0)],
3272 cleanup=new_disks)
3273
3274 return (disk, [
3275 ("disk/%d" % idx, "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3276 ])
3277
3306
3307 def _RemoveDisk(self, idx, root, _):
3308 """Removes a disk.
3309
3310 """
3311 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3312 for node_uuid, disk in anno_disk.ComputeNodeTree(
3313 self.instance.primary_node):
3314 self.cfg.SetDiskID(disk, node_uuid)
3315 msg = self.rpc.call_blockdev_remove(node_uuid, disk).fail_msg
3316 if msg:
3317 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3318 " continuing anyway", idx,
3319 self.cfg.GetNodeName(node_uuid), msg)
3320
3321 # If this is a DRBD disk, return its port to the pool
3322 if root.dev_type in constants.DTS_DRBD:
3323 self.cfg.AddTcpUdpPort(root.logical_id[2])
3324
3325 def _CreateNewNic(self, idx, params, private):
3326 """Creates data structure for a new network interface.
3327
3328 """
3329 mac = params[constants.INIC_MAC]
3330 ip = params.get(constants.INIC_IP, None)
3331 net = params.get(constants.INIC_NETWORK, None)
3332 name = params.get(constants.INIC_NAME, None)
3333 net_uuid = self.cfg.LookupNetwork(net)
3334
3335 nicparams = private.filled
3336 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3337 nicparams=nicparams)
3338 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3339
3340 return (nobj, [
3341 ("nic.%d" % idx,
3342 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3343 (mac, ip, private.filled[constants.NIC_MODE],
3344 private.filled[constants.NIC_LINK],
3345 net)),
3346 ])
3347
3348 def _ApplyNicMods(self, idx, nic, params, private):
3349 """Modifies a network interface.
3350
3351 """
3352 changes = []
3353
3354 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3355 if key in params:
3356 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3357 setattr(nic, key, params[key])
3358
3359 new_net = params.get(constants.INIC_NETWORK, nic.network)
3360 new_net_uuid = self.cfg.LookupNetwork(new_net)
3361 if new_net_uuid != nic.network:
3362 changes.append(("nic.network/%d" % idx, new_net))
3363 nic.network = new_net_uuid
3364
3365 if private.filled:
3366 nic.nicparams = private.filled
3367
3368 for (key, val) in nic.nicparams.items():
3369 changes.append(("nic.%s/%d" % (key, idx), val))
3370
3371 return changes
3372
3373 def Exec(self, feedback_fn):
3374 """Modifies an instance.
3375
3376 All parameters take effect only at the next restart of the instance.
3377
3378 """
3379 # Process here the warnings from CheckPrereq, as we don't have a
3380 # feedback_fn there.
3381 # TODO: Replace with self.LogWarning
3382 for warn in self.warn:
3383 feedback_fn("WARNING: %s" % warn)
3384
3385 assert ((self.op.disk_template is None) ^
3386 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3387 "Not owning any node resource locks"
3388
3389 result = []
3390
3391
3392 if self.op.pnode_uuid:
3393 self.instance.primary_node = self.op.pnode_uuid
3394
3395 # Runtime memory changes
3396 if self.op.runtime_mem:
3397 rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
3398 self.instance,
3399 self.op.runtime_mem)
3400 rpcres.Raise("Cannot modify instance runtime memory")
3401 result.append(("runtime_memory", self.op.runtime_mem))
3402
3403 # Apply disk changes
3404 _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
3405 self._CreateNewDisk, self._ModifyDisk,
3406 self._RemoveDisk)
3407 _UpdateIvNames(0, self.instance.disks)
3408
3409 if self.op.disk_template:
3410 if __debug__:
3411 check_nodes = set(self.instance.all_nodes)
3412 if self.op.remote_node_uuid:
3413 check_nodes.add(self.op.remote_node_uuid)
3414 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3415 owned = self.owned_locks(level)
3416 assert not (check_nodes - owned), \
3417 ("Not owning the correct locks, owning %r, expected at least %r" %
3418 (owned, check_nodes))
3419
3420 r_shut = ShutdownInstanceDisks(self, self.instance)
3421 if not r_shut:
3422 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3423 " proceed with disk template conversion")
3424 mode = (self.instance.disk_template, self.op.disk_template)
3425 try:
3426 self._DISK_CONVERSIONS[mode](self, feedback_fn)
3427 except:
3428 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
3429 raise
3430 result.append(("disk_template", self.op.disk_template))
3431
3432 assert self.instance.disk_template == self.op.disk_template, \
3433 ("Expected disk template '%s', found '%s'" %
3434 (self.op.disk_template, self.instance.disk_template))
3435
3436 # Release node and resource locks if there are any (they might already
3437 # have been released during disk conversion)
3438 ReleaseLocks(self, locking.LEVEL_NODE)
3439 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3440
3441 # Apply NIC changes
3442 if self._new_nics is not None:
3443 self.instance.nics = self._new_nics
3444 result.extend(self._nic_chgdesc)
3445
3446 # hvparams changes
3447 if self.op.hvparams:
3448 self.instance.hvparams = self.hv_inst
3449 for key, val in self.op.hvparams.iteritems():
3450 result.append(("hv/%s" % key, val))
3451
3452 # beparams changes
3453 if self.op.beparams:
3454 self.instance.beparams = self.be_inst
3455 for key, val in self.op.beparams.iteritems():
3456 result.append(("be/%s" % key, val))
3457
3458 # OS change
3459 if self.op.os_name:
3460 self.instance.os = self.op.os_name
3461
3462 # osparams changes
3463 if self.op.osparams:
3464 self.instance.osparams = self.os_inst
3465 for key, val in self.op.osparams.iteritems():
3466 result.append(("os/%s" % key, val))
3467
3468 if self.op.offline is None:
3469 # Ignore
3470 pass
3471 elif self.op.offline:
3472 # Mark instance as offline
3473 self.cfg.MarkInstanceOffline(self.instance.uuid)
3474 result.append(("admin_state", constants.ADMINST_OFFLINE))
3475 else:
3476 # Mark instance as online, but stopped
3477 self.cfg.MarkInstanceDown(self.instance.uuid)
3478 result.append(("admin_state", constants.ADMINST_DOWN))
3479
3480 self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
3481
3482 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3483 self.owned_locks(locking.LEVEL_NODE)), \
3484 "All node locks should have been released by now"
3485
3486 return result
3487
3488 _DISK_CONVERSIONS = {
3489 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3490 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3491 }
3492
3494 class LUInstanceChangeGroup(LogicalUnit):
3495 HPATH = "instance-change-group"
3496 HTYPE = constants.HTYPE_INSTANCE
3497 REQ_BGL = False
3498
3517
3518 def DeclareLocks(self, level):
3519 if level == locking.LEVEL_NODEGROUP:
3520 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3521
3522 if self.req_target_uuids:
3523 lock_groups = set(self.req_target_uuids)
3524
3525 # Lock all groups used by the instance optimistically; this requires
3526 # going via the node before it's locked, requiring verification later on
3527 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
3528 lock_groups.update(instance_groups)
3529 else:
3530 # No target groups, need to lock all of them
3531 lock_groups = locking.ALL_SET
3532
3533 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3534
3535 elif level == locking.LEVEL_NODE:
3536 if self.req_target_uuids:
3537 # Lock all nodes used by instances
3538 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3539 self._LockInstancesNodes()
3540
3541 # Lock all nodes in all potential target groups
3542 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3543 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
3544 member_nodes = [node_uuid
3545 for group in lock_groups
3546 for node_uuid in self.cfg.GetNodeGroup(group).members]
3547 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3548 else:
3549 # Lock all nodes as all groups are potential targets
3550 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3551
3552 def CheckPrereq(self):
3553 owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3554 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3555 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3556
3557 assert (self.req_target_uuids is None or
3558 owned_groups.issuperset(self.req_target_uuids))
3559 assert owned_instance_names == set([self.op.instance_name])
3560
3561 # Get instance information
3562 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
3563
3564 # Check if node groups for locked instance are still correct
3565 assert owned_nodes.issuperset(self.instance.all_nodes), \
3566 ("Instance %s's nodes changed while we kept the lock" %
3567 self.op.instance_name)
3568
3569 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
3570 owned_groups)
3571
3572 if self.req_target_uuids:
3573 # User requested specific target groups
3574 self.target_uuids = frozenset(self.req_target_uuids)
3575 else:
3576 # All groups except those used by the instance are potential targets
3577 self.target_uuids = owned_groups - inst_groups
3578
3579 conflicting_groups = self.target_uuids & inst_groups
3580 if conflicting_groups:
3581 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3582 " used by the instance '%s'" %
3583 (utils.CommaJoin(conflicting_groups),
3584 self.op.instance_name),
3585 errors.ECODE_INVAL)
3586
3587 if not self.target_uuids:
3588 raise errors.OpPrereqError("There are no possible target groups",
3589 errors.ECODE_INVAL)
3590
3591 def BuildHooksEnv(self):
3592 """Build hooks env.
3593
3594 """
3595 assert self.target_uuids
3596
3597 env = {
3598 "TARGET_GROUPS": " ".join(self.target_uuids),
3599 }
3600
3601 env.update(BuildInstanceHookEnvByObject(self, self.instance))
3602
3603 return env
3604
3605 def BuildHooksNodes(self):
3606 """Build hooks nodes.
3607
3608 """
3609 mn = self.cfg.GetMasterNode()
3610 return ([mn], [mn])
3611
3612 def Exec(self, feedback_fn):
3613 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3614
3615 assert instances == [self.op.instance_name], "Instance not locked"
3616
3617 req = iallocator.IAReqGroupChange(instances=instances,
3618 target_groups=list(self.target_uuids))
3619 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3620
3621 ial.Run(self.op.iallocator)
3622
3623 if not ial.success:
3624 raise errors.OpPrereqError("Can't compute solution for changing group of"
3625 " instance '%s' using iallocator '%s': %s" %
3626 (self.op.instance_name, self.op.iallocator,
3627 ial.info), errors.ECODE_NORES)
3628
3629 jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3630
3631 self.LogInfo("Iallocator returned %s job(s) for changing group of"
3632 " instance '%s'", len(jobs), self.op.instance_name)
3633
3634 return ResultWithJobs(jobs)
3635