31 """Logical units dealing with instances."""
32
33 import OpenSSL
34 import copy
35 import logging
36 import os
37
38 from ganeti import compat
39 from ganeti import constants
40 from ganeti import errors
41 from ganeti import ht
42 from ganeti import hypervisor
43 from ganeti import locking
44 from ganeti.masterd import iallocator
45 from ganeti import masterd
46 from ganeti import netutils
47 from ganeti import objects
48 from ganeti import pathutils
49 from ganeti import serializer
50 import ganeti.rpc.node as rpc
51 from ganeti import utils
52 from ganeti.utils import retry
53
54 from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
55
56 from ganeti.cmdlib.common import INSTANCE_DOWN, \
57 INSTANCE_NOT_RUNNING, CheckNodeOnline, \
58 ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
59 LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
60 IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, CheckOSImage, \
61 AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
62 ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
63 CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination, \
64 DetermineImageSize, IsInstanceRunning
65 from ganeti.cmdlib.instance_storage import CreateDisks, \
66 CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, ImageDisks, \
67 WaitForSync, IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, \
68 ComputeDisks, CheckRADOSFreeSpace, ComputeDiskSizePerVG, \
69 GenerateDiskTemplate, StartInstanceDisks, ShutdownInstanceDisks, \
70 AssembleInstanceDisks, CheckSpindlesExclusiveStorage, TemporaryDisk
71 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
72 GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
73 NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
74 ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
75 GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
76 CheckInstanceBridgesExist, CheckNicsBridgesExist, UpdateMetadata, \
77 CheckCompressionTool, CheckInstanceExistence
78 import ganeti.masterd.instance
79
80
81 #: Type description for changes as returned by L{_ApplyContainerMods}'s
82 #: callbacks
83 _TApplyContModsCbChanges = \
84 ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
85 ht.TNonEmptyString,
86 ht.TAny,
87 ])))
88
89
90 def _CheckHostnameSane(lu, name):
91 """Ensures that a given hostname resolves to a 'sane' name.
92
93 The given name is required to be a prefix of the resolved hostname,
94 to prevent accidental mismatches.
95
96 @param lu: the logical unit on behalf of which we're checking
97 @param name: the name we should resolve and check
98 @return: the resolved hostname object
99
100 """
101 hostname = netutils.GetHostname(name=name)
102 if hostname.name != name:
103 lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
104 if not utils.MatchNameComponent(name, [hostname.name]):
105 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
106 " same as given hostname '%s'") %
107 (hostname.name, name), errors.ECODE_INVAL)
108 return hostname
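# Illustrative behaviour of the prefix check above (hypothetical names):
#   "inst1" resolving to "inst1.example.com"  -> accepted, resolved name used
#   "inst1" resolving to "other.example.com"  -> OpPrereqError is raised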
109
110
111 def _CheckOpportunisticLocking(op):
112 """Generate error if opportunistic locking is not possible.
113
114 """
115 if op.opportunistic_locking and not op.iallocator:
116 raise errors.OpPrereqError("Opportunistic locking is only available in"
117 " combination with an instance allocator",
118 errors.ECODE_INVAL)
119
120 def _CreateInstanceAllocRequest(op, disks, nics, beparams,
121 node_name_whitelist):
122 """Wrapper around IAReqInstanceAlloc.
123
124 @param op: The instance opcode
125 @param disks: The computed disks
126 @param nics: The computed nics
128 @param beparams: The fully filled beparams
128 @param node_name_whitelist: List of nodes which should appear as online to the
129 allocator (unless the node is already marked offline)
130
131 @returns: A filled L{iallocator.IAReqInstanceAlloc}
132
133 """
134 spindle_use = beparams[constants.BE_SPINDLE_USE]
135 return iallocator.IAReqInstanceAlloc(name=op.instance_name,
136 disk_template=op.disk_template,
137 tags=op.tags,
138 os=op.os_type,
139 vcpus=beparams[constants.BE_VCPUS],
140 memory=beparams[constants.BE_MAXMEM],
141 spindle_use=spindle_use,
142 disks=disks,
143 nics=[n.ToDict() for n in nics],
144 hypervisor=op.hypervisor,
145 node_whitelist=node_name_whitelist)
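# Note: 'beparams' is expected to be the fully filled dict (cluster defaults
# already applied); otherwise the BE_SPINDLE_USE/BE_VCPUS/BE_MAXMEM lookups
# above would fail with a KeyError for a partially specified opcode.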
146
164
165
166 def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
167 """Computes the nics.
168
169 @param op: The instance opcode
170 @param cluster: Cluster configuration object
171 @param default_ip: The default ip to assign
172 @param cfg: An instance of the configuration object
173 @param ec_id: Execution context ID
174
175 @returns: The built-up NICs
176
177 """
178 nics = []
179 for nic in op.nics:
180 nic_mode_req = nic.get(constants.INIC_MODE, None)
181 nic_mode = nic_mode_req
182 if nic_mode is None or nic_mode == constants.VALUE_AUTO:
183 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
184
185 net = nic.get(constants.INIC_NETWORK, None)
186 link = nic.get(constants.NIC_LINK, None)
187 ip = nic.get(constants.INIC_IP, None)
188 vlan = nic.get(constants.INIC_VLAN, None)
189
190 if net is None or net.lower() == constants.VALUE_NONE:
191 net = None
192 else:
193 if nic_mode_req is not None or link is not None:
194 raise errors.OpPrereqError("If network is given, no mode or link"
195 " is allowed to be passed",
196 errors.ECODE_INVAL)
197
198
199 if ip is None or ip.lower() == constants.VALUE_NONE:
200 nic_ip = None
201 elif ip.lower() == constants.VALUE_AUTO:
202 if not op.name_check:
203 raise errors.OpPrereqError("IP address set to auto but name checks"
204 " have been skipped",
205 errors.ECODE_INVAL)
206 nic_ip = default_ip
207 else:
208
209
210 if ip.lower() == constants.NIC_IP_POOL:
211 if net is None:
212 raise errors.OpPrereqError("if ip=pool, parameter network"
213 " must be passed too",
214 errors.ECODE_INVAL)
215
216 elif not netutils.IPAddress.IsValid(ip):
217 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
218 errors.ECODE_INVAL)
219
220 nic_ip = ip
221
222
223 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
224 raise errors.OpPrereqError("Routed nic mode requires an ip address",
225 errors.ECODE_INVAL)
226
227
228 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
229 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
230 mac = utils.NormalizeAndValidateMac(mac)
231
232 try:
233
234 cfg.ReserveMAC(mac, ec_id)
235 except errors.ReservationError:
236 raise errors.OpPrereqError("MAC address %s already in use"
237 " in cluster" % mac,
238 errors.ECODE_NOTUNIQUE)
239
240
241 nicparams = {}
242 if nic_mode_req:
243 nicparams[constants.NIC_MODE] = nic_mode
244 if link:
245 nicparams[constants.NIC_LINK] = link
246 if vlan:
247 nicparams[constants.NIC_VLAN] = vlan
248
249 check_params = cluster.SimpleFillNIC(nicparams)
250 objects.NIC.CheckParameterSyntax(check_params)
251 net_uuid = cfg.LookupNetwork(net)
252 name = nic.get(constants.INIC_NAME, None)
253 if name is not None and name.lower() == constants.VALUE_NONE:
254 name = None
255 nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
256 network=net_uuid, nicparams=nicparams)
257 nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
258 nics.append(nic_obj)
259
260 return nics
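# Minimal illustrative input (hypothetical values) accepted by this function:
#   op.nics = [{constants.INIC_IP: "auto"},
#              {constants.INIC_IP: "pool", constants.INIC_NETWORK: "net1"}]
# The first NIC takes default_ip (and so requires op.name_check); the second
# gets its address from net1's pool later, during CheckPrereq.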
261
262
263 def _CheckForConflictingIp(lu, ip, node_uuid):
264 """Raise an error in case of a conflicting IP address.
265
266 @type ip: string
267 @param ip: IP address
268 @type node_uuid: string
269 @param node_uuid: node UUID
270
271 """
272 (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
273 if conf_net is not None:
274 raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
275 " network %s, but the target NIC does not." %
276 (ip, conf_net)),
277 errors.ECODE_STATE)
278
279 return (None, None)
280
281
282
283 def _ComputeIPolicyInstanceSpecViolation(ipolicy, instance_spec, disk_template,
284 _compute_fn=ComputeIPolicySpecViolation):
285 """Compute whether the instance specs meet the specs of the ipolicy.
286
287 @type ipolicy: dict
288 @param ipolicy: The ipolicy to verify against
289 @type instance_spec: dict
290 @param instance_spec: The instance spec to verify
291 @type disk_template: string
292 @param disk_template: the disk template of the instance
293 @param _compute_fn: The function to verify ipolicy (unittest only)
294 @see: L{ComputeIPolicySpecViolation}
295
296 """
297 mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
298 cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
299 disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
300 disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
301 nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
302 spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
303
304 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
305 disk_sizes, spindle_use, disk_template)
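# Spec keys absent from 'instance_spec' default to None/0/[] above, so a
# partial spec (e.g. only ISPEC_MEM_SIZE) can still be validated against
# the policy.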
306
307
308 def _ComputeInstanceCommunicationNIC(instance_name):
309 """Compute the name of the instance NIC used by instance
310 communication.
311
312 With instance communication, a new NIC is added to the instance.
313 This NIC has a special name that identifies it as being part of
314 instance communication, and not just a normal NIC. This function
315 generates the name of the NIC based on a prefix and the instance
316 name.
317
318 @type instance_name: string
319 @param instance_name: name of the instance the NIC belongs to
320
321 @rtype: string
322 @return: name of the NIC
323
324 """
325 return constants.INSTANCE_COMMUNICATION_NIC_PREFIX + instance_name
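# Illustrative example (assumes the usual value of the prefix constant):
# for an instance named "inst1.example.com" the result would look like
# "ganeti:communication:inst1.example.com".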
326
327
328 class LUInstanceCreate(LogicalUnit):
329 """Create an instance.
330
331 """
332 HPATH = "instance-add"
333 HTYPE = constants.HTYPE_INSTANCE
334 REQ_BGL = False
335
349
391
392 def _CheckVLANArguments(self):
393 """ Check validity of VLANs if given
394
395 """
396 for nic in self.op.nics:
397 vlan = nic.get(constants.INIC_VLAN, None)
398 if vlan:
399 if vlan[0] == ".":
400
401
402 if not vlan[1:].isdigit():
403 vlanlist = vlan[1:].split(':')
404 for vl in vlanlist:
405 if not vl.isdigit():
406 raise errors.OpPrereqError("Specified VLAN parameter is "
407 "invalid : %s" % vlan,
408 errors.ECODE_INVAL)
409 elif vlan[0] == ":":
410
411 vlanlist = vlan[1:].split(':')
412 for vl in vlanlist:
413 if not vl.isdigit():
414 raise errors.OpPrereqError("Specified VLAN parameter is invalid"
415 " : %s" % vlan, errors.ECODE_INVAL)
416 elif vlan.isdigit():
417
418
419 nic[constants.INIC_VLAN] = "." + vlan
420 else:
421 raise errors.OpPrereqError("Specified VLAN parameter is invalid"
422 " : %s" % vlan, errors.ECODE_INVAL)
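# VLAN syntaxes accepted by the checks above (illustrative):
#   "5"      -> normalized to ".5" (native/untagged VLAN 5)
#   ".5"     -> untagged VLAN 5; ".5:6" additionally trunks VLAN 6
#   ":5:6"   -> tagged-only trunk carrying VLANs 5 and 6
# Anything else raises OpPrereqError.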
423
424 def CheckArguments(self):
425 """Check arguments.
426
427 """
428
429
430 if self.op.no_install and self.op.start:
431 self.LogInfo("No-installation mode selected, disabling startup")
432 self.op.start = False
433
434 self.op.instance_name = \
435 netutils.Hostname.GetNormalizedName(self.op.instance_name)
436
437 if self.op.ip_check and not self.op.name_check:
438 # TODO: make the ip check more flexible and not depend on the name check
439 raise errors.OpPrereqError("Cannot do IP address check without a name"
440 " check", errors.ECODE_INVAL)
441
442
443 if self.op.name_check:
444 self.hostname = _CheckHostnameSane(self, self.op.instance_name)
445 self.op.instance_name = self.hostname.name
446
447 self.check_ip = self.hostname.ip
448 else:
449 self.check_ip = None
450
451
452 if self.op.instance_communication:
453 nic_name = _ComputeInstanceCommunicationNIC(self.op.instance_name)
454
455 for nic in self.op.nics:
456 if nic.get(constants.INIC_NAME, None) == nic_name:
457 break
458 else:
459 self.op.nics.append({constants.INIC_NAME: nic_name,
460 constants.INIC_MAC: constants.VALUE_GENERATE,
461 constants.INIC_IP: constants.NIC_IP_POOL,
462 constants.INIC_NETWORK:
463 self.cfg.GetInstanceCommunicationNetwork()})
464
465
466 if self.op.helper_startup_timeout is None:
467 self.op.helper_startup_timeout = constants.HELPER_VM_STARTUP
468
469 if self.op.helper_shutdown_timeout is None:
470 self.op.helper_shutdown_timeout = constants.HELPER_VM_SHUTDOWN
471
472
473 for nic in self.op.nics:
474 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
475
476 utils.ValidateDeviceNames("NIC", self.op.nics)
477
478 self._CheckVLANArguments()
479
480 self._CheckDiskArguments()
481 assert self.op.disk_template is not None
482
483
484 if (self.op.file_driver and
485 not self.op.file_driver in constants.FILE_DRIVER):
486 raise errors.OpPrereqError("Invalid file driver name '%s'" %
487 self.op.file_driver, errors.ECODE_INVAL)
488
489
490 if (not self.op.file_driver and
491 self.op.disk_template in constants.DTS_FILEBASED):
492 self.op.file_driver = constants.FD_DEFAULT
493
494
495 CheckIAllocatorOrNode(self, "iallocator", "pnode")
496
497 if self.op.pnode is not None:
498 if self.op.disk_template in constants.DTS_INT_MIRROR:
499 if self.op.snode is None:
500 raise errors.OpPrereqError("The networked disk templates need"
501 " a mirror node", errors.ECODE_INVAL)
502 elif self.op.snode:
503 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
504 " template")
505 self.op.snode = None
506
507 _CheckOpportunisticLocking(self.op)
508
509 if self.op.mode == constants.INSTANCE_IMPORT:
510 # On import force_variant must be True, because if we forced it at
511 # initial install, our only chance when importing it back is that it
512 # works again!
513 self.op.force_variant = True
514
515 if self.op.no_install:
516 self.LogInfo("No-installation mode has no effect during import")
517
518 if objects.GetOSImage(self.op.osparams):
519 self.LogInfo("OS image has no effect during import")
520 elif self.op.mode == constants.INSTANCE_CREATE:
521 os_image = CheckOSImage(self.op)
522
523 if self.op.os_type is None and os_image is None:
524 raise errors.OpPrereqError("No guest OS or OS image specified",
525 errors.ECODE_INVAL)
526
527 if self.op.os_type is not None \
528 and self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
529 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
530 " installation" % self.op.os_type,
531 errors.ECODE_STATE)
532 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
533 if objects.GetOSImage(self.op.osparams):
534 self.LogInfo("OS image has no effect during import")
535
536 self._cds = GetClusterDomainSecret()
537
538
539 src_handshake = self.op.source_handshake
540 if not src_handshake:
541 raise errors.OpPrereqError("Missing source handshake",
542 errors.ECODE_INVAL)
543
544 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
545 src_handshake)
546 if errmsg:
547 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
548 errors.ECODE_INVAL)
549
550
551 self.source_x509_ca_pem = self.op.source_x509_ca
552 if not self.source_x509_ca_pem:
553 raise errors.OpPrereqError("Missing source X509 CA",
554 errors.ECODE_INVAL)
555
556 try:
557 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
558 self._cds)
559 except OpenSSL.crypto.Error, err:
560 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
561 (err, ), errors.ECODE_INVAL)
562
563 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
564 if errcode is not None:
565 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
566 errors.ECODE_INVAL)
567
568 self.source_x509_ca = cert
569
570 src_instance_name = self.op.source_instance_name
571 if not src_instance_name:
572 raise errors.OpPrereqError("Missing source instance name",
573 errors.ECODE_INVAL)
574
575 self.source_instance_name = \
576 netutils.GetHostname(name=src_instance_name).name
577
578 else:
579 raise errors.OpPrereqError("Invalid instance creation mode %r" %
580 self.op.mode, errors.ECODE_INVAL)
581
582 def ExpandNames(self):
583 """ExpandNames for CreateInstance.
584
585 Figure out the right locks for instance creation.
586
587 """
588 self.needed_locks = {}
589
590
591
592 CheckInstanceExistence(self, self.op.instance_name)
593
594 self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
595
596 if self.op.iallocator:
597 # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
598 # specifying a group on instance creation and then selecting nodes from
599 # that group
600 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
601 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
602
603 if self.op.opportunistic_locking:
604 self.opportunistic_locks[locking.LEVEL_NODE] = True
605 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
606 if self.op.disk_template == constants.DT_DRBD8:
607 self.opportunistic_locks_count[locking.LEVEL_NODE] = 2
608 self.opportunistic_locks_count[locking.LEVEL_NODE_RES] = 2
609 else:
610 (self.op.pnode_uuid, self.op.pnode) = \
611 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
612 nodelist = [self.op.pnode_uuid]
613 if self.op.snode is not None:
614 (self.op.snode_uuid, self.op.snode) = \
615 ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
616 nodelist.append(self.op.snode_uuid)
617 self.needed_locks[locking.LEVEL_NODE] = nodelist
618
619
620 if self.op.mode == constants.INSTANCE_IMPORT:
621 src_node = self.op.src_node
622 src_path = self.op.src_path
623
624 if src_path is None:
625 self.op.src_path = src_path = self.op.instance_name
626
627 if src_node is None:
628 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
629 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
630 self.op.src_node = None
631 if os.path.isabs(src_path):
632 raise errors.OpPrereqError("Importing an instance from a path"
633 " requires a source node option",
634 errors.ECODE_INVAL)
635 else:
636 (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
637 ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
638 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
639 self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
640 if not os.path.isabs(src_path):
641 self.op.src_path = \
642 utils.PathJoin(pathutils.EXPORT_DIR, src_path)
643
644 self.needed_locks[locking.LEVEL_NODE_RES] = \
645 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
646
647
648
649
650
651 if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
652
653
654
655
656 self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
657 else:
658 self.needed_locks[locking.LEVEL_NODEGROUP] = \
659 list(self.cfg.GetNodeGroupsFromNodes(
660 self.needed_locks[locking.LEVEL_NODE]))
661 self.share_locks[locking.LEVEL_NODEGROUP] = 1
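# Locking strategy used above, in short: with an iallocator, all node and
# node-alloc locks are requested (opportunistically if asked for, capped at
# two nodes for DRBD8); with explicit pnode/snode only those nodes are
# locked, and the node group locks are narrowed to the matching groups.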
662
663 def _RunAllocator(self):
664 """Run the allocator based on input opcode.
665
666 """
667 if self.op.opportunistic_locking:
668
669 node_name_whitelist = self.cfg.GetNodeNames(
670 set(self.owned_locks(locking.LEVEL_NODE)) &
671 set(self.owned_locks(locking.LEVEL_NODE_RES)))
672 else:
673 node_name_whitelist = None
674
675 req = _CreateInstanceAllocRequest(self.op, self.disks,
676 self.nics, self.be_full,
677 node_name_whitelist)
678 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
679
680 ial.Run(self.op.iallocator)
681
682 if not ial.success:
683
684 if self.op.opportunistic_locking:
685 ecode = errors.ECODE_TEMP_NORES
686 else:
687 ecode = errors.ECODE_NORES
688
689 raise errors.OpPrereqError("Can't compute nodes using"
690 " iallocator '%s': %s" %
691 (self.op.iallocator, ial.info),
692 ecode)
693
694 (self.op.pnode_uuid, self.op.pnode) = \
695 ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
696 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
697 self.op.instance_name, self.op.iallocator,
698 utils.CommaJoin(ial.result))
699
700 assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
701
702 if req.RequiredNodes() == 2:
703 (self.op.snode_uuid, self.op.snode) = \
704 ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
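# Under opportunistic locking a failed allocation is reported with
# ECODE_TEMP_NORES, i.e. as a temporary condition: the submitter may retry
# the job later, when more node locks can be acquired.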
705
706 def BuildHooksEnv(self):
707 """Build hooks env.
708
709 This runs on master, primary and secondary nodes of the instance.
710
711 """
712 env = {
713 "ADD_MODE": self.op.mode,
714 }
715 if self.op.mode == constants.INSTANCE_IMPORT:
716 env["SRC_NODE"] = self.op.src_node
717 env["SRC_PATH"] = self.op.src_path
718 env["SRC_IMAGES"] = self.src_images
719
720 env.update(BuildInstanceHookEnv(
721 name=self.op.instance_name,
722 primary_node_name=self.op.pnode,
723 secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
724 status=self.op.start,
725 os_type=self.op.os_type,
726 minmem=self.be_full[constants.BE_MINMEM],
727 maxmem=self.be_full[constants.BE_MAXMEM],
728 vcpus=self.be_full[constants.BE_VCPUS],
729 nics=NICListToTuple(self, self.nics),
730 disk_template=self.op.disk_template,
731 disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
732 d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
733 for d in self.disks],
734 bep=self.be_full,
735 hvp=self.hv_full,
736 hypervisor_name=self.op.hypervisor,
737 tags=self.op.tags,
738 ))
739
740 return env
741
742 def BuildHooksNodes(self):
743 """Build hooks nodes.
744
745 """
746 nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
747 return nl, nl
748
794
795 def _ReadExportParams(self, einfo):
796 """Use export parameters as defaults.
797
798 In case the opcode doesn't specify (i.e. override) some instance
799 parameters, try to take them from the export information, if it
800 declares them.
801
802 """
803 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
804
805 if not self.op.disks:
806 disks = []
807
808 for idx in range(constants.MAX_DISKS):
809 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
810 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
811 disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
812 disk = {
813 constants.IDISK_SIZE: disk_sz,
814 constants.IDISK_NAME: disk_name
815 }
816 disks.append(disk)
817 self.op.disks = disks
818 if not disks and self.op.disk_template != constants.DT_DISKLESS:
819 raise errors.OpPrereqError("No disk info specified and the export"
820 " is missing the disk information",
821 errors.ECODE_INVAL)
822
823 if not self.op.nics:
824 nics = []
825 for idx in range(constants.MAX_NICS):
826 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
827 ndict = {}
828 for name in [constants.INIC_IP,
829 constants.INIC_MAC, constants.INIC_NAME]:
830 nic_param_name = "nic%d_%s" % (idx, name)
831 if einfo.has_option(constants.INISECT_INS, nic_param_name):
832 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
833 ndict[name] = v
834 network = einfo.get(constants.INISECT_INS,
835 "nic%d_%s" % (idx, constants.INIC_NETWORK))
836
837
838 if network:
839 ndict[constants.INIC_NETWORK] = network
840 else:
841 for name in list(constants.NICS_PARAMETERS):
842 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
843 ndict[name] = v
844 nics.append(ndict)
845 else:
846 break
847 self.op.nics = nics
848
849 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
850 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
851
852 if (self.op.hypervisor is None and
853 einfo.has_option(constants.INISECT_INS, "hypervisor")):
854 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
855
856 if einfo.has_section(constants.INISECT_HYP):
857
858
859 for name, value in einfo.items(constants.INISECT_HYP):
860 if name not in self.op.hvparams:
861 self.op.hvparams[name] = value
862
863 if einfo.has_section(constants.INISECT_BEP):
864
865 for name, value in einfo.items(constants.INISECT_BEP):
866 if name not in self.op.beparams:
867 self.op.beparams[name] = value
868
869 if name == constants.BE_MEMORY:
870 if constants.BE_MAXMEM not in self.op.beparams:
871 self.op.beparams[constants.BE_MAXMEM] = value
872 if constants.BE_MINMEM not in self.op.beparams:
873 self.op.beparams[constants.BE_MINMEM] = value
874 else:
875
876 for name in constants.BES_PARAMETERS:
877 if (name not in self.op.beparams and
878 einfo.has_option(constants.INISECT_INS, name)):
879 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
880
881 if einfo.has_section(constants.INISECT_OSP):
882
883 for name, value in einfo.items(constants.INISECT_OSP):
884 if name not in self.op.osparams:
885 self.op.osparams[name] = value
886
887 if einfo.has_section(constants.INISECT_OSP_PRIVATE):
888
889 for name, value in einfo.items(constants.INISECT_OSP_PRIVATE):
890 if name not in self.op.osparams_private:
891 self.op.osparams_private[name] = serializer.Private(value, descr=name)
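# Sketch of the export info consumed above (assuming INISECT_EXP and
# INISECT_INS render as "export" and "instance"; values are hypothetical):
#   [export]    os = debian-image
#   [instance]  disk0_size = 1024, disk0_name = disk-0,
#               nic0_mac = aa:00:00:fa:3a:3f, tags = web production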
892
924
925 def _CalculateFileStorageDir(self):
926 """Calculate final instance file storage dir.
927
928 """
929
930 self.instance_file_storage_dir = None
931 if self.op.disk_template in constants.DTS_FILEBASED:
932
933 joinargs = []
934
935 cfg_storage = None
936 if self.op.disk_template == constants.DT_FILE:
937 cfg_storage = self.cfg.GetFileStorageDir()
938 elif self.op.disk_template == constants.DT_SHARED_FILE:
939 cfg_storage = self.cfg.GetSharedFileStorageDir()
940 elif self.op.disk_template == constants.DT_GLUSTER:
941 cfg_storage = self.cfg.GetGlusterStorageDir()
942
943 if not cfg_storage:
944 raise errors.OpPrereqError(
945 "Cluster file storage dir for {tpl} storage type not defined".format(
946 tpl=repr(self.op.disk_template)
947 ),
948 errors.ECODE_STATE
949 )
950
951 joinargs.append(cfg_storage)
952
953 if self.op.file_storage_dir is not None:
954 joinargs.append(self.op.file_storage_dir)
955
956 if self.op.disk_template != constants.DT_GLUSTER:
957 joinargs.append(self.op.instance_name)
958
959 if len(joinargs) > 1:
960
961 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
962 else:
963 self.instance_file_storage_dir = joinargs[0]
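# E.g. (illustrative): a cluster file storage dir "/srv/ganeti/file-storage",
# op.file_storage_dir "mydir" and instance "inst1" yield
# "/srv/ganeti/file-storage/mydir/inst1"; for Gluster the instance-name
# component is omitted.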
964
965 def CheckPrereq(self):
966 """Check prerequisites.
967
968 """
969
970
971 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
972 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
973 cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
974 if not owned_groups.issuperset(cur_groups):
975 raise errors.OpPrereqError("New instance %s's node groups changed since"
976 " locks were acquired, current groups are"
977 " '%s', owning groups '%s'; retry the"
978 " operation" %
979 (self.op.instance_name,
980 utils.CommaJoin(cur_groups),
981 utils.CommaJoin(owned_groups)),
982 errors.ECODE_STATE)
983
984 self._CalculateFileStorageDir()
985
986 if self.op.mode == constants.INSTANCE_IMPORT:
987 export_info = self._ReadExportInfo()
988 self._ReadExportParams(export_info)
989 self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
990 else:
991 self._old_instance_name = None
992
993 if (not self.cfg.GetVGName() and
994 self.op.disk_template not in constants.DTS_NOT_LVM):
995 raise errors.OpPrereqError("Cluster does not support lvm-based"
996 " instances", errors.ECODE_STATE)
997
998 if (self.op.hypervisor is None or
999 self.op.hypervisor == constants.VALUE_AUTO):
1000 self.op.hypervisor = self.cfg.GetHypervisorType()
1001
1002 cluster = self.cfg.GetClusterInfo()
1003 enabled_hvs = cluster.enabled_hypervisors
1004 if self.op.hypervisor not in enabled_hvs:
1005 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
1006 " cluster (%s)" %
1007 (self.op.hypervisor, ",".join(enabled_hvs)),
1008 errors.ECODE_STATE)
1009
1010
1011 for tag in self.op.tags:
1012 objects.TaggableObject.ValidateTag(tag)
1013
1014
1015 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
1016 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
1017 self.op.hvparams)
1018 hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
1019 hv_type.CheckParameterSyntax(filled_hvp)
1020 self.hv_full = filled_hvp
1021
1022 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
1023 "instance", "cluster")
1024
1025
1026 self.be_full = _ComputeFullBeParams(self.op, cluster)
1027
1028
1029 if self.op.osparams_private is None:
1030 self.op.osparams_private = serializer.PrivateDict()
1031 if self.op.osparams_secret is None:
1032 self.op.osparams_secret = serializer.PrivateDict()
1033
1034 self.os_full = cluster.SimpleFillOS(
1035 self.op.os_type,
1036 self.op.osparams,
1037 os_params_private=self.op.osparams_private,
1038 os_params_secret=self.op.osparams_secret
1039 )
1040
1041
1042
1043 if self.op.identify_defaults:
1044 self._RevertToDefaults(cluster)
1045
1046
1047 self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
1048 self.proc.GetECId())
1049
1050
1051 default_vg = self.cfg.GetVGName()
1052 self.disks = ComputeDisks(self.op, default_vg)
1053
1054 if self.op.mode == constants.INSTANCE_IMPORT:
1055 disk_images = []
1056 for idx in range(len(self.disks)):
1057 option = "disk%d_dump" % idx
1058 if export_info.has_option(constants.INISECT_INS, option):
1059
1060 export_name = export_info.get(constants.INISECT_INS, option)
1061 image = utils.PathJoin(self.op.src_path, export_name)
1062 disk_images.append(image)
1063 else:
1064 disk_images.append(False)
1065
1066 self.src_images = disk_images
1067
1068 if self.op.instance_name == self._old_instance_name:
1069 for idx, nic in enumerate(self.nics):
1070 if nic.mac == constants.VALUE_AUTO:
1071 nic_mac_ini = "nic%d_mac" % idx
1072 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
1073
1074
1075
1076
1077 if self.op.ip_check:
1078 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
1079 raise errors.OpPrereqError("IP %s of instance %s already in use" %
1080 (self.check_ip, self.op.instance_name),
1081 errors.ECODE_NOTUNIQUE)
1082
1083 #### mac address generation
1084 # By generating here the mac address both the allocator and the hooks get
1085 # the real final mac address rather than the 'auto' or 'generate' value.
1086 # There is a race condition between the generation and the instance object
1087 # creation, which means that we know the mac is valid now, but we're not
1088 # sure it will be when we actually add the instance. If things go bad
1089 # adding the instance will abort because of a duplicate mac, and the
1090 # creation job will fail.
1091 for nic in self.nics:
1092 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
1093 nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
1094
1095
1096
1097 if self.op.iallocator is not None:
1098 self._RunAllocator()
1099
1100 # Release all unneeded node locks
1101 keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
1102 self.op.src_node_uuid])
1103 ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
1104 ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
1105 ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
1106
1107 ReleaseLocks(self, locking.LEVEL_NODEGROUP,
1108 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))
1109
1110 assert (self.owned_locks(locking.LEVEL_NODE) ==
1111 self.owned_locks(locking.LEVEL_NODE_RES)), \
1112 "Node locks differ from node resource locks"
1113
1114
1115
1116
1117 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1118 assert self.pnode is not None, \
1119 "Cannot retrieve locked node %s" % self.op.pnode_uuid
1120 if pnode.offline:
1121 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
1122 pnode.name, errors.ECODE_STATE)
1123 if pnode.drained:
1124 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
1125 pnode.name, errors.ECODE_STATE)
1126 if not pnode.vm_capable:
1127 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
1128 " '%s'" % pnode.name, errors.ECODE_STATE)
1129
1130 self.secondaries = []
1131
1132
1133
1134 for idx, nic in enumerate(self.nics):
1135 net_uuid = nic.network
1136 if net_uuid is not None:
1137 nobj = self.cfg.GetNetwork(net_uuid)
1138 netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
1139 if netparams is None:
1140 raise errors.OpPrereqError("No netparams found for network"
1141 " %s. Probably not connected to"
1142 " node's %s nodegroup" %
1143 (nobj.name, self.pnode.name),
1144 errors.ECODE_INVAL)
1145 self.LogInfo("NIC/%d inherits netparams %s" %
1146 (idx, netparams.values()))
1147 nic.nicparams = dict(netparams)
1148 if nic.ip is not None:
1149 if nic.ip.lower() == constants.NIC_IP_POOL:
1150 try:
1151 nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1152 except errors.ReservationError:
1153 raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1154 " from the address pool" % idx,
1155 errors.ECODE_STATE)
1156 self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1157 else:
1158 try:
1159 self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
1160 check=self.op.conflicts_check)
1161 except errors.ReservationError:
1162 raise errors.OpPrereqError("IP address %s already in use"
1163 " or does not belong to network %s" %
1164 (nic.ip, nobj.name),
1165 errors.ECODE_NOTUNIQUE)
1166
1167
1168 elif self.op.conflicts_check:
1169 _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
1170
1171
1172 if self.op.disk_template in constants.DTS_INT_MIRROR:
1173 if self.op.snode_uuid == pnode.uuid:
1174 raise errors.OpPrereqError("The secondary node cannot be the"
1175 " primary node", errors.ECODE_INVAL)
1176 CheckNodeOnline(self, self.op.snode_uuid)
1177 CheckNodeNotDrained(self, self.op.snode_uuid)
1178 CheckNodeVmCapable(self, self.op.snode_uuid)
1179 self.secondaries.append(self.op.snode_uuid)
1180
1181 snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
1182 if pnode.group != snode.group:
1183 self.LogWarning("The primary and secondary nodes are in two"
1184 " different node groups; the disk parameters"
1185 " from the first disk's node group will be"
1186 " used")
1187
1188 nodes = [pnode]
1189 if self.op.disk_template in constants.DTS_INT_MIRROR:
1190 nodes.append(snode)
1191 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1192 excl_stor = compat.any(map(has_es, nodes))
1193 if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1194 raise errors.OpPrereqError("Disk template %s not supported with"
1195 " exclusive storage" % self.op.disk_template,
1196 errors.ECODE_STATE)
1197 for disk in self.disks:
1198 CheckSpindlesExclusiveStorage(disk, excl_stor, True)
1199
1200 node_uuids = [pnode.uuid] + self.secondaries
1201
1202 if not self.adopt_disks:
1203 if self.op.disk_template == constants.DT_RBD:
1204 # _CheckRADOSFreeSpace() is just a placeholder.
1205 # Any function that checks prerequisites can be placed here.
1206 # Check if there is enough space on the RADOS cluster.
1207 CheckRADOSFreeSpace()
1208 elif self.op.disk_template == constants.DT_EXT:
1209 # FIXME: Function that checks prereqs if needed
1210 pass
1211 elif self.op.disk_template in constants.DTS_LVM:
1212 # Check lv size requirements, if not adopting
1213 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1214 CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
1215 else:
1216 # FIXME: add checks for other, non-adopting, non-lvm disk templates
1217 pass
1218
1219 elif self.op.disk_template == constants.DT_PLAIN:
1220 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1221 disk[constants.IDISK_ADOPT])
1222 for disk in self.disks])
1223 if len(all_lvs) != len(self.disks):
1224 raise errors.OpPrereqError("Duplicate volume names given for adoption",
1225 errors.ECODE_INVAL)
1226 for lv_name in all_lvs:
1227 try:
1228
1229
1230 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1231 except errors.ReservationError:
1232 raise errors.OpPrereqError("LV named %s used by another instance" %
1233 lv_name, errors.ECODE_NOTUNIQUE)
1234
1235 vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
1236 vg_names.Raise("Cannot get VG information from node %s" % pnode.name,
1237 prereq=True)
1238
1239 node_lvs = self.rpc.call_lv_list([pnode.uuid],
1240 vg_names.payload.keys())[pnode.uuid]
1241 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name,
1242 prereq=True)
1243 node_lvs = node_lvs.payload
1244
1245 delta = all_lvs.difference(node_lvs.keys())
1246 if delta:
1247 raise errors.OpPrereqError("Missing logical volume(s): %s" %
1248 utils.CommaJoin(delta),
1249 errors.ECODE_INVAL)
1250 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1251 if online_lvs:
1252 raise errors.OpPrereqError("Online logical volumes found, cannot"
1253 " adopt: %s" % utils.CommaJoin(online_lvs),
1254 errors.ECODE_STATE)
1255
1256 for dsk in self.disks:
1257 dsk[constants.IDISK_SIZE] = \
1258 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1259 dsk[constants.IDISK_ADOPT])][0]))
1260
1261 elif self.op.disk_template == constants.DT_BLOCK:
1262
1263 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1264 for disk in self.disks])
1265 if len(all_disks) != len(self.disks):
1266 raise errors.OpPrereqError("Duplicate disk names given for adoption",
1267 errors.ECODE_INVAL)
1268 baddisks = [d for d in all_disks
1269 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1270 if baddisks:
1271 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1272 " cannot be adopted" %
1273 (utils.CommaJoin(baddisks),
1274 constants.ADOPTABLE_BLOCKDEV_ROOT),
1275 errors.ECODE_INVAL)
1276
1277 node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
1278 list(all_disks))[pnode.uuid]
1279 node_disks.Raise("Cannot get block device information from node %s" %
1280 pnode.name, prereq=True)
1281 node_disks = node_disks.payload
1282 delta = all_disks.difference(node_disks.keys())
1283 if delta:
1284 raise errors.OpPrereqError("Missing block device(s): %s" %
1285 utils.CommaJoin(delta),
1286 errors.ECODE_INVAL)
1287 for dsk in self.disks:
1288 dsk[constants.IDISK_SIZE] = \
1289 int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1290
1291
1292 node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1293 node_group = self.cfg.GetNodeGroup(node_info.group)
1294 disk_params = self.cfg.GetGroupDiskParams(node_group)
1295 access_type = disk_params[self.op.disk_template].get(
1296 constants.RBD_ACCESS, constants.DISK_KERNELSPACE
1297 )
1298
1299 if not IsValidDiskAccessModeCombination(self.op.hypervisor,
1300 self.op.disk_template,
1301 access_type):
1302 raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
1303 " used with %s disk access param" %
1304 (self.op.hypervisor, access_type),
1305 errors.ECODE_STATE)
1306
1307
1308 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1309 ispec = {
1310 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1311 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1312 constants.ISPEC_DISK_COUNT: len(self.disks),
1313 constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1314 for disk in self.disks],
1315 constants.ISPEC_NIC_COUNT: len(self.nics),
1316 constants.ISPEC_SPINDLE_USE: spindle_use,
1317 }
1318
1319 group_info = self.cfg.GetNodeGroup(pnode.group)
1320 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1321 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1322 self.op.disk_template)
1323 if not self.op.ignore_ipolicy and res:
1324 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1325 (pnode.group, group_info.name, utils.CommaJoin(res)))
1326 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1327
1328 CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
1329
1330 CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full,
1331 self.op.force_variant)
1332
1333 CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
1334
1335 CheckCompressionTool(self, self.op.compress)
1336
1337
1338
1339
1340
1341
1342 if self.op.start:
1343 hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
1344 self.op.hvparams)
1345 CheckNodeFreeMemory(self, self.pnode.uuid,
1346 "creating instance %s" % self.op.instance_name,
1347 self.be_full[constants.BE_MAXMEM],
1348 self.op.hypervisor, hvfull)
1349
1350 self.dry_run_result = list(node_uuids)
1351
1352 def _RemoveDegradedDisks(self, feedback_fn, disk_abort, instance):
1353 """Removes degraded disks and instance.
1354
1355 It optionally checks whether disks are degraded. If the disks are
1356 degraded, they are removed and the instance is also removed from
1357 the configuration.
1358
1359 If L{disk_abort} is True, then the disks are considered degraded
1360 and removed, and the instance is removed from the configuration.
1361
1362 If L{disk_abort} is False, then it first checks whether disks are
1363 degraded and, if so, it removes the disks and the instance is
1364 removed from the configuration.
1365
1366 @type feedback_fn: callable
1367 @param feedback_fn: function used to send feedback back to the caller
1368
1369 @type disk_abort: boolean
1370 @param disk_abort:
1371 True if disks are degraded, False to first check if disks are
1372 degraded
1373 @type instance: L{objects.Instance}
1374 @param instance: instance containing the disks to check
1375
1376 @rtype: NoneType
1377 @return: None
1378 @raise errors.OpPrereqError: if disks are degraded
1379
1380 """
1381 if disk_abort:
1382 pass
1383 elif self.op.wait_for_sync:
1384 disk_abort = not WaitForSync(self, instance)
1385 elif instance.disk_template in constants.DTS_INT_MIRROR:
1386
1387 feedback_fn("* checking mirrors status")
1388 disk_abort = not WaitForSync(self, instance, oneshot=True)
1389 else:
1390 disk_abort = False
1391
1392 if disk_abort:
1393 RemoveDisks(self, instance)
1394 for disk_uuid in instance.disks:
1395 self.cfg.RemoveInstanceDisk(instance.uuid, disk_uuid)
1396 self.cfg.RemoveInstance(instance.uuid)
1397 raise errors.OpExecError("There are some degraded disks for"
1398 " this instance")
1399
1400 def RunOsScripts(self, feedback_fn, iobj):
1401 """Run OS scripts.
1402
1403 If necessary, disks are paused. It handles instance create,
1404 import, and remote import.
1405
1406 @type feedback_fn: callable
1407 @param feedback_fn: function used to send feedback back to the caller
1408
1409 @type iobj: L{objects.Instance}
1410 @param iobj: instance object
1411
1412 """
1413 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1414 disks = self.cfg.GetInstanceDisks(iobj.uuid)
1415 if self.op.mode == constants.INSTANCE_CREATE:
1416 os_image = objects.GetOSImage(self.op.osparams)
1417
1418 if os_image is None and not self.op.no_install:
1419 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1420 not self.op.wait_for_sync)
1421 if pause_sync:
1422 feedback_fn("* pausing disk sync to install instance OS")
1423 result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1424 (disks, iobj),
1425 True)
1426 for idx, success in enumerate(result.payload):
1427 if not success:
1428 logging.warn("pause-sync of instance %s for disk %d failed",
1429 self.op.instance_name, idx)
1430
1431 feedback_fn("* running the instance OS create scripts...")
1432
1433 os_add_result = \
1434 self.rpc.call_instance_os_add(self.pnode.uuid,
1435 (iobj, self.op.osparams_secret),
1436 False,
1437 self.op.debug_level)
1438 if pause_sync:
1439 feedback_fn("* resuming disk sync")
1440 result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1441 (disks, iobj),
1442 False)
1443 for idx, success in enumerate(result.payload):
1444 if not success:
1445 logging.warn("resume-sync of instance %s for disk %d failed",
1446 self.op.instance_name, idx)
1447
1448 os_add_result.Raise("Could not add os for instance %s"
1449 " on node %s" % (self.op.instance_name,
1450 self.pnode.name))
1451
1452 else:
1453 if self.op.mode == constants.INSTANCE_IMPORT:
1454 feedback_fn("* running the instance OS import scripts...")
1455
1456 transfers = []
1457
1458 for idx, image in enumerate(self.src_images):
1459 if not image:
1460 continue
1461
1462 if iobj.os:
1463 dst_io = constants.IEIO_SCRIPT
1464 dst_ioargs = ((disks[idx], iobj), idx)
1465 else:
1466 dst_io = constants.IEIO_RAW_DISK
1467 dst_ioargs = (disks[idx], iobj)
1468
1469
1470 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1471 constants.IEIO_FILE, (image, ),
1472 dst_io, dst_ioargs,
1473 None)
1474 transfers.append(dt)
1475
1476 import_result = \
1477 masterd.instance.TransferInstanceData(self, feedback_fn,
1478 self.op.src_node_uuid,
1479 self.pnode.uuid,
1480 self.pnode.secondary_ip,
1481 self.op.compress,
1482 iobj, transfers)
1483 if not compat.all(import_result):
1484 self.LogWarning("Some disks for instance %s on node %s were not"
1485 " imported successfully" % (self.op.instance_name,
1486 self.pnode.name))
1487
1488 rename_from = self._old_instance_name
1489
1490 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1491 feedback_fn("* preparing remote import...")
1492
1493
1494
1495
1496 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1497 self.op.source_shutdown_timeout)
1498 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1499
1500 assert iobj.primary_node == self.pnode.uuid
1501 disk_results = \
1502 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1503 self.source_x509_ca,
1504 self._cds, self.op.compress, timeouts)
1505 if not compat.all(disk_results):
1506
1507
1508 self.LogWarning("Some disks for instance %s on node %s were not"
1509 " imported successfully" % (self.op.instance_name,
1510 self.pnode.name))
1511
1512 rename_from = self.source_instance_name
1513
1514 else:
1515
1516 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1517 % self.op.mode)
1518
1519 assert iobj.name == self.op.instance_name
1520
1521
1522 if iobj.os:
1523 feedback_fn("Running rename script for %s" % self.op.instance_name)
1524 result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1525 rename_from,
1526 self.op.debug_level)
1527 result.Warn("Failed to run rename script for %s on node %s" %
1528 (self.op.instance_name, self.pnode.name), self.LogWarning)
1529
1530 def GetOsInstallPackageEnvironment(self, instance, script):
1531 """Returns the OS scripts environment for the helper VM.
1532
1533 @type instance: L{objects.Instance}
1534 @param instance: instance for which the OS scripts are run
1535
1536 @type script: string
1537 @param script: script to run (e.g.,
1538 constants.OS_SCRIPT_CREATE_UNTRUSTED)
1539
1540 @rtype: dict of string to string
1541 @return: OS scripts environment for the helper VM
1542
1543 """
1544 env = {"OS_SCRIPT": script}
1545
1546
1547 if instance.hypervisor == constants.HT_KVM:
1548 prefix = "/dev/vd"
1549 elif instance.hypervisor in [constants.HT_XEN_PVM, constants.HT_XEN_HVM]:
1550 prefix = "/dev/xvd"
1551 else:
1552 raise errors.OpExecError("Cannot run OS scripts in a virtualized"
1553 " environment for hypervisor '%s'"
1554 % instance.hypervisor)
1555
1556 num_disks = len(self.cfg.GetInstanceDisks(instance.uuid))
1557
1558 for idx, disk_label in enumerate(utils.GetDiskLabels(prefix, num_disks + 1,
1559 start=1)):
1560 env["DISK_%d_PATH" % idx] = disk_label
1561
1562 return env
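# Illustrative result for KVM with one instance disk: the first label
# ("/dev/vda") is skipped via start=1 above, presumably being taken by the
# helper VM's own boot disk, so the returned environment would be
# {"OS_SCRIPT": script, "DISK_0_PATH": "/dev/vdb"}.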
1563
1564 def UpdateInstanceOsInstallPackage(self, feedback_fn, instance, override_env):
1565 """Updates the OS parameter 'os-install-package' for an instance.
1566
1567 The OS install package is an archive containing an OS definition
1568 and a file containing the environment variables needed to run the
1569 OS scripts.
1570
1571 The OS install package is served by the metadata daemon to the
1572 instances, so the OS scripts can run inside the virtualized
1573 environment.
1574
1575 @type feedback_fn: callable
1576 @param feedback_fn: function used to send feedback back to the caller
1577
1578 @type instance: L{objects.Instance}
1579 @param instance: instance for which the OS parameter
1580 'os-install-package' is updated
1581
1582 @type override_env: dict of string to string
1583 @param override_env: if supplied, it overrides the environment of
1584 the export OS scripts archive
1585
1586 """
1587 if "os-install-package" in instance.osparams:
1588 feedback_fn("Using OS install package '%s'" %
1589 instance.osparams["os-install-package"])
1590 else:
1591 result = self.rpc.call_os_export(instance.primary_node, instance,
1592 override_env)
1593 result.Raise("Could not export OS '%s'" % instance.os)
1594 instance.osparams["os-install-package"] = result.payload
1595
1596 feedback_fn("Created OS install package '%s'" % result.payload)
1597
1598 def RunOsScriptsVirtualized(self, feedback_fn, instance):
1599 """Runs the OS scripts inside a safe virtualized environment.
1600
1601 The virtualized environment reuses the instance and temporarily
1602 creates a disk onto which the image of the helper VM is dumped.
1603 The temporary disk is used to boot the helper VM. The OS scripts
1604 are passed to the helper VM through the metadata daemon and the OS
1605 install package.
1606
1607 @type feedback_fn: callable
1608 @param feedback_fn: function used to send feedback back to the caller
1609
1610 @type instance: L{objects.Instance}
1611 @param instance: instance for which the OS scripts must be run
1612 inside the virtualized environment
1613
1614 """
1615 install_image = self.cfg.GetInstallImage()
1616
1617 if not install_image:
1618 raise errors.OpExecError("Cannot create install instance because an"
1619 " install image has not been specified")
1620
1621 disk_size = DetermineImageSize(self, install_image, instance.primary_node)
1622
1623 env = self.GetOsInstallPackageEnvironment(
1624 instance,
1625 constants.OS_SCRIPT_CREATE_UNTRUSTED)
1626 self.UpdateInstanceOsInstallPackage(feedback_fn, instance, env)
1627 UpdateMetadata(feedback_fn, self.rpc, instance,
1628 osparams_private=self.op.osparams_private,
1629 osparams_secret=self.op.osparams_secret)
1630
1631 with TemporaryDisk(self,
1632 instance,
1633 [(constants.DT_PLAIN, constants.DISK_RDWR, disk_size)],
1634 feedback_fn):
1635 feedback_fn("Activating instance disks")
1636 StartInstanceDisks(self, instance, False)
1637
1638 feedback_fn("Imaging disk with install image")
1639 ImageDisks(self, instance, install_image)
1640
1641 feedback_fn("Starting instance with install image")
1642 result = self.rpc.call_instance_start(instance.primary_node,
1643 (instance, [], []),
1644 False, self.op.reason)
1645 result.Raise("Could not start instance '%s' with the install image '%s'"
1646 % (instance.name, install_image))
1647
1648
1649 running_check = lambda: IsInstanceRunning(self, instance, prereq=False)
1650 instance_up = retry.SimpleRetry(True, running_check, 5.0,
1651 self.op.helper_startup_timeout)
1652 if not instance_up:
1653 raise errors.OpExecError("Could not boot instance using install image"
1654 " '%s'" % install_image)
1655
1656 feedback_fn("Instance is up, now awaiting shutdown")
1657
1658
1659 instance_up = retry.SimpleRetry(False, running_check, 20.0,
1660 self.op.helper_shutdown_timeout)
1661 if instance_up:
1662 self.LogWarning("Installation not completed prior to timeout, shutting"
1663 " down instance forcibly")
1664
1665 feedback_fn("Installation complete")
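# Helper VM lifecycle implemented above: attach a temporary disk, image it
# with the install image, boot the instance from it, poll until it is up
# (within helper_startup_timeout), then wait for it to shut itself down
# (within helper_shutdown_timeout); the TemporaryDisk context removes the
# disk again on exit.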
1666
1667 def Exec(self, feedback_fn):
1668 """Create and add the instance to the cluster.
1669
1670 """
1671 assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1672 self.owned_locks(locking.LEVEL_NODE)), \
1673 "Node locks differ from node resource locks"
1674
1675 ht_kind = self.op.hypervisor
1676 if ht_kind in constants.HTS_REQ_PORT:
1677 network_port = self.cfg.AllocatePort()
1678 else:
1679 network_port = None
1680
1681 instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1682
1683 # This is ugly but we got a chicken-egg problem here
1684 # We can only take the group disk parameters, as the instance
1685 # has no disks yet (we are generating them right here).
1686 nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1687 disks = GenerateDiskTemplate(self,
1688 self.op.disk_template,
1689 instance_uuid, self.pnode.uuid,
1690 self.secondaries,
1691 self.disks,
1692 self.instance_file_storage_dir,
1693 self.op.file_driver,
1694 0,
1695 feedback_fn,
1696 self.cfg.GetGroupDiskParams(nodegroup))
1697
1698 if self.op.os_type is None:
1699 os_type = ""
1700 else:
1701 os_type = self.op.os_type
1702
1703 iobj = objects.Instance(name=self.op.instance_name,
1704 uuid=instance_uuid,
1705 os=os_type,
1706 primary_node=self.pnode.uuid,
1707 nics=self.nics, disks=[],
1708 disk_template=self.op.disk_template,
1709 disks_active=False,
1710 admin_state=constants.ADMINST_DOWN,
1711 admin_state_source=constants.ADMIN_SOURCE,
1712 network_port=network_port,
1713 beparams=self.op.beparams,
1714 hvparams=self.op.hvparams,
1715 hypervisor=self.op.hypervisor,
1716 osparams=self.op.osparams,
1717 osparams_private=self.op.osparams_private,
1718 )
1719
1720 if self.op.tags:
1721 for tag in self.op.tags:
1722 iobj.AddTag(tag)
1723
1724 if self.adopt_disks:
1725 if self.op.disk_template == constants.DT_PLAIN:
1726
1727
1728 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1729 rename_to = []
1730 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1731 rename_to.append(t_dsk.logical_id)
1732 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1733 result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1734 zip(tmp_disks, rename_to))
1735 result.Raise("Failed to rename adopted LVs")
1736 else:
1737 feedback_fn("* creating instance disks...")
1738 try:
1739 CreateDisks(self, iobj, instance_disks=disks)
1740 except errors.OpExecError:
1741 self.LogWarning("Device creation failed")
1742 self.cfg.ReleaseDRBDMinors(instance_uuid)
1743 raise
1744
1745 feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1746 self.cfg.AddInstance(iobj, self.proc.GetECId())
1747
1748 feedback_fn("adding disks to cluster config")
1749 for disk in disks:
1750 self.cfg.AddInstanceDisk(iobj.uuid, disk)
1751
1752
1753 iobj = self.cfg.GetInstanceInfo(iobj.uuid)
1754
1755 if self.op.mode == constants.INSTANCE_IMPORT:
1756
1757 ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1758 else:
1759
1760 ReleaseLocks(self, locking.LEVEL_NODE)
1761
1762
1763 disk_abort = False
1764 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1765 feedback_fn("* wiping instance disks...")
1766 try:
1767 WipeDisks(self, iobj)
1768 except errors.OpExecError, err:
1769 logging.exception("Wiping disks failed")
1770 self.LogWarning("Wiping instance disks failed (%s)", err)
1771 disk_abort = True
1772
1773 self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
1774
1775
1776 os_image = objects.GetOSImage(iobj.osparams)
1777 disk_abort = False
1778
1779 if not self.adopt_disks and os_image is not None:
1780 feedback_fn("* imaging instance disks...")
1781 try:
1782 ImageDisks(self, iobj, os_image)
1783 except errors.OpExecError, err:
1784 logging.exception("Imaging disks failed")
1785 self.LogWarning("Imaging instance disks failed (%s)", err)
1786 disk_abort = True
1787
1788 self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)
1789
1790
1791 iobj.disks_active = True
1792
1793
1794 ReleaseLocks(self, locking.LEVEL_NODE_RES)
1795
1796 if iobj.os:
1797 result = self.rpc.call_os_diagnose([iobj.primary_node])[iobj.primary_node]
1798 result.Raise("Failed to get OS '%s'" % iobj.os)
1799
1800 trusted = None
1801
1802 for (name, _, _, _, _, _, _, os_trusted) in result.payload:
1803 if name == objects.OS.GetName(iobj.os):
1804 trusted = os_trusted
1805 break
1806
1807 if trusted is None:
1808 raise errors.OpPrereqError("OS '%s' is not available in node '%s'" %
1809 (iobj.os, iobj.primary_node))
1810 elif trusted:
1811 self.RunOsScripts(feedback_fn, iobj)
1812 else:
1813 self.RunOsScriptsVirtualized(feedback_fn, iobj)
1814
1815
1816
1817
1818 iobj = self.cfg.GetInstanceInfo(iobj.uuid)
1819
1820
1821
1822 UpdateMetadata(feedback_fn, self.rpc, iobj,
1823 osparams_private=self.op.osparams_private,
1824 osparams_secret=self.op.osparams_secret)
1825
1826 assert not self.owned_locks(locking.LEVEL_NODE_RES)
1827
1828 if self.op.start:
1829 iobj.admin_state = constants.ADMINST_UP
1830 self.cfg.Update(iobj, feedback_fn)
1831 logging.info("Starting instance %s on node %s", self.op.instance_name,
1832 self.pnode.name)
1833 feedback_fn("* starting instance...")
1834 result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1835 False, self.op.reason)
1836 result.Raise("Could not start instance")
1837
1838 return self.cfg.GetNodeNames(list(self.cfg.GetInstanceNodes(iobj.uuid)))
1839
1840
1841 class LUInstanceRename(LogicalUnit):
1842 """Rename an instance.
1843
1844 """
1845 HPATH = "instance-rename"
1846 HTYPE = constants.HTYPE_INSTANCE
1847 REQ_BGL = False
1848
1849 def CheckArguments(self):
1850 """Check arguments.
1851
1852 """
1853 if self.op.ip_check and not self.op.name_check:
1854 # TODO: make the ip check more flexible and not depend on the name check
1855 raise errors.OpPrereqError("IP address check requires a name check",
1856 errors.ECODE_INVAL)
1857
1858 def BuildHooksEnv(self):
1859 """Build hooks env.
1860
1861 This runs on master, primary and secondary nodes of the instance.
1862
1863 """
1864 env = BuildInstanceHookEnvByObject(self, self.instance)
1865 env["INSTANCE_NEW_NAME"] = self.op.new_name
1866 return env
1867
1875
1876 def CheckPrereq(self):
1877 """Check prerequisites.
1878
1879 This checks that the instance is in the cluster and is not running.
1880
1881 """
1882 (self.op.instance_uuid, self.op.instance_name) = \
1883 ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1884 self.op.instance_name)
1885 instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1886 assert instance is not None
1887
1888
1889
1890
1891 if (instance.disk_template in constants.DTS_FILEBASED and
1892 self.op.new_name != instance.name):
1893 CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1894 instance.disk_template)
1895
1896 CheckNodeOnline(self, instance.primary_node)
1897 CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1898 msg="cannot rename")
1899 self.instance = instance
1900
1901 new_name = self.op.new_name
1902 if self.op.name_check:
1903 hostname = _CheckHostnameSane(self, new_name)
1904 new_name = self.op.new_name = hostname.name
1905 if (self.op.ip_check and
1906 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1907 raise errors.OpPrereqError("IP %s of instance %s already in use" %
1908 (hostname.ip, new_name),
1909 errors.ECODE_NOTUNIQUE)
1910
1911 if new_name != instance.name:
1912 CheckInstanceExistence(self, new_name)
1913
1921
1922 def Exec(self, feedback_fn):
1923 """Rename the instance.
1924
1925 """
1926 old_name = self.instance.name
1927
1928 rename_file_storage = False
1929 if (self.instance.disk_template in (constants.DT_FILE,
1930 constants.DT_SHARED_FILE) and
1931 self.op.new_name != self.instance.name):
1932 disks = self.cfg.GetInstanceDisks(self.instance.uuid)
1933 old_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
1934 rename_file_storage = True
1935
1936 self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1937
1938
1939 assert old_name in self.owned_locks(locking.LEVEL_INSTANCE)
1940 assert self.op.new_name in self.owned_locks(locking.LEVEL_INSTANCE)
1941
1942
1943 renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1944 disks = self.cfg.GetInstanceDisks(renamed_inst.uuid)
1945
1946 if rename_file_storage:
1947 new_file_storage_dir = os.path.dirname(disks[0].logical_id[1])
1948 result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1949 old_file_storage_dir,
1950 new_file_storage_dir)
1951 result.Raise("Could not rename on node %s directory '%s' to '%s'"
1952 " (but the instance has been renamed in Ganeti)" %
1953 (self.cfg.GetNodeName(renamed_inst.primary_node),
1954 old_file_storage_dir, new_file_storage_dir))
1955
1956 StartInstanceDisks(self, renamed_inst, None)
1957 renamed_inst = self.cfg.GetInstanceInfo(renamed_inst.uuid)
1958
1959
1960 info = GetInstanceInfoText(renamed_inst)
1961 for (idx, disk) in enumerate(disks):
1962 for node_uuid in self.cfg.GetInstanceNodes(renamed_inst.uuid):
1963 result = self.rpc.call_blockdev_setinfo(node_uuid,
1964 (disk, renamed_inst), info)
1965 result.Warn("Error setting info on node %s for disk %s" %
1966 (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1967 try:
1968 result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1969 renamed_inst, old_name,
1970 self.op.debug_level)
1971 result.Warn("Could not run OS rename script for instance %s on node %s"
1972 " (but the instance has been renamed in Ganeti)" %
1973 (renamed_inst.name,
1974 self.cfg.GetNodeName(renamed_inst.primary_node)),
1975 self.LogWarning)
1976 finally:
1977 ShutdownInstanceDisks(self, renamed_inst)
1978
1979 return renamed_inst.name
1980
1983 """Remove an instance.
1984
1985 """
1986 HPATH = "instance-remove"
1987 HTYPE = constants.HTYPE_INSTANCE
1988 REQ_BGL = False
1989
1995
2003
2005 """Build hooks env.
2006
2007 This runs on master, primary and secondary nodes of the instance.
2008
2009 """
2010 env = BuildInstanceHookEnvByObject(self, self.instance,
2011 secondary_nodes=self.secondary_nodes,
2012 disks=self.inst_disks)
2013 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
2014 return env
2015
2023
2036
  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.cfg.GetInstanceNodes(self.instance.uuid)) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)

2065 """Move an instance by data-copying.
2066
2067 """
2068 HPATH = "instance-move"
2069 HTYPE = constants.HTYPE_INSTANCE
2070 REQ_BGL = False
2071
2080
2088
2090 """Build hooks env.
2091
2092 This runs on master, primary and target nodes of the instance.
2093
2094 """
2095 env = {
2096 "TARGET_NODE": self.op.target_node,
2097 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
2098 }
2099 env.update(BuildInstanceHookEnvByObject(self, self.instance))
2100 return env
2101
2103 """Build hooks nodes.
2104
2105 """
2106 nl = [
2107 self.cfg.GetMasterNode(),
2108 self.instance.primary_node,
2109 self.op.target_node_uuid,
2110 ]
2111 return (nl, nl)
2112
2114 """Check prerequisites.
2115
2116 This checks that the instance is in the cluster.
2117
2118 """
2119 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
2120 assert self.instance is not None, \
2121 "Cannot retrieve locked instance %s" % self.op.instance_name
2122
2123 if self.instance.disk_template not in constants.DTS_COPYABLE:
2124 raise errors.OpPrereqError("Disk template %s not suitable for copying" %
2125 self.instance.disk_template,
2126 errors.ECODE_STATE)
2127
2128 target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
2129 assert target_node is not None, \
2130 "Cannot retrieve locked node %s" % self.op.target_node
2131
2132 self.target_node_uuid = target_node.uuid
2133 if target_node.uuid == self.instance.primary_node:
2134 raise errors.OpPrereqError("Instance %s is already on the node %s" %
2135 (self.instance.name, target_node.name),
2136 errors.ECODE_STATE)
2137
2138 cluster = self.cfg.GetClusterInfo()
2139 bep = cluster.FillBE(self.instance)
2140
2141 disks = self.cfg.GetInstanceDisks(self.instance.uuid)
2142 for idx, dsk in enumerate(disks):
2143 if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
2144 constants.DT_SHARED_FILE, constants.DT_GLUSTER):
2145 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
2146 " cannot copy" % idx, errors.ECODE_STATE)
2147
2148 CheckNodeOnline(self, target_node.uuid)
2149 CheckNodeNotDrained(self, target_node.uuid)
2150 CheckNodeVmCapable(self, target_node.uuid)
2151 group_info = self.cfg.GetNodeGroup(target_node.group)
2152 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
2153 CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
2154 ignore=self.op.ignore_ipolicy)
2155
2156 if self.instance.admin_state == constants.ADMINST_UP:
2157
2158 CheckNodeFreeMemory(
2159 self, target_node.uuid, "failing over instance %s" %
2160 self.instance.name, bep[constants.BE_MAXMEM],
2161 self.instance.hypervisor,
2162 cluster.hvparams[self.instance.hypervisor])
2163 else:
2164 self.LogInfo("Not checking memory on the secondary node as"
2165 " instance will not be started")
2166
2167
2168 CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
2169
  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise

    errs = []
    transfers = []
    # activate, get path, create transfer jobs
    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    for idx, disk in enumerate(disks):
      # FIXME: pass debug option from opcode to backend
      dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         constants.IEIO_RAW_DISK,
                                         (disk, self.instance),
                                         None)
      transfers.append(dt)
      self.cfg.Update(disk, feedback_fn)

    import_result = \
      masterd.instance.TransferInstanceData(self, feedback_fn,
                                            source_node.uuid,
                                            target_node.uuid,
                                            target_node.secondary_ip,
                                            self.op.compress,
                                            self.instance, transfers)
    if not compat.all(import_result):
      errs.append("Failed to transfer instance data")

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))

2268 """Allocates multiple instances at the same time.
2269
2270 """
2271 REQ_BGL = False
2272
2274 """Check arguments.
2275
2276 """
2277 nodes = []
2278 for inst in self.op.instances:
2279 if inst.iallocator is not None:
2280 raise errors.OpPrereqError("iallocator are not allowed to be set on"
2281 " instance objects", errors.ECODE_INVAL)
2282 nodes.append(bool(inst.pnode))
2283 if inst.disk_template in constants.DTS_INT_MIRROR:
2284 nodes.append(bool(inst.snode))
2285
2286 has_nodes = compat.any(nodes)
2287 if compat.all(nodes) ^ has_nodes:
2288 raise errors.OpPrereqError("There are instance objects providing"
2289 " pnode/snode while others do not",
2290 errors.ECODE_INVAL)
2291
2292 if not has_nodes and self.op.iallocator is None:
2293 default_iallocator = self.cfg.GetDefaultIAllocator()
2294 if default_iallocator:
2295 self.op.iallocator = default_iallocator
2296 else:
2297 raise errors.OpPrereqError("No iallocator or nodes on the instances"
2298 " given and no cluster-wide default"
2299 " iallocator found; please specify either"
2300 " an iallocator or nodes on the instances"
2301 " or set a cluster-wide default iallocator",
2302 errors.ECODE_INVAL)
2303
2304 _CheckOpportunisticLocking(self.op)
2305
2306 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
2307 if dups:
2308 raise errors.OpPrereqError("There are duplicate instance names: %s" %
2309 utils.CommaJoin(dups), errors.ECODE_INVAL)
2310
2312 """Calculate the locks.
2313
2314 """
2315 self.share_locks = ShareAll()
2316 self.needed_locks = {
2317
2318
2319 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
2320 }
2321
2322 if self.op.iallocator:
2323 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2324 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
2325
2326 if self.op.opportunistic_locking:
2327 self.opportunistic_locks[locking.LEVEL_NODE] = True
2328 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
2329 else:
2330 nodeslist = []
2331 for inst in self.op.instances:
2332 (inst.pnode_uuid, inst.pnode) = \
2333 ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
2334 nodeslist.append(inst.pnode_uuid)
2335 if inst.snode is not None:
2336 (inst.snode_uuid, inst.snode) = \
2337 ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
2338 nodeslist.append(inst.snode_uuid)
2339
2340 self.needed_locks[locking.LEVEL_NODE] = nodeslist
2341
2342
2343 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
2344
2346 """Check prerequisite.
2347
2348 """
2349 if self.op.iallocator:
2350 cluster = self.cfg.GetClusterInfo()
2351 default_vg = self.cfg.GetVGName()
2352 ec_id = self.proc.GetECId()
2353
2354 if self.op.opportunistic_locking:
2355
2356 node_whitelist = self.cfg.GetNodeNames(
2357 set(self.owned_locks(locking.LEVEL_NODE)) &
2358 set(self.owned_locks(locking.LEVEL_NODE_RES)))
2359 else:
2360 node_whitelist = None
2361
2362 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
2363 _ComputeNics(op, cluster, None,
2364 self.cfg, ec_id),
2365 _ComputeFullBeParams(op, cluster),
2366 node_whitelist)
2367 for op in self.op.instances]
2368
2369 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
2370 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
2371
2372 ial.Run(self.op.iallocator)
2373
2374 if not ial.success:
2375 raise errors.OpPrereqError("Can't compute nodes using"
2376 " iallocator '%s': %s" %
2377 (self.op.iallocator, ial.info),
2378 errors.ECODE_NORES)
2379
2380 self.ia_result = ial.result
2381
2382 if self.op.dry_run:
2383 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2384 constants.JOB_IDS_KEY: [],
2385 })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    if self.op.iallocator:
      (allocatable, failed_insts) = self.ia_result
      allocatable_insts = map(compat.fst, allocatable)
    else:
      allocatable_insts = [op.instance_name for op in self.op.instances]
      failed_insts = []

    return {
      constants.ALLOCATABLE_KEY: allocatable_insts,
      constants.FAILED_KEY: failed_insts,
      }
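
  # For example (illustrative values only), if the iallocator could place
  # "inst1" but not "inst2", the partial result would be:
  #   {constants.ALLOCATABLE_KEY: ["inst1"],
  #    constants.FAILED_KEY: ["inst2"]}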

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result

      for (name, node_names) in allocatable:
        op = op2inst.pop(name)

        (op.pnode_uuid, op.pnode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
        if len(node_names) > 1:
          (op.snode_uuid, op.snode) = \
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])

        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator did return incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())
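
  # Note: "self.ia_result", as consumed above, is a pair (allocatable, failed)
  # where "allocatable" is a list of (instance_name, [node_name, ...]) tuples,
  # with the primary node first and, for mirrored templates, the secondary
  # node second.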


class _InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]
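
# For example (illustrative only):
#   _PrepareContainerMods([(constants.DDM_ADD, -1, {"size": 1024})],
#                         _InstNicModPrivate)
# returns [(constants.DDM_ADD, -1, {"size": 1024}, <_InstNicModPrivate>)],
# i.e. every (op, index, params) tuple gains a freshly constructed private
# data slot.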


def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs.

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has less CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
    pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
    or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are"
                                 " required" % (node_name, num_cpus, requested),
                                 errors.ECODE_NORES)
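
# The "hypervisor_specs" argument has the same shape as used by the other
# node-info callers in this module, e.g. (illustrative only):
#   [("kvm", cluster.hvparams["kvm"])]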


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # -1 means the last item
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx > len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)
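
# For example, identifier "0" resolves to the first item and "-1" to the last
# one, while a non-numeric identifier is matched against the items' "uuid" and
# "name" attributes.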


def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn,
                        post_add_fn=None):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}
  @type post_add_fn: callable
  @param post_add_fn: Callable for post-processing a newly created item after
    it has been put into the container. It receives the index of the new item
    and the new item as parameters.

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Determine where the item will be added; when adding, the identifier
      # can only be an index (possibly -1, meaning "append")
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only positive integer or -1 is accepted as"
                                   " identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)

      if post_add_fn is not None:
        post_add_fn(addidx, item)

    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        changes = [("%s/%s" % (kind, absidx), "remove")]

        if remove_fn is not None:
          msg = remove_fn(absidx, item, private)
          if msg:
            changes.append(("%s/%s" % (kind, absidx), msg))

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)
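
# A minimal sketch of how the callbacks compose (assuming "disks" is a list
# of L{objects.Disk} and the mods were built by L{_PrepareContainerMods}):
#   chgdesc = []
#   _ApplyContainerMods("disk", disks, chgdesc,
#                       [(constants.DDM_REMOVE, -1, {}, None)],
#                       None, None, None)
# drops the last disk from "disks" and appends ("disk/<idx>", "remove") to
# "chgdesc".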


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result
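
  # For example, a legacy 2-tuple list such as [("add", {...}), ("2", {...})]
  # is upgraded to [("add", -1, {...}), ("modify", "2", {...})]; more than one
  # add/remove operation per request is rejected above.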

  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

2695 """Verifies a disk modification.
2696
2697 """
2698 if op == constants.DDM_ADD:
2699 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2700 if mode not in constants.DISK_ACCESS_SET:
2701 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2702 errors.ECODE_INVAL)
2703
2704 size = params.get(constants.IDISK_SIZE, None)
2705 if size is None:
2706 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2707 constants.IDISK_SIZE, errors.ECODE_INVAL)
2708 size = int(size)
2709
2710 params[constants.IDISK_SIZE] = size
2711 name = params.get(constants.IDISK_NAME, None)
2712 if name is not None and name.lower() == constants.VALUE_NONE:
2713 params[constants.IDISK_NAME] = None
2714
2715 CheckSpindlesExclusiveStorage(params, excl_stor, True)
2716
2717 elif op == constants.DDM_MODIFY:
2718 if constants.IDISK_SIZE in params:
2719 raise errors.OpPrereqError("Disk size change not possible, use"
2720 " grow-disk", errors.ECODE_INVAL)
2721
2722
2723
2724 if self.instance.disk_template != constants.DT_EXT:
2725 utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2726
2727 name = params.get(constants.IDISK_NAME, None)
2728 if name is not None and name.lower() == constants.VALUE_NONE:
2729 params[constants.IDISK_NAME] = None
2730
  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If a network is given, mode or link"
                                     " must not be given",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)
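
  # After this normalization, a NIC add without an explicit MAC carries
  # mac=constants.VALUE_AUTO, a literal "none" for name/network/ip has been
  # turned into None, and ip="pool" is only accepted together with a network.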

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
            self.op.instance_communication is not None):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Verify NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically; needs
      # to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])


  def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
                              old_params, cluster, pnode_uuid):

    update_params_dict = dict([(key, params[key])
                               for key in constants.NICS_PARAMETERS
                               if key in params])

    req_link = update_params_dict.get(constants.NIC_LINK, None)
    req_mode = update_params_dict.get(constants.NIC_MODE, None)

    new_net_uuid = None
    new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
    if new_net_uuid_or_name:
      new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
      new_net_obj = self.cfg.GetNetwork(new_net_uuid)

    if old_net_uuid:
      old_net_obj = self.cfg.GetNetwork(old_net_uuid)

    if new_net_uuid:
      netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
      if not netparams:
        raise errors.OpPrereqError("No netparams found for the network"
                                   " %s, probably not connected" %
                                   new_net_obj.name, errors.ECODE_INVAL)
      new_params = dict(netparams)
    else:
      new_params = GetUpdatedParams(old_params, update_params_dict)

    utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)

    new_filled_params = cluster.SimpleFillNIC(new_params)
    objects.NIC.CheckParameterSyntax(new_filled_params)

    new_mode = new_filled_params[constants.NIC_MODE]
    if new_mode == constants.NIC_MODE_BRIDGED:
      bridge = new_filled_params[constants.NIC_LINK]
      msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
      if msg:
        msg = "Error checking bridges on node '%s': %s" % \
                (self.cfg.GetNodeName(pnode_uuid), msg)
        if self.op.force:
          self.warn.append(msg)
        else:
          raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)

    elif new_mode == constants.NIC_MODE_ROUTED:
      ip = params.get(constants.INIC_IP, old_ip)
      if ip is None:
        raise errors.OpPrereqError("Cannot set the NIC IP address to None"
                                   " on a routed NIC", errors.ECODE_INVAL)

    elif new_mode == constants.NIC_MODE_OVS:
      # TODO: check OVS link
      self.LogInfo("OVS links are currently not checked for correctness")

    if constants.INIC_MAC in params:
      mac = params[constants.INIC_MAC]
      if mac is None:
        raise errors.OpPrereqError("Cannot unset the NIC MAC address",
                                   errors.ECODE_INVAL)
      elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        # otherwise generate the MAC address
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
      else:
        # or validate/reserve the current one
        try:
          self.cfg.ReserveMAC(mac, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("MAC address '%s' already in use"
                                     " in cluster" % mac,
                                     errors.ECODE_NOTUNIQUE)
    elif new_net_uuid != old_net_uuid:

      def get_net_prefix(net_uuid):
        mac_prefix = None
        if net_uuid:
          nobj = self.cfg.GetNetwork(net_uuid)
          mac_prefix = nobj.mac_prefix

        return mac_prefix

      new_prefix = get_net_prefix(new_net_uuid)
      old_prefix = get_net_prefix(old_net_uuid)
      if old_prefix != new_prefix:
        params[constants.INIC_MAC] = \
          self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())

    # if there is a change in (ip, network) tuple
    new_ip = params.get(constants.INIC_IP, old_ip)
    if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
      if new_ip:
        # if IP is pool then require a network and generate one IP
        if new_ip.lower() == constants.NIC_IP_POOL:
          if new_net_uuid:
            try:
              new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP"
                                         " from the address pool",
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s",
                         new_ip,
                         new_net_obj.name)
            params[constants.INIC_IP] = new_ip
          else:
            raise errors.OpPrereqError("ip=pool, but no network found",
                                       errors.ECODE_INVAL)
        # Reserve new IP if in the new network if any
        elif new_net_uuid:
          try:
            self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
                               check=self.op.conflicts_check)
            self.LogInfo("Reserving IP %s in network %s",
                         new_ip, new_net_obj.name)
          except errors.ReservationError:
            raise errors.OpPrereqError("IP %s not available in network %s" %
                                       (new_ip, new_net_obj.name),
                                       errors.ECODE_NOTUNIQUE)
        # new network is None so check if new IP is a conflicting IP
        elif self.op.conflicts_check:
          _CheckForConflictingIp(self, new_ip, pnode_uuid)

      # release old IP if old network is not None
      if old_ip and old_net_uuid:
        try:
          self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
        except errors.AddressPoolError:
          logging.warning("Release IP %s not contained in network %s",
                          old_ip, old_net_obj.name)

    # there are no changes in (ip, network) tuple and old network is not None
    elif (old_net_uuid is not None and
          (req_link is not None or req_mode is not None)):
      raise errors.OpPrereqError("Not allowed to change link or mode of"
                                 " a NIC that is connected to a network",
                                 errors.ECODE_INVAL)

    private.params = new_params
    private.filled = new_filled_params
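
  # The computed "private.params" (netparams or updated nicparams) and
  # "private.filled" (cluster-filled values) are consumed later by
  # _CreateNewNic and _ApplyNicMods when the modification is applied.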

  def _PreCheckDiskTemplate(self, pnode_info):
    """CheckPrereq checks related to a new disk template."""
    pnode_uuid = self.instance.primary_node
    if self.instance.disk_template == self.op.disk_template:
      raise errors.OpPrereqError("Instance already has disk template %s" %
                                 self.instance.disk_template,
                                 errors.ECODE_INVAL)

    if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
      raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
                                 " cluster." % self.op.disk_template,
                                 errors.ECODE_INVAL)

    if (self.instance.disk_template,
        self.op.disk_template) not in self._DISK_CONVERSIONS:
      raise errors.OpPrereqError("Unsupported disk template conversion from"
                                 " %s to %s" % (self.instance.disk_template,
                                                self.op.disk_template),
                                 errors.ECODE_INVAL)
    CheckInstanceState(self, self.instance, INSTANCE_DOWN,
                       msg="cannot change disk template")
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.remote_node_uuid == pnode_uuid:
        raise errors.OpPrereqError("Given new secondary node %s is the same"
                                   " as the primary node of the instance" %
                                   self.op.remote_node, errors.ECODE_STATE)
      CheckNodeOnline(self, self.op.remote_node_uuid)
      CheckNodeNotDrained(self, self.op.remote_node_uuid)
      # FIXME: here we assume that the old instance type is DT_PLAIN
      assert self.instance.disk_template == constants.DT_PLAIN
      disks = [{constants.IDISK_SIZE: d.size,
                constants.IDISK_VG: d.logical_id[0]}
               for d in self.cfg.GetInstanceDisks(self.instance.uuid)]
      required = ComputeDiskSizePerVG(self.op.disk_template, disks)
      CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)

      snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
      snode_group = self.cfg.GetNodeGroup(snode_info.group)
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              snode_group)
      CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
                             ignore=self.op.ignore_ipolicy)
      if pnode_info.group != snode_info.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      # Make sure none of the nodes require exclusive storage
      nodes = [pnode_info]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        assert snode_info
        nodes.append(snode_info)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        errmsg = ("Cannot convert disk template from %s to %s when exclusive"
                  " storage is enabled" % (self.instance.disk_template,
                                           self.op.disk_template))
        raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)

3096 """CheckPrereq checks related to disk changes.
3097
3098 @type ispec: dict
3099 @param ispec: instance specs to be updated with the new disks
3100
3101 """
3102 self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
3103
3104 inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
3105 excl_stor = compat.any(
3106 rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes).values()
3107 )
3108
3109
3110
3111 ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
3112 if self.instance.disk_template == constants.DT_EXT:
3113 self._CheckMods("disk", self.op.disks, {}, ver_fn)
3114 else:
3115 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
3116 ver_fn)
3117
3118 self.diskmod = _PrepareContainerMods(self.op.disks, None)
3119
3120
3121 if self.instance.disk_template in constants.DT_EXT:
3122 for mod in self.diskmod:
3123 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
3124 if mod[0] == constants.DDM_ADD:
3125 if ext_provider is None:
3126 raise errors.OpPrereqError("Instance template is '%s' and parameter"
3127 " '%s' missing, during disk add" %
3128 (constants.DT_EXT,
3129 constants.IDISK_PROVIDER),
3130 errors.ECODE_NOENT)
3131 elif mod[0] == constants.DDM_MODIFY:
3132 if ext_provider:
3133 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
3134 " modification" %
3135 constants.IDISK_PROVIDER,
3136 errors.ECODE_INVAL)
3137 else:
3138 for mod in self.diskmod:
3139 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
3140 if ext_provider is not None:
3141 raise errors.OpPrereqError("Parameter '%s' is only valid for"
3142 " instances of type '%s'" %
3143 (constants.IDISK_PROVIDER,
3144 constants.DT_EXT),
3145 errors.ECODE_INVAL)
3146
3147 if not self.op.wait_for_sync and not self.instance.disks_active:
3148 for mod in self.diskmod:
3149 if mod[0] == constants.DDM_ADD:
3150 raise errors.OpPrereqError("Can't add a disk to an instance with"
3151 " deactivated disks and"
3152 " --no-wait-for-sync given.",
3153 errors.ECODE_INVAL)
3154
3155 if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
3156 raise errors.OpPrereqError("Disk operations not supported for"
3157 " diskless instances", errors.ECODE_INVAL)
3158
3159 def _PrepareDiskMod(_, disk, params, __):
3160 disk.name = params.get(constants.IDISK_NAME, None)
3161
3162
3163 inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
3164 disks = copy.deepcopy(inst_disks)
3165 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
3166 _PrepareDiskMod, None)
3167 utils.ValidateDeviceNames("disk", disks)
3168 if len(disks) > constants.MAX_DISKS:
3169 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
3170 " more" % constants.MAX_DISKS,
3171 errors.ECODE_STATE)
3172 disk_sizes = [disk.size for disk in inst_disks]
3173 disk_sizes.extend(params["size"] for (op, idx, params, private) in
3174 self.diskmod if op == constants.DDM_ADD)
3175 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
3176 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
3177
3178
3179 if self.op.offline is not None:
3180 if self.op.offline:
3181 msg = "can't change to offline without being down first"
3182 else:
3183 msg = "can't change to online (down) without being offline first"
3184 CheckInstanceState(self, self.instance, INSTANCE_NOT_RUNNING,
3185 msg=msg)
3186
  @staticmethod
  def _InstanceCommunicationDDM(cfg, instance_communication, instance):
    """Create a NIC mod that adds or removes the instance
    communication NIC to a running instance.

    The NICs are dynamically created using the Dynamic Device
    Modification (DDM). This function produces a NIC modification
    (mod) that inserts an additional NIC meant for instance
    communication in or removes an existing instance communication NIC
    from a running instance, using DDM.

    @type cfg: L{config.ConfigWriter}
    @param cfg: cluster configuration

    @type instance_communication: boolean
    @param instance_communication: whether instance communication is
      enabled or disabled

    @type instance: L{objects.Instance}
    @param instance: instance to which the NIC mod will be applied to

    @rtype: (L{constants.DDM_ADD}, -1, parameters) or
            (L{constants.DDM_REMOVE}, -1, parameters) or
            L{None}
    @return: DDM mod containing an action to add or remove the NIC, or
      None if nothing needs to be done

    """
    nic_name = _ComputeInstanceCommunicationNIC(instance.name)

    instance_communication_nic = None

    for nic in instance.nics:
      if nic.name == nic_name:
        instance_communication_nic = nic
        break

    if instance_communication and not instance_communication_nic:
      action = constants.DDM_ADD
      params = {constants.INIC_NAME: nic_name,
                constants.INIC_MAC: constants.VALUE_GENERATE,
                constants.INIC_IP: constants.NIC_IP_POOL,
                constants.INIC_NETWORK:
                  cfg.GetInstanceCommunicationNetwork()}
    elif not instance_communication and instance_communication_nic:
      action = constants.DDM_REMOVE
      params = None
    else:
      action = None
      params = None

    if action is not None:
      return (action, -1, params)
    else:
      return None
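
  # For example (illustrative), enabling instance communication on an
  # instance lacking the special NIC yields a mod roughly like
  #   (constants.DDM_ADD, -1, {"name": ..., "mac": "generate", "ip": "pool",
  #                            "network": ...})
  # while disabling it on an instance that has the NIC yields
  #   (constants.DDM_REMOVE, -1, None).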

  def CheckPrereq(self):
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    pnode_uuid = self.instance.primary_node

    self.warn = []

    if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
        not self.op.force):
      # verify that the instance is not up
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError("Instance is still running on %s" %
                                   self.cfg.GetNodeName(pnode_uuid),
                                   errors.ECODE_STATE)

    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
    node_uuids = list(self.cfg.GetInstanceNodes(self.instance.uuid))
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance information after the modification
    ispec = {}

    if self.op.hotplug or self.op.hotplug_if_possible:
      result = self.rpc.call_hotplug_supported(self.instance.primary_node,
                                               self.instance)
      if result.fail_msg:
        if self.op.hotplug:
          result.Raise("Hotplug is not possible: %s" % result.fail_msg,
                       prereq=True)
        else:
          self.LogWarning(result.fail_msg)
          self.op.hotplug = False
          self.LogInfo("Modification will take place without hotplugging.")
      else:
        self.op.hotplug = True

    # Prepare NIC modifications
    if self.op.instance_communication is not None:
      mod = self._InstanceCommunicationDDM(self.cfg,
                                           self.op.instance_communication,
                                           self.instance)
      if mod is not None:
        self.op.nics.append(mod)

    self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)

    # disks processing
    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams processing
    if self.op.hvparams:
      hv_type = self.instance.hypervisor
      i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
      utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
      hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)

      # local check
      hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
      CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
      self.hv_proposed = self.hv_new = hv_new # the new actual values
      self.hv_inst = i_hvdict # the new dict (without defaults)
    else:
      self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
                                                   self.instance.os,
                                                   self.instance.hvparams)
      self.hv_new = self.hv_inst = {}

    # beparams processing
    if self.op.beparams:
      i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
                                  use_none=True)
      objects.UpgradeBeParams(i_bedict)
      utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
      be_new = self.cluster.SimpleFillBE(i_bedict)
      self.be_proposed = self.be_new = be_new # the new actual values
      self.be_inst = i_bedict # the new dict (without defaults)
    else:
      self.be_new = self.be_inst = {}
      self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
    be_old = self.cluster.FillBE(self.instance)

    # CPU param validation -- checking every time a parameter is
    # changed to cover all cases where either CPU mask or vcpus have
    # changed
    if (constants.BE_VCPUS in self.be_proposed and
        constants.HV_CPU_MASK in self.hv_proposed):
      cpu_list = \
        utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
      # Verify mask is consistent with number of vCPUs. Can skip this
      # test if only 1 entry in the CPU mask, which means same mask
      # is applied to all vCPUs.
      if (len(cpu_list) > 1 and
          len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
        raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
                                   " CPU mask [%s]" %
                                   (self.be_proposed[constants.BE_VCPUS],
                                    self.hv_proposed[constants.HV_CPU_MASK]),
                                   errors.ECODE_INVAL)

      # Only perform this test if a new CPU mask is given
      if constants.HV_CPU_MASK in self.hv_new:
        # Calculate the largest CPU number requested
        max_requested_cpu = max(map(max, cpu_list))
        # Check that all of the instance's nodes have enough physical CPUs
        # to satisfy the requested CPU mask
        hvspecs = [(self.instance.hypervisor,
                    self.cfg.GetClusterInfo()
                      .hvparams[self.instance.hypervisor])]
        _CheckNodesPhysicalCPUs(self,
                                self.cfg.GetInstanceNodes(self.instance.uuid),
                                max_requested_cpu + 1,
                                hvspecs)

    # osparams processing
    if self.op.os_name and not self.op.force:
      instance_os = self.op.os_name
    else:
      instance_os = self.instance.os

    if self.op.osparams or self.op.osparams_private:
      public_parms = self.op.osparams or {}
      private_parms = self.op.osparams_private or {}
      dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)

      if dupe_keys:
        raise errors.OpPrereqError("OS parameters repeated multiple times:"
                                   " %s" % utils.CommaJoin(dupe_keys),
                                   errors.ECODE_INVAL)

      self.os_inst = GetUpdatedParams(self.instance.osparams,
                                      public_parms)
      self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
                                              private_parms)

      CheckOSParams(self, True, node_uuids, instance_os,
                    objects.FillDict(self.os_inst,
                                     self.os_inst_private),
                    self.op.force_variant)

    else:
      self.os_inst = {}
      self.os_inst_private = {}

    #TODO(dynmem): do the appropriate check involving MINMEM
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if be_new[constants.BE_AUTO_BALANCE]:
        # either we changed auto_balance to yes or it was from before
        mem_check_list.extend(
          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid))
      instance_info = self.rpc.call_instance_info(
          pnode_uuid, self.instance.name, self.instance.hypervisor,
          cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Assume the primary node is unreachable and go ahead
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # Assume instance not running
            # (there is a slight race condition here, but it's not very
            # probable, and we have no other way to check)
            # TODO: Describe race condition
            current_mem = 0

          miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if be_new[constants.BE_AUTO_BALANCE]:
        secondary_nodes = \
          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

          elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
          self.instance.primary_node, self.instance.name,
          self.instance.hypervisor,
          cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node),
                        prereq=True)
      if not remote_info.payload: # not running already
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
          (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
           self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])

    # make self.cluster visible in the functions below
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    # Verify NIC changes (operating on copy)
    nics = [nic.Copy() for nic in self.instance.nics]
    _ApplyContainerMods("NIC", nics, None, self.nicmod,
                        _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                          self._CreateNewNic, self._ApplyNicMods,
                          self._RemoveNic)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        new_disk_template = self.op.disk_template
      else:
        new_disk_template = self.instance.disk_template
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                     new_disk_template)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                     new_disk_template)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

  def _ConvertPlainToDrbd(self, feedback_fn):
    """Converts an instance from plain to drbd.

    """
    feedback_fn("Converting template to drbd")
    pnode_uuid = self.instance.primary_node
    snode_uuid = self.op.remote_node_uuid

    assert self.instance.disk_template == constants.DT_PLAIN

    old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    # create a fake disk info for GenerateDiskTemplate
    disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
                  constants.IDISK_VG: d.logical_id[0],
                  constants.IDISK_NAME: d.name}
                 for d in old_disks]
    new_disks = GenerateDiskTemplate(self, self.op.disk_template,
                                     self.instance.uuid, pnode_uuid,
                                     [snode_uuid], disk_info, None, None, 0,
                                     feedback_fn, self.diskparams)
    anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
    p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
    s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
    info = GetInstanceInfoText(self.instance)
    feedback_fn("Creating additional volumes...")
    # first, create the missing data and meta devices
    for disk in anno_disks:
      # unfortunately this is... not too nice
      CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
                           info, True, p_excl_stor)
      for child in disk.children:
        CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
                             s_excl_stor)

    # all new LVs are in place, rename the old volumes
    feedback_fn("Renaming original volumes...")
    rename_list = [(o, n.children[0].logical_id)
                   for (o, n) in zip(old_disks, new_disks)]
    result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
    result.Raise("Failed to rename original LVs")

    feedback_fn("Initializing DRBD devices...")
    # all child devices are in place, we can now create the DRBD devices
    try:
      for disk in anno_disks:
        for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
                                       (snode_uuid, s_excl_stor)]:
          f_create = node_uuid == pnode_uuid
          CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
                               f_create, excl_stor)
    except errors.GenericError as e:
      feedback_fn("Initializing of DRBD devices failed;"
                  " renaming back original volumes...")
      rename_back_list = [(n.children[0], o.logical_id)
                          for (n, o) in zip(new_disks, old_disks)]
      result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
      result.Raise("Failed to rename LVs back after error %s" % str(e))
      raise

    # Remove the old disks from the instance
    for old_disk in old_disks:
      self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)

    # Update the disk template of the instance
    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
    self.instance.disk_template = constants.DT_DRBD8
    self.cfg.Update(self.instance, feedback_fn)

    # Attach the new disks to the instance
    for (idx, new_disk) in enumerate(new_disks):
      self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)

    # re-read the instance from the configuration
    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    # disks are created, waiting for sync
    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

3682 """Converts an instance from drbd to plain.
3683
3684 """
3685 secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
3686
3687 assert self.instance.disk_template == constants.DT_DRBD8
3688 assert len(secondary_nodes) == 1 or not self.instance.disks
3689
3690 pnode_uuid = self.instance.primary_node
3691
3692
3693 snode_uuid = None
3694 if secondary_nodes:
3695 snode_uuid = secondary_nodes[0]
3696
3697 feedback_fn("Converting template to plain")
3698
3699 disks = self.cfg.GetInstanceDisks(self.instance.uuid)
3700 old_disks = AnnotateDiskParams(self.instance, disks, self.cfg)
3701 new_disks = [d.children[0] for d in disks]
3702
3703
3704 for parent, child in zip(old_disks, new_disks):
3705 child.size = parent.size
3706 child.mode = parent.mode
3707 child.name = parent.name
3708
3709
3710
3711 for disk in old_disks:
3712 tcp_port = disk.logical_id[2]
3713 self.cfg.AddTcpUdpPort(tcp_port)
3714
3715
3716 for old_disk in old_disks:
3717 self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
3718
3719
3720 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
3721 self.instance.disk_template = constants.DT_PLAIN
3722 self.cfg.Update(self.instance, feedback_fn)
3723
3724
3725 for (idx, new_disk) in enumerate(new_disks):
3726 self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
3727
3728
3729 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
3730
3731
3732 ReleaseLocks(self, locking.LEVEL_NODE)
3733
3734 feedback_fn("Removing volumes on the secondary node...")
3735 for disk in old_disks:
3736 result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
3737 result.Warn("Could not remove block device %s on node %s,"
3738 " continuing anyway" %
3739 (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
3740 self.LogWarning)
3741
3742 feedback_fn("Removing unneeded volumes on the primary node...")
3743 for idx, disk in enumerate(old_disks):
3744 meta = disk.children[1]
3745 result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
3746 result.Warn("Could not remove metadata for disk %d on node %s,"
3747 " continuing anyway" %
3748 (idx, self.cfg.GetNodeName(pnode_uuid)),
3749 self.LogWarning)

  def _HotplugDevice(self, action, dev_type, device, extra, seq):
    self.LogInfo("Trying to hotplug device...")
    msg = "hotplug:"
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution...")
      msg += "failed"
    else:
      self.LogInfo("Hotplug done.")
      msg += "done"
    return msg
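
  # The returned marker is either "hotplug:done" or "hotplug:failed"; callers
  # append it to the per-device change list that is reported back to the user.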
3766
3768 """Creates a new disk.
3769
3770 """
3771
    instance_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    if self.instance.disk_template in constants.DTS_FILEBASED:
      (file_driver, file_path) = instance_disks[0].logical_id
      file_path = os.path.dirname(file_path)
    else:
      file_driver = file_path = None

    secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
    disk = \
      GenerateDiskTemplate(self, self.instance.disk_template,
                           self.instance.uuid, self.instance.primary_node,
                           secondary_nodes, [params], file_path,
                           file_driver, idx, self.Log, self.diskparams)[0]

    new_disks = CreateDisks(self, self.instance, disks=[disk])
    self.cfg.AddInstanceDisk(self.instance.uuid, disk, idx)

    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    if self.cluster.prealloc_wipe_disks:
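      # Wipe new disk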
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name, uri = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, (link_name, uri), idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)

  def _PostAddDisk(self, _, disk):
    if not WaitForSync(self, self.instance, disks=[disk],
                       oneshot=not self.op.wait_for_sync):
      raise errors.OpExecError("Failed to sync disks of %s" %
                               self.instance.name)

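    # the disk is active at this point, so deactivate it if the instance
    # disks are supposed to be inactive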
    if not self.instance.disks_active:
      ShutdownInstanceDisks(self, self.instance, disks=[disk])

  def _ModifyDisk(self, idx, disk, params, _):
    """Modifies a disk.

    """
    changes = []
    if constants.IDISK_MODE in params:
      disk.mode = params.get(constants.IDISK_MODE)
      changes.append(("disk.mode/%d" % idx, disk.mode))

    if constants.IDISK_NAME in params:
      disk.name = params.get(constants.IDISK_NAME)
      changes.append(("disk.name/%d" % idx, disk.name))

    # Modify arbitrary params in case instance template is ext
    for key, value in params.items():
      if (key not in constants.MODIFIABLE_IDISK_PARAMS and
          self.instance.disk_template == constants.DT_EXT):
        # stolen from GetUpdatedParams: default means reset/delete
        if value.lower() == constants.VALUE_DEFAULT:
          try:
            del disk.params[key]
          except KeyError:
            pass
        else:
          disk.params[key] = value
          changes.append(("disk.params:%s/%d" % (key, idx), value))

    self.cfg.Update(disk, self.feedback_fn)
    return changes

  def _RemoveDisk(self, idx, root, _):
    """Removes a disk.

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)
      ShutdownInstanceDisks(self, self.instance, [root])

    (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
    for node_uuid, disk in anno_disk.ComputeNodeTree(
        self.instance.primary_node):
      msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
              .fail_msg
      if msg:
        self.LogWarning("Could not remove disk/%d on node '%s': %s,"
                        " continuing anyway", idx,
                        self.cfg.GetNodeName(node_uuid), msg)

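    # if this is a DRBD disk, return its port to the pool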
    if root.dev_type in constants.DTS_DRBD:
      self.cfg.AddTcpUdpPort(root.logical_id[2])

    # Remove disk from config
    self.cfg.RemoveInstanceDisk(self.instance.uuid, root.uuid)

    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    return hotmsg

  def _CreateNewNic(self, idx, params, private):
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)

    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
        private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)

  def _ModifyNic(self, idx, nic, params, private):
    """Modifies a network interface.

    """
    changes = []

    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes

  def _RemoveNic(self, idx, nic, _):
    if self.op.hotplug:
      return self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                 constants.HOTPLUG_TARGET_NIC,
                                 nic, None, idx)

  def Exec(self, feedback_fn):
3963 """Modifies an instance.
3964
3965 All parameters take effect only at the next restart of the instance.
3966
3967 """
3968 self.feedback_fn = feedback_fn
3969
3970
3971
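    # Process here the warnings from CheckPrereq, as we don't have a
    # feedback_fn there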
    for warn in self.warn:
      feedback_fn("WARNING: %s" % warn)

    assert ((self.op.disk_template is None) ^
            bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
      "Not owning any node resource locks"

    result = []

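    # New primary node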
    if self.op.pnode_uuid:
      self.instance.primary_node = self.op.pnode_uuid

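    # runtime memory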
    if self.op.runtime_mem:
      rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
                                                     self.instance,
                                                     self.op.runtime_mem)
      rpcres.Raise("Cannot modify instance runtime memory")
      result.append(("runtime_memory", self.op.runtime_mem))

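    # Apply disk changes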
    inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    _ApplyContainerMods("disk", inst_disks, result, self.diskmod,
                        self._CreateNewDisk, self._ModifyDisk,
                        self._RemoveDisk, post_add_fn=self._PostAddDisk)

    if self.op.disk_template:
      if __debug__:
        check_nodes = set(self.cfg.GetInstanceNodes(self.instance.uuid))
        if self.op.remote_node_uuid:
          check_nodes.add(self.op.remote_node_uuid)
        for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
          owned = self.owned_locks(level)
          assert not (check_nodes - owned), \
            ("Not owning the correct locks, owning %r, expected at least %r" %
             (owned, check_nodes))

      r_shut = ShutdownInstanceDisks(self, self.instance)
      if not r_shut:
        raise errors.OpExecError("Cannot shutdown instance disks, unable to"
                                 " proceed with disk template conversion")
      mode = (self.instance.disk_template, self.op.disk_template)
      try:
        self._DISK_CONVERSIONS[mode](self, feedback_fn)
      except:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise
      result.append(("disk_template", self.op.disk_template))

      assert self.instance.disk_template == self.op.disk_template, \
        ("Expected disk template '%s', found '%s'" %
         (self.op.disk_template, self.instance.disk_template))

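    # Release node and resource locks if there are any (they might already
    # have been released during disk conversion)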
    ReleaseLocks(self, locking.LEVEL_NODE)
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

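    # Apply NIC changes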
    if self._new_nics is not None:
      self.instance.nics = self._new_nics
      result.extend(self._nic_chgdesc)

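    # hvparams changes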
    if self.op.hvparams:
      self.instance.hvparams = self.hv_inst
      for key, val in self.op.hvparams.items():
        result.append(("hv/%s" % key, val))

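    # beparams changes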
    if self.op.beparams:
      self.instance.beparams = self.be_inst
      for key, val in self.op.beparams.items():
        result.append(("be/%s" % key, val))

    # OS change
    if self.op.os_name:
      self.instance.os = self.op.os_name

    # osparams changes
    if self.op.osparams:
      self.instance.osparams = self.os_inst
      for key, val in self.op.osparams.items():
        result.append(("os/%s" % key, val))

    if self.op.osparams_private:
      self.instance.osparams_private = self.os_inst_private
      for key, val in self.op.osparams_private.items():
        # Show the Private(...) blurb.
        result.append(("os_private/%s" % key, repr(val)))

    self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())

    if self.op.offline is None:
      # Ignore
      pass
    elif self.op.offline:
      # Mark instance as offline
      self.instance = self.cfg.MarkInstanceOffline(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_OFFLINE))
    else:
      # Mark instance as online, but this does not change the admin state
      self.instance = self.cfg.MarkInstanceDown(self.instance.uuid)
      result.append(("admin_state", constants.ADMINST_DOWN))

    UpdateMetadata(feedback_fn, self.rpc, self.instance)

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
                self.owned_locks(locking.LEVEL_NODE)), \
      "All node locks should have been released by now"

    return result

  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }


class LUInstanceChangeGroup(LogicalUnit):
  HPATH = "instance-change-group"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self.share_locks = ShareAll()

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

    self._ExpandAndLockInstance()

    if self.op.target_groups:
      self.req_target_uuids = [self.cfg.LookupNodeGroup(g)
                               for g in self.op.target_groups]
    else:
      self.req_target_uuids = None

    self.op.iallocator = GetDefaultIAllocator(self.cfg,
                                              self.op.iallocator)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set(self.req_target_uuids)

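        # Lock all groups used by instance optimistically; this requires going
        # via the node before it's locked, requiring verification later on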
        instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
        lock_groups.update(instance_groups)
      else:
        # No target groups, need to lock all of them
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      if self.req_target_uuids:
        # Lock all nodes used by instances
        self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
        self._LockInstancesNodes()

        # Lock all nodes in all potential target groups
        lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) |
                       self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
        member_nodes = [node_uuid
                        for group in lock_groups
                        for node_uuid in self.cfg.GetNodeGroup(group).members]
        self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
      else:
        # Lock all nodes as all groups are potential targets
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert (self.req_target_uuids is None or
            owned_groups.issuperset(self.req_target_uuids))
    assert owned_instance_names == set([self.op.instance_name])

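    # Get instance information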
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)

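    # Check if node groups for locked instance are still correct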
    instance_all_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
    assert owned_nodes.issuperset(instance_all_nodes), \
      ("Instance %s's nodes changed while we kept the lock" %
       self.op.instance_name)

    inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
                                          owned_groups)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = frozenset(self.req_target_uuids)
    else:
      # All groups except those used by the instance are potential targets
      self.target_uuids = owned_groups - inst_groups

    conflicting_groups = self.target_uuids & inst_groups
    if conflicting_groups:
      raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
                                 " used by the instance '%s'" %
                                 (utils.CommaJoin(conflicting_groups),
                                  self.op.instance_name),
                                 errors.ECODE_INVAL)

    if not self.target_uuids:
      raise errors.OpPrereqError("There are no possible target groups",
                                 errors.ECODE_INVAL)
4189
4191 """Build hooks env.
4192
4193 """
4194 assert self.target_uuids
4195
4196 env = {
4197 "TARGET_GROUPS": " ".join(self.target_uuids),
4198 }
4199
4200 env.update(BuildInstanceHookEnvByObject(self, self.instance))
4201
4202 return env
  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    instances = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert instances == [self.op.instance_name], "Instance not locked"

    req = iallocator.IAReqGroupChange(instances=instances,
                                      target_groups=list(self.target_uuids))
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute solution for changing group of"
                                 " instance '%s' using iallocator '%s': %s" %
                                 (self.op.instance_name, self.op.iallocator,
                                  ial.info), errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for changing group of"
                 " instance '%s'", len(jobs), self.op.instance_name)

    return ResultWithJobs(jobs)