31 """Logical units dealing with instances."""
32
33 import OpenSSL
34 import copy
35 import logging
36 import os
37
38 from ganeti import compat
39 from ganeti import constants
40 from ganeti import errors
41 from ganeti import ht
42 from ganeti import hypervisor
43 from ganeti import locking
44 from ganeti.masterd import iallocator
45 from ganeti import masterd
46 from ganeti import netutils
47 from ganeti import objects
48 from ganeti import pathutils
49 import ganeti.rpc.node as rpc
50 from ganeti import utils
51
52 from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
53
54 from ganeti.cmdlib.common import INSTANCE_DOWN, \
55 INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
56 ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
57 LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
58 IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
59 AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
60 ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
61 CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
62 from ganeti.cmdlib.instance_storage import CreateDisks, \
63 CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
64 IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
65 CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
66 StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
67 CheckSpindlesExclusiveStorage
68 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
69 GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
70 NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
71 ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
72 GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
73 CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
74
75 import ganeti.masterd.instance
76
77
78
79
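# Type constraint for the change lists returned by the callbacks used when
# applying container (NIC/disk) modifications: either None or a list of
# (non-empty name, value) pairs.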
80 _TApplyContModsCbChanges = \
81 ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
82 ht.TNonEmptyString,
83 ht.TAny,
84 ])))
88 """Ensures that a given hostname resolves to a 'sane' name.
89
90 The given name is required to be a prefix of the resolved hostname,
91 to prevent accidental mismatches.
92
93 @param lu: the logical unit on behalf of which we're checking
94 @param name: the name we should resolve and check
95 @return: the resolved hostname object
96
97 """
98 hostname = netutils.GetHostname(name=name)
99 if hostname.name != name:
100 lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
101 if not utils.MatchNameComponent(name, [hostname.name]):
102 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
103 " same as given hostname '%s'") %
104 (hostname.name, name), errors.ECODE_INVAL)
105 return hostname
106
109 """Generate error if opportunistic locking is not possible.
110
111 """
112 if op.opportunistic_locking and not op.iallocator:
113 raise errors.OpPrereqError("Opportunistic locking is only available in"
114 " combination with an instance allocator",
115 errors.ECODE_INVAL)
116
119 """Wrapper around IAReqInstanceAlloc.
120
121 @param op: The instance opcode
122 @param disks: The computed disks
123 @param nics: The computed nics
  @param beparams: The fully filled beparams
125 @param node_name_whitelist: List of nodes which should appear as online to the
126 allocator (unless the node is already marked offline)
127
128 @returns: A filled L{iallocator.IAReqInstanceAlloc}
129
130 """
131 spindle_use = beparams[constants.BE_SPINDLE_USE]
132 return iallocator.IAReqInstanceAlloc(name=op.instance_name,
133 disk_template=op.disk_template,
134 tags=op.tags,
135 os=op.os_type,
136 vcpus=beparams[constants.BE_VCPUS],
137 memory=beparams[constants.BE_MAXMEM],
138 spindle_use=spindle_use,
139 disks=disks,
140 nics=[n.ToDict() for n in nics],
141 hypervisor=op.hypervisor,
142 node_whitelist=node_name_whitelist)
143
161
164 """Computes the nics.
165
166 @param op: The instance opcode
167 @param cluster: Cluster configuration object
168 @param default_ip: The default ip to assign
169 @param cfg: An instance of the configuration object
170 @param ec_id: Execution context ID
171
  @returns: The built-up NICs
173
174 """
175 nics = []
176 for nic in op.nics:
177 nic_mode_req = nic.get(constants.INIC_MODE, None)
178 nic_mode = nic_mode_req
179 if nic_mode is None or nic_mode == constants.VALUE_AUTO:
180 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
181
182 net = nic.get(constants.INIC_NETWORK, None)
183 link = nic.get(constants.NIC_LINK, None)
184 ip = nic.get(constants.INIC_IP, None)
185 vlan = nic.get(constants.INIC_VLAN, None)
186
187 if net is None or net.lower() == constants.VALUE_NONE:
188 net = None
189 else:
190 if nic_mode_req is not None or link is not None:
191 raise errors.OpPrereqError("If network is given, no mode or link"
192 " is allowed to be passed",
193 errors.ECODE_INVAL)
194
195
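    # The IP field understands several special values: None/"none" means no
    # IP, "auto" takes the address resolved during the name check, "pool"
    # draws one from the network's address pool (handled later), and
    # anything else must be a literal, valid IP address.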
196 if ip is None or ip.lower() == constants.VALUE_NONE:
197 nic_ip = None
198 elif ip.lower() == constants.VALUE_AUTO:
199 if not op.name_check:
200 raise errors.OpPrereqError("IP address set to auto but name checks"
201 " have been skipped",
202 errors.ECODE_INVAL)
203 nic_ip = default_ip
204 else:
205
206
207 if ip.lower() == constants.NIC_IP_POOL:
208 if net is None:
209 raise errors.OpPrereqError("if ip=pool, parameter network"
210 " must be passed too",
211 errors.ECODE_INVAL)
212
213 elif not netutils.IPAddress.IsValid(ip):
214 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
215 errors.ECODE_INVAL)
216
217 nic_ip = ip
218
219
220 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
221 raise errors.OpPrereqError("Routed nic mode requires an ip address",
222 errors.ECODE_INVAL)
223
224
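    # MAC addresses are either auto-generated later or, if given explicitly,
    # normalized and reserved now so that duplicates within the cluster are
    # rejected early.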
225 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
226 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
227 mac = utils.NormalizeAndValidateMac(mac)
228
229 try:
230
231 cfg.ReserveMAC(mac, ec_id)
232 except errors.ReservationError:
233 raise errors.OpPrereqError("MAC address %s already in use"
234 " in cluster" % mac,
235 errors.ECODE_NOTUNIQUE)
236
237
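    # Only parameters that were passed explicitly end up in nicparams; the
    # remaining values come from the cluster defaults when the parameters
    # are filled and syntax-checked below.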
238 nicparams = {}
239 if nic_mode_req:
240 nicparams[constants.NIC_MODE] = nic_mode
241 if link:
242 nicparams[constants.NIC_LINK] = link
243 if vlan:
244 nicparams[constants.NIC_VLAN] = vlan
245
246 check_params = cluster.SimpleFillNIC(nicparams)
247 objects.NIC.CheckParameterSyntax(check_params)
248 net_uuid = cfg.LookupNetwork(net)
249 name = nic.get(constants.INIC_NAME, None)
250 if name is not None and name.lower() == constants.VALUE_NONE:
251 name = None
252 nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
253 network=net_uuid, nicparams=nicparams)
254 nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
255 nics.append(nic_obj)
256
257 return nics
258
261 """In case of conflicting IP address raise error.
262
263 @type ip: string
264 @param ip: IP address
265 @type node_uuid: string
266 @param node_uuid: node UUID
267
268 """
269 (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
270 if conf_net is not None:
271 raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
272 " network %s, but the target NIC does not." %
273 (ip, conf_net)),
274 errors.ECODE_STATE)
275
276 return (None, None)
277
282 """Compute if instance specs meets the specs of ipolicy.
283
284 @type ipolicy: dict
285 @param ipolicy: The ipolicy to verify against
286 @param instance_spec: dict
287 @param instance_spec: The instance spec to verify
288 @type disk_template: string
289 @param disk_template: the disk template of the instance
290 @param _compute_fn: The function to verify ipolicy (unittest only)
291 @see: L{ComputeIPolicySpecViolation}
292
293 """
294 mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
295 cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
296 disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
297 disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
298 nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
299 spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
300
301 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
302 disk_sizes, spindle_use, disk_template)
303
306 """Check whether an OS name conforms to the os variants specification.
307
308 @type os_obj: L{objects.OS}
309 @param os_obj: OS object to check
310 @type name: string
311 @param name: OS name passed by the user, to check for validity
312
313 """
314 variant = objects.OS.GetVariant(name)
315 if not os_obj.supported_variants:
316 if variant:
317 raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
318 " passed)" % (os_obj.name, variant),
319 errors.ECODE_INVAL)
320 return
321 if not variant:
322 raise errors.OpPrereqError("OS name must include a variant",
323 errors.ECODE_INVAL)
324
325 if variant not in os_obj.supported_variants:
326 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
327
330 """Create an instance.
331
332 """
333 HPATH = "instance-add"
334 HTYPE = constants.HTYPE_INSTANCE
335 REQ_BGL = False
336
350
392
394 """ Check validity of VLANs if given
395
396 """
397 for nic in self.op.nics:
398 vlan = nic.get(constants.INIC_VLAN, None)
399 if vlan:
400 if vlan[0] == ".":
401
402
403 if not vlan[1:].isdigit():
404 vlanlist = vlan[1:].split(':')
405 for vl in vlanlist:
406 if not vl.isdigit():
407 raise errors.OpPrereqError("Specified VLAN parameter is "
408 "invalid : %s" % vlan,
409 errors.ECODE_INVAL)
410 elif vlan[0] == ":":
411
412 vlanlist = vlan[1:].split(':')
413 for vl in vlanlist:
414 if not vl.isdigit():
415 raise errors.OpPrereqError("Specified VLAN parameter is invalid"
416 " : %s" % vlan, errors.ECODE_INVAL)
417 elif vlan.isdigit():
418
419
420 nic[constants.INIC_VLAN] = "." + vlan
421 else:
422 raise errors.OpPrereqError("Specified VLAN parameter is invalid"
423 " : %s" % vlan, errors.ECODE_INVAL)
424
426 """Check arguments.
427
428 """
429
430
431 if self.op.no_install and self.op.start:
432 self.LogInfo("No-installation mode selected, disabling startup")
433 self.op.start = False
434
435 self.op.instance_name = \
436 netutils.Hostname.GetNormalizedName(self.op.instance_name)
437
438 if self.op.ip_check and not self.op.name_check:
439
440 raise errors.OpPrereqError("Cannot do IP address check without a name"
441 " check", errors.ECODE_INVAL)
442
443
444 for nic in self.op.nics:
445 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
446
447 utils.ValidateDeviceNames("NIC", self.op.nics)
448
449 self._CheckVLANArguments()
450
451 self._CheckDiskArguments()
452 assert self.op.disk_template is not None
453
454
455 if self.op.name_check:
456 self.hostname = _CheckHostnameSane(self, self.op.instance_name)
457 self.op.instance_name = self.hostname.name
458
459 self.check_ip = self.hostname.ip
460 else:
461 self.check_ip = None
462
463
464 if (self.op.file_driver and
465 not self.op.file_driver in constants.FILE_DRIVER):
466 raise errors.OpPrereqError("Invalid file driver name '%s'" %
467 self.op.file_driver, errors.ECODE_INVAL)
468
469
470 if (not self.op.file_driver and
471 self.op.disk_template in constants.DTS_FILEBASED):
472 self.op.file_driver = constants.FD_DEFAULT
473
474
475 CheckIAllocatorOrNode(self, "iallocator", "pnode")
476
477 if self.op.pnode is not None:
478 if self.op.disk_template in constants.DTS_INT_MIRROR:
479 if self.op.snode is None:
480 raise errors.OpPrereqError("The networked disk templates need"
481 " a mirror node", errors.ECODE_INVAL)
482 elif self.op.snode:
483 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
484 " template")
485 self.op.snode = None
486
487 _CheckOpportunisticLocking(self.op)
488
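    # Mode-specific checks: imports force the OS variant, creations need a
    # valid (non-blacklisted) guest OS, and remote imports must present a
    # valid handshake and a signed X509 CA.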
489 if self.op.mode == constants.INSTANCE_IMPORT:
490
491
492
493 self.op.force_variant = True
494
495 if self.op.no_install:
496 self.LogInfo("No-installation mode has no effect during import")
497
498 elif self.op.mode == constants.INSTANCE_CREATE:
499 if self.op.os_type is None:
500 raise errors.OpPrereqError("No guest OS specified",
501 errors.ECODE_INVAL)
502 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
503 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
504 " installation" % self.op.os_type,
505 errors.ECODE_STATE)
506 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
507 self._cds = GetClusterDomainSecret()
508
509
510 src_handshake = self.op.source_handshake
511 if not src_handshake:
512 raise errors.OpPrereqError("Missing source handshake",
513 errors.ECODE_INVAL)
514
515 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
516 src_handshake)
517 if errmsg:
518 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
519 errors.ECODE_INVAL)
520
521
522 self.source_x509_ca_pem = self.op.source_x509_ca
523 if not self.source_x509_ca_pem:
524 raise errors.OpPrereqError("Missing source X509 CA",
525 errors.ECODE_INVAL)
526
527 try:
528 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
529 self._cds)
530 except OpenSSL.crypto.Error, err:
531 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
532 (err, ), errors.ECODE_INVAL)
533
534 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
535 if errcode is not None:
536 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
537 errors.ECODE_INVAL)
538
539 self.source_x509_ca = cert
540
541 src_instance_name = self.op.source_instance_name
542 if not src_instance_name:
543 raise errors.OpPrereqError("Missing source instance name",
544 errors.ECODE_INVAL)
545
546 self.source_instance_name = \
547 netutils.GetHostname(name=src_instance_name).name
548
549 else:
550 raise errors.OpPrereqError("Invalid instance creation mode %r" %
551 self.op.mode, errors.ECODE_INVAL)
552
554 """ExpandNames for CreateInstance.
555
556 Figure out the right locks for instance creation.
557
558 """
559 self.needed_locks = {}
560
561
562
563 if self.op.instance_name in\
564 [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
565 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
566 self.op.instance_name, errors.ECODE_EXISTS)
567
568 self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
569
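    # With an iallocator the target nodes are not known yet, so all node
    # locks (possibly opportunistic) are acquired; with explicitly named
    # nodes only those nodes need to be locked.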
570 if self.op.iallocator:
571
572
573
574 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
575 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
576
577 if self.op.opportunistic_locking:
578 self.opportunistic_locks[locking.LEVEL_NODE] = True
579 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
580 else:
581 (self.op.pnode_uuid, self.op.pnode) = \
582 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
583 nodelist = [self.op.pnode_uuid]
584 if self.op.snode is not None:
585 (self.op.snode_uuid, self.op.snode) = \
586 ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
587 nodelist.append(self.op.snode_uuid)
588 self.needed_locks[locking.LEVEL_NODE] = nodelist
589
590
591 if self.op.mode == constants.INSTANCE_IMPORT:
592 src_node = self.op.src_node
593 src_path = self.op.src_path
594
595 if src_path is None:
596 self.op.src_path = src_path = self.op.instance_name
597
598 if src_node is None:
599 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
600 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
601 self.op.src_node = None
602 if os.path.isabs(src_path):
603 raise errors.OpPrereqError("Importing an instance from a path"
604 " requires a source node option",
605 errors.ECODE_INVAL)
606 else:
607 (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
608 ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
609 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
610 self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
611 if not os.path.isabs(src_path):
612 self.op.src_path = \
613 utils.PathJoin(pathutils.EXPORT_DIR, src_path)
614
615 self.needed_locks[locking.LEVEL_NODE_RES] = \
616 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
617
618
619
620
621
622 if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
623
624
625
626
627 self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
628 else:
629 self.needed_locks[locking.LEVEL_NODEGROUP] = \
630 list(self.cfg.GetNodeGroupsFromNodes(
631 self.needed_locks[locking.LEVEL_NODE]))
632 self.share_locks[locking.LEVEL_NODEGROUP] = 1
633
635 """Run the allocator based on input opcode.
636
637 """
638 if self.op.opportunistic_locking:
639
640 node_name_whitelist = self.cfg.GetNodeNames(
641 set(self.owned_locks(locking.LEVEL_NODE)) &
642 set(self.owned_locks(locking.LEVEL_NODE_RES)))
643 else:
644 node_name_whitelist = None
645
646 req = _CreateInstanceAllocRequest(self.op, self.disks,
647 self.nics, self.be_full,
648 node_name_whitelist)
649 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
650
651 ial.Run(self.op.iallocator)
652
653 if not ial.success:
654
655 if self.op.opportunistic_locking:
656 ecode = errors.ECODE_TEMP_NORES
657 else:
658 ecode = errors.ECODE_NORES
659
660 raise errors.OpPrereqError("Can't compute nodes using"
661 " iallocator '%s': %s" %
662 (self.op.iallocator, ial.info),
663 ecode)
664
665 (self.op.pnode_uuid, self.op.pnode) = \
666 ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
667 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
668 self.op.instance_name, self.op.iallocator,
669 utils.CommaJoin(ial.result))
670
671 assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
672
673 if req.RequiredNodes() == 2:
674 (self.op.snode_uuid, self.op.snode) = \
675 ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
676
678 """Build hooks env.
679
680 This runs on master, primary and secondary nodes of the instance.
681
682 """
683 env = {
684 "ADD_MODE": self.op.mode,
685 }
686 if self.op.mode == constants.INSTANCE_IMPORT:
687 env["SRC_NODE"] = self.op.src_node
688 env["SRC_PATH"] = self.op.src_path
689 env["SRC_IMAGES"] = self.src_images
690
691 env.update(BuildInstanceHookEnv(
692 name=self.op.instance_name,
693 primary_node_name=self.op.pnode,
694 secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
695 status=self.op.start,
696 os_type=self.op.os_type,
697 minmem=self.be_full[constants.BE_MINMEM],
698 maxmem=self.be_full[constants.BE_MAXMEM],
699 vcpus=self.be_full[constants.BE_VCPUS],
700 nics=NICListToTuple(self, self.nics),
701 disk_template=self.op.disk_template,
702 disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
703 d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
704 for d in self.disks],
705 bep=self.be_full,
706 hvp=self.hv_full,
707 hypervisor_name=self.op.hypervisor,
708 tags=self.op.tags,
709 ))
710
711 return env
712
714 """Build hooks nodes.
715
716 """
717 nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
718 return nl, nl
719
765
767 """Use export parameters as defaults.
768
769 In case the opcode doesn't specify (as in override) some instance
770 parameters, then try to use them from the export information, if
771 that declares them.
772
773 """
774 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
775
776 if not self.op.disks:
777 disks = []
778
779 for idx in range(constants.MAX_DISKS):
780 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
781 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
782 disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
783 disk = {
784 constants.IDISK_SIZE: disk_sz,
785 constants.IDISK_NAME: disk_name
786 }
787 disks.append(disk)
788 self.op.disks = disks
789 if not disks and self.op.disk_template != constants.DT_DISKLESS:
790 raise errors.OpPrereqError("No disk info specified and the export"
791 " is missing the disk information",
792 errors.ECODE_INVAL)
793
794 if not self.op.nics:
795 nics = []
796 for idx in range(constants.MAX_NICS):
797 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
798 ndict = {}
799 for name in [constants.INIC_IP,
800 constants.INIC_MAC, constants.INIC_NAME]:
801 nic_param_name = "nic%d_%s" % (idx, name)
802 if einfo.has_option(constants.INISECT_INS, nic_param_name):
803 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
804 ndict[name] = v
805 network = einfo.get(constants.INISECT_INS,
806 "nic%d_%s" % (idx, constants.INIC_NETWORK))
807
808
809 if network:
810 ndict[constants.INIC_NETWORK] = network
811 else:
812 for name in list(constants.NICS_PARAMETERS):
813 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
814 ndict[name] = v
815 nics.append(ndict)
816 else:
817 break
818 self.op.nics = nics
819
820 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
821 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
822
823 if (self.op.hypervisor is None and
824 einfo.has_option(constants.INISECT_INS, "hypervisor")):
825 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
826
827 if einfo.has_section(constants.INISECT_HYP):
828
829
830 for name, value in einfo.items(constants.INISECT_HYP):
831 if name not in self.op.hvparams:
832 self.op.hvparams[name] = value
833
834 if einfo.has_section(constants.INISECT_BEP):
835
836 for name, value in einfo.items(constants.INISECT_BEP):
837 if name not in self.op.beparams:
838 self.op.beparams[name] = value
839
840 if name == constants.BE_MEMORY:
841 if constants.BE_MAXMEM not in self.op.beparams:
842 self.op.beparams[constants.BE_MAXMEM] = value
843 if constants.BE_MINMEM not in self.op.beparams:
844 self.op.beparams[constants.BE_MINMEM] = value
845 else:
846
847 for name in constants.BES_PARAMETERS:
848 if (name not in self.op.beparams and
849 einfo.has_option(constants.INISECT_INS, name)):
850 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
851
852 if einfo.has_section(constants.INISECT_OSP):
853
854 for name, value in einfo.items(constants.INISECT_OSP):
855 if name not in self.op.osparams:
856 self.op.osparams[name] = value
857
883
885 """Calculate final instance file storage dir.
886
887 """
888
889 self.instance_file_storage_dir = None
890 if self.op.disk_template in constants.DTS_FILEBASED:
891
892 joinargs = []
893
894 cfg_storage = None
895 if self.op.disk_template == constants.DT_FILE:
896 cfg_storage = self.cfg.GetFileStorageDir()
897 elif self.op.disk_template == constants.DT_SHARED_FILE:
898 cfg_storage = self.cfg.GetSharedFileStorageDir()
899 elif self.op.disk_template == constants.DT_GLUSTER:
900 cfg_storage = self.cfg.GetGlusterStorageDir()
901
902 if not cfg_storage:
903 raise errors.OpPrereqError(
904 "Cluster file storage dir for {tpl} storage type not defined".format(
905 tpl=repr(self.op.disk_template)
906 ),
907 errors.ECODE_STATE
908 )
909
910 joinargs.append(cfg_storage)
911
912 if self.op.file_storage_dir is not None:
913 joinargs.append(self.op.file_storage_dir)
914
915 if self.op.disk_template != constants.DT_GLUSTER:
916 joinargs.append(self.op.instance_name)
917
918 if len(joinargs) > 1:
919
920 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
921 else:
922 self.instance_file_storage_dir = joinargs[0]
923
925 """Check prerequisites.
926
927 """
928
929
930 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
931 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
932 cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
933 if not owned_groups.issuperset(cur_groups):
934 raise errors.OpPrereqError("New instance %s's node groups changed since"
935 " locks were acquired, current groups are"
936 " are '%s', owning groups '%s'; retry the"
937 " operation" %
938 (self.op.instance_name,
939 utils.CommaJoin(cur_groups),
940 utils.CommaJoin(owned_groups)),
941 errors.ECODE_STATE)
942
943 self._CalculateFileStorageDir()
944
945 if self.op.mode == constants.INSTANCE_IMPORT:
946 export_info = self._ReadExportInfo()
947 self._ReadExportParams(export_info)
948 self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
949 else:
950 self._old_instance_name = None
951
952 if (not self.cfg.GetVGName() and
953 self.op.disk_template not in constants.DTS_NOT_LVM):
954 raise errors.OpPrereqError("Cluster does not support lvm-based"
955 " instances", errors.ECODE_STATE)
956
957 if (self.op.hypervisor is None or
958 self.op.hypervisor == constants.VALUE_AUTO):
959 self.op.hypervisor = self.cfg.GetHypervisorType()
960
961 cluster = self.cfg.GetClusterInfo()
962 enabled_hvs = cluster.enabled_hypervisors
963 if self.op.hypervisor not in enabled_hvs:
964 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
965 " cluster (%s)" %
966 (self.op.hypervisor, ",".join(enabled_hvs)),
967 errors.ECODE_STATE)
968
969
970 for tag in self.op.tags:
971 objects.TaggableObject.ValidateTag(tag)
972
973
974 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
975 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
976 self.op.hvparams)
977 hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
978 hv_type.CheckParameterSyntax(filled_hvp)
979 self.hv_full = filled_hvp
980
981 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
982 "instance", "cluster")
983
984
985 self.be_full = _ComputeFullBeParams(self.op, cluster)
986
987
988 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
989
990
991
992 if self.op.identify_defaults:
993 self._RevertToDefaults(cluster)
994
995
996 self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
997 self.proc.GetECId())
998
999
1000 default_vg = self.cfg.GetVGName()
1001 self.disks = ComputeDisks(self.op, default_vg)
1002
1003 if self.op.mode == constants.INSTANCE_IMPORT:
1004 disk_images = []
1005 for idx in range(len(self.disks)):
1006 option = "disk%d_dump" % idx
1007 if export_info.has_option(constants.INISECT_INS, option):
1008
1009 export_name = export_info.get(constants.INISECT_INS, option)
1010 image = utils.PathJoin(self.op.src_path, export_name)
1011 disk_images.append(image)
1012 else:
1013 disk_images.append(False)
1014
1015 self.src_images = disk_images
1016
1017 if self.op.instance_name == self._old_instance_name:
1018 for idx, nic in enumerate(self.nics):
1019 if nic.mac == constants.VALUE_AUTO:
1020 nic_mac_ini = "nic%d_mac" % idx
1021 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
1022
1023
1024
1025
1026 if self.op.ip_check:
1027 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
1028 raise errors.OpPrereqError("IP %s of instance %s already in use" %
1029 (self.check_ip, self.op.instance_name),
1030 errors.ECODE_NOTUNIQUE)
1031
1032
1033
1034
1035
1036
1037
1038
1039
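    # Now that the execution context id is known, generate real MAC
    # addresses for all NICs still set to "auto" or "generate".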
1040 for nic in self.nics:
1041 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
1042 nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
1043
1044
1045
1046 if self.op.iallocator is not None:
1047 self._RunAllocator()
1048
1049
1050 keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
1051 self.op.src_node_uuid])
1052 ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
1053 ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
1054 ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
1055
1056 ReleaseLocks(self, locking.LEVEL_NODEGROUP,
1057 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))
1058
1059 assert (self.owned_locks(locking.LEVEL_NODE) ==
1060 self.owned_locks(locking.LEVEL_NODE_RES)), \
1061 "Node locks differ from node resource locks"
1062
1063
1064
1065
1066 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1067 assert self.pnode is not None, \
1068 "Cannot retrieve locked node %s" % self.op.pnode_uuid
1069 if pnode.offline:
1070 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
1071 pnode.name, errors.ECODE_STATE)
1072 if pnode.drained:
1073 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
1074 pnode.name, errors.ECODE_STATE)
1075 if not pnode.vm_capable:
1076 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
1077 " '%s'" % pnode.name, errors.ECODE_STATE)
1078
1079 self.secondaries = []
1080
1081
1082
1083 for idx, nic in enumerate(self.nics):
1084 net_uuid = nic.network
1085 if net_uuid is not None:
1086 nobj = self.cfg.GetNetwork(net_uuid)
1087 netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
1088 if netparams is None:
1089 raise errors.OpPrereqError("No netparams found for network"
1090 " %s. Probably not connected to"
1091 " node's %s nodegroup" %
1092 (nobj.name, self.pnode.name),
1093 errors.ECODE_INVAL)
1094 self.LogInfo("NIC/%d inherits netparams %s" %
1095 (idx, netparams.values()))
1096 nic.nicparams = dict(netparams)
1097 if nic.ip is not None:
1098 if nic.ip.lower() == constants.NIC_IP_POOL:
1099 try:
1100 nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1101 except errors.ReservationError:
1102 raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1103 " from the address pool" % idx,
1104 errors.ECODE_STATE)
1105 self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1106 else:
1107 try:
1108 self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
1109 check=self.op.conflicts_check)
1110 except errors.ReservationError:
1111 raise errors.OpPrereqError("IP address %s already in use"
1112 " or does not belong to network %s" %
1113 (nic.ip, nobj.name),
1114 errors.ECODE_NOTUNIQUE)
1115
1116
1117 elif self.op.conflicts_check:
1118 _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
1119
1120
1121 if self.op.disk_template in constants.DTS_INT_MIRROR:
1122 if self.op.snode_uuid == pnode.uuid:
1123 raise errors.OpPrereqError("The secondary node cannot be the"
1124 " primary node", errors.ECODE_INVAL)
1125 CheckNodeOnline(self, self.op.snode_uuid)
1126 CheckNodeNotDrained(self, self.op.snode_uuid)
1127 CheckNodeVmCapable(self, self.op.snode_uuid)
1128 self.secondaries.append(self.op.snode_uuid)
1129
1130 snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
1131 if pnode.group != snode.group:
1132 self.LogWarning("The primary and secondary nodes are in two"
1133 " different node groups; the disk parameters"
1134 " from the first disk's node group will be"
1135 " used")
1136
1137 nodes = [pnode]
1138 if self.op.disk_template in constants.DTS_INT_MIRROR:
1139 nodes.append(snode)
1140 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1141 excl_stor = compat.any(map(has_es, nodes))
1142 if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1143 raise errors.OpPrereqError("Disk template %s not supported with"
1144 " exclusive storage" % self.op.disk_template,
1145 errors.ECODE_STATE)
1146 for disk in self.disks:
1147 CheckSpindlesExclusiveStorage(disk, excl_stor, True)
1148
1149 node_uuids = [pnode.uuid] + self.secondaries
1150
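    # For newly created disks, check free space where the disk template
    # makes that possible; for adopted LVs or block devices, verify instead
    # that the named volumes exist, are unique and are not in use.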
1151 if not self.adopt_disks:
1152 if self.op.disk_template == constants.DT_RBD:
1153
1154
1155
1156 CheckRADOSFreeSpace()
1157 elif self.op.disk_template == constants.DT_EXT:
1158
1159 pass
1160 elif self.op.disk_template in constants.DTS_LVM:
1161
1162 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1163 CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
1164 else:
1165
1166 pass
1167
1168 elif self.op.disk_template == constants.DT_PLAIN:
1169 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1170 disk[constants.IDISK_ADOPT])
1171 for disk in self.disks])
1172 if len(all_lvs) != len(self.disks):
1173 raise errors.OpPrereqError("Duplicate volume names given for adoption",
1174 errors.ECODE_INVAL)
1175 for lv_name in all_lvs:
1176 try:
1177
1178
1179 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1180 except errors.ReservationError:
1181 raise errors.OpPrereqError("LV named %s used by another instance" %
1182 lv_name, errors.ECODE_NOTUNIQUE)
1183
1184 vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
1185 vg_names.Raise("Cannot get VG information from node %s" % pnode.name,
1186 prereq=True)
1187
1188 node_lvs = self.rpc.call_lv_list([pnode.uuid],
1189 vg_names.payload.keys())[pnode.uuid]
1190 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name,
1191 prereq=True)
1192 node_lvs = node_lvs.payload
1193
1194 delta = all_lvs.difference(node_lvs.keys())
1195 if delta:
1196 raise errors.OpPrereqError("Missing logical volume(s): %s" %
1197 utils.CommaJoin(delta),
1198 errors.ECODE_INVAL)
1199 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1200 if online_lvs:
1201 raise errors.OpPrereqError("Online logical volumes found, cannot"
1202 " adopt: %s" % utils.CommaJoin(online_lvs),
1203 errors.ECODE_STATE)
1204
1205 for dsk in self.disks:
1206 dsk[constants.IDISK_SIZE] = \
1207 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1208 dsk[constants.IDISK_ADOPT])][0]))
1209
1210 elif self.op.disk_template == constants.DT_BLOCK:
1211
1212 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1213 for disk in self.disks])
1214 if len(all_disks) != len(self.disks):
1215 raise errors.OpPrereqError("Duplicate disk names given for adoption",
1216 errors.ECODE_INVAL)
1217 baddisks = [d for d in all_disks
1218 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1219 if baddisks:
1220 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1221 " cannot be adopted" %
1222 (utils.CommaJoin(baddisks),
1223 constants.ADOPTABLE_BLOCKDEV_ROOT),
1224 errors.ECODE_INVAL)
1225
1226 node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
1227 list(all_disks))[pnode.uuid]
1228 node_disks.Raise("Cannot get block device information from node %s" %
1229 pnode.name, prereq=True)
1230 node_disks = node_disks.payload
1231 delta = all_disks.difference(node_disks.keys())
1232 if delta:
1233 raise errors.OpPrereqError("Missing block device(s): %s" %
1234 utils.CommaJoin(delta),
1235 errors.ECODE_INVAL)
1236 for dsk in self.disks:
1237 dsk[constants.IDISK_SIZE] = \
1238 int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1239
1240
1241 node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1242 node_group = self.cfg.GetNodeGroup(node_info.group)
1243 disk_params = self.cfg.GetGroupDiskParams(node_group)
1244 access_type = disk_params[self.op.disk_template].get(
1245 constants.RBD_ACCESS, constants.DISK_KERNELSPACE
1246 )
1247
1248 if not IsValidDiskAccessModeCombination(self.op.hypervisor,
1249 self.op.disk_template,
1250 access_type):
1251 raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
1252 " used with %s disk access param" %
1253 (self.op.hypervisor, access_type),
1254 errors.ECODE_STATE)
1255
1256
1257 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1258 ispec = {
1259 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1260 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1261 constants.ISPEC_DISK_COUNT: len(self.disks),
1262 constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1263 for disk in self.disks],
1264 constants.ISPEC_NIC_COUNT: len(self.nics),
1265 constants.ISPEC_SPINDLE_USE: spindle_use,
1266 }
1267
1268 group_info = self.cfg.GetNodeGroup(pnode.group)
1269 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1270 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1271 self.op.disk_template)
1272 if not self.op.ignore_ipolicy and res:
1273 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1274 (pnode.group, group_info.name, utils.CommaJoin(res)))
1275 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1276
1277 CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
1278
1279 CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
1280
1281 CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)
1282
1283 CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
1284
1285
1286
1287
1288
1289
1290 if self.op.start:
1291 hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
1292 self.op.hvparams)
1293 CheckNodeFreeMemory(self, self.pnode.uuid,
1294 "creating instance %s" % self.op.instance_name,
1295 self.be_full[constants.BE_MAXMEM],
1296 self.op.hypervisor, hvfull)
1297
1298 self.dry_run_result = list(node_uuids)
1299
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
1304 assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1305 self.owned_locks(locking.LEVEL_NODE)), \
1306 "Node locks differ from node resource locks"
1307 assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1308
1309 ht_kind = self.op.hypervisor
1310 if ht_kind in constants.HTS_REQ_PORT:
1311 network_port = self.cfg.AllocatePort()
1312 else:
1313 network_port = None
1314
1315 instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1316
1317
1318
1319
1320 nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1321 disks = GenerateDiskTemplate(self,
1322 self.op.disk_template,
1323 instance_uuid, self.pnode.uuid,
1324 self.secondaries,
1325 self.disks,
1326 self.instance_file_storage_dir,
1327 self.op.file_driver,
1328 0,
1329 feedback_fn,
1330 self.cfg.GetGroupDiskParams(nodegroup))
1331
1332 iobj = objects.Instance(name=self.op.instance_name,
1333 uuid=instance_uuid,
1334 os=self.op.os_type,
1335 primary_node=self.pnode.uuid,
1336 nics=self.nics, disks=disks,
1337 disk_template=self.op.disk_template,
1338 disks_active=False,
1339 admin_state=constants.ADMINST_DOWN,
1340 admin_state_source=constants.ADMIN_SOURCE,
1341 network_port=network_port,
1342 beparams=self.op.beparams,
1343 hvparams=self.op.hvparams,
1344 hypervisor=self.op.hypervisor,
1345 osparams=self.op.osparams,
1346 )
1347
1348 if self.op.tags:
1349 for tag in self.op.tags:
1350 iobj.AddTag(tag)
1351
1352 if self.adopt_disks:
1353 if self.op.disk_template == constants.DT_PLAIN:
1354
1355
1356 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1357 rename_to = []
1358 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1359 rename_to.append(t_dsk.logical_id)
1360 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1361 result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1362 zip(tmp_disks, rename_to))
1363 result.Raise("Failed to rename adoped LVs")
1364 else:
1365 feedback_fn("* creating instance disks...")
1366 try:
1367 CreateDisks(self, iobj)
1368 except errors.OpExecError:
1369 self.LogWarning("Device creation failed")
1370 self.cfg.ReleaseDRBDMinors(instance_uuid)
1371 raise
1372
1373 feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1374
1375 self.cfg.AddInstance(iobj, self.proc.GetECId())
1376
1377
1378
1379 del self.remove_locks[locking.LEVEL_INSTANCE]
1380
1381 if self.op.mode == constants.INSTANCE_IMPORT:
1382
1383 ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1384 else:
1385
1386 ReleaseLocks(self, locking.LEVEL_NODE)
1387
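    # Optionally wipe the new disks, then wait for the initial sync unless
    # the caller asked not to; a failure in either step aborts the creation
    # and removes the instance again.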
1388 disk_abort = False
1389 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1390 feedback_fn("* wiping instance disks...")
1391 try:
1392 WipeDisks(self, iobj)
1393 except errors.OpExecError, err:
1394 logging.exception("Wiping disks failed")
1395 self.LogWarning("Wiping instance disks failed (%s)", err)
1396 disk_abort = True
1397
1398 if disk_abort:
1399
1400 pass
1401 elif self.op.wait_for_sync:
1402 disk_abort = not WaitForSync(self, iobj)
1403 elif iobj.disk_template in constants.DTS_INT_MIRROR:
1404
1405 feedback_fn("* checking mirrors status")
1406 disk_abort = not WaitForSync(self, iobj, oneshot=True)
1407 else:
1408 disk_abort = False
1409
1410 if disk_abort:
1411 RemoveDisks(self, iobj)
1412 self.cfg.RemoveInstance(iobj.uuid)
1413
1414 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1415 raise errors.OpExecError("There are some degraded disks for"
1416 " this instance")
1417
1418
1419 iobj.disks_active = True
1420
1421
1422 ReleaseLocks(self, locking.LEVEL_NODE_RES)
1423
1424 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1425 if self.op.mode == constants.INSTANCE_CREATE:
1426 if not self.op.no_install:
1427 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1428 not self.op.wait_for_sync)
1429 if pause_sync:
1430 feedback_fn("* pausing disk sync to install instance OS")
1431 result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1432 (iobj.disks,
1433 iobj), True)
1434 for idx, success in enumerate(result.payload):
1435 if not success:
1436 logging.warn("pause-sync of instance %s for disk %d failed",
1437 self.op.instance_name, idx)
1438
1439 feedback_fn("* running the instance OS create scripts...")
1440
1441 os_add_result = \
1442 self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1443 self.op.debug_level)
1444 if pause_sync:
1445 feedback_fn("* resuming disk sync")
1446 result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1447 (iobj.disks,
1448 iobj), False)
1449 for idx, success in enumerate(result.payload):
1450 if not success:
1451 logging.warn("resume-sync of instance %s for disk %d failed",
1452 self.op.instance_name, idx)
1453
1454 os_add_result.Raise("Could not add os for instance %s"
1455 " on node %s" % (self.op.instance_name,
1456 self.pnode.name))
1457
1458 else:
1459 if self.op.mode == constants.INSTANCE_IMPORT:
1460 feedback_fn("* running the instance OS import scripts...")
1461
1462 transfers = []
1463
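          # Build one DiskTransfer per exported disk image; disks without a
          # dump in the export are skipped.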
1464 for idx, image in enumerate(self.src_images):
1465 if not image:
1466 continue
1467
1468
1469 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1470 constants.IEIO_FILE, (image, ),
1471 constants.IEIO_SCRIPT,
1472 ((iobj.disks[idx], iobj), idx),
1473 None)
1474 transfers.append(dt)
1475
1476 import_result = \
1477 masterd.instance.TransferInstanceData(self, feedback_fn,
1478 self.op.src_node_uuid,
1479 self.pnode.uuid,
1480 self.pnode.secondary_ip,
1481 self.op.compress,
1482 iobj, transfers)
1483 if not compat.all(import_result):
1484 self.LogWarning("Some disks for instance %s on node %s were not"
1485 " imported successfully" % (self.op.instance_name,
1486 self.pnode.name))
1487
1488 rename_from = self._old_instance_name
1489
1490 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1491 feedback_fn("* preparing remote import...")
1492
1493
1494
1495
1496 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1497 self.op.source_shutdown_timeout)
1498 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1499
1500 assert iobj.primary_node == self.pnode.uuid
1501 disk_results = \
1502 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1503 self.source_x509_ca,
1504 self._cds, self.op.compress, timeouts)
1505 if not compat.all(disk_results):
1506
1507
1508 self.LogWarning("Some disks for instance %s on node %s were not"
1509 " imported successfully" % (self.op.instance_name,
1510 self.pnode.name))
1511
1512 rename_from = self.source_instance_name
1513
1514 else:
1515
1516 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1517 % self.op.mode)
1518
1519
1520 assert iobj.name == self.op.instance_name
1521 feedback_fn("Running rename script for %s" % self.op.instance_name)
1522 result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1523 rename_from,
1524 self.op.debug_level)
1525 result.Warn("Failed to run rename script for %s on node %s" %
1526 (self.op.instance_name, self.pnode.name), self.LogWarning)
1527
1528 assert not self.owned_locks(locking.LEVEL_NODE_RES)
1529
1530 if self.op.start:
1531 iobj.admin_state = constants.ADMINST_UP
1532 self.cfg.Update(iobj, feedback_fn)
1533 logging.info("Starting instance %s on node %s", self.op.instance_name,
1534 self.pnode.name)
1535 feedback_fn("* starting instance...")
1536 result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1537 False, self.op.reason)
1538 result.Raise("Could not start instance")
1539
1540 return self.cfg.GetNodeNames(list(iobj.all_nodes))
1541
1544 """Rename an instance.
1545
1546 """
1547 HPATH = "instance-rename"
1548 HTYPE = constants.HTYPE_INSTANCE
1549
1551 """Check arguments.
1552
1553 """
1554 if self.op.ip_check and not self.op.name_check:
1555
1556 raise errors.OpPrereqError("IP address check requires a name check",
1557 errors.ECODE_INVAL)
1558
1560 """Build hooks env.
1561
1562 This runs on master, primary and secondary nodes of the instance.
1563
1564 """
1565 env = BuildInstanceHookEnvByObject(self, self.instance)
1566 env["INSTANCE_NEW_NAME"] = self.op.new_name
1567 return env
1568
1575
1577 """Check prerequisites.
1578
1579 This checks that the instance is in the cluster and is not running.
1580
1581 """
1582 (self.op.instance_uuid, self.op.instance_name) = \
1583 ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1584 self.op.instance_name)
1585 instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1586 assert instance is not None
1587
1588
1589
1590
1591 if (instance.disk_template in constants.DTS_FILEBASED and
1592 self.op.new_name != instance.name):
1593 CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1594 instance.disk_template)
1595
1596 CheckNodeOnline(self, instance.primary_node)
1597 CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1598 msg="cannot rename")
1599 self.instance = instance
1600
1601 new_name = self.op.new_name
1602 if self.op.name_check:
1603 hostname = _CheckHostnameSane(self, new_name)
1604 new_name = self.op.new_name = hostname.name
1605 if (self.op.ip_check and
1606 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1607 raise errors.OpPrereqError("IP %s of instance %s already in use" %
1608 (hostname.ip, new_name),
1609 errors.ECODE_NOTUNIQUE)
1610
1611 instance_names = [inst.name for
1612 inst in self.cfg.GetAllInstancesInfo().values()]
1613 if new_name in instance_names and new_name != instance.name:
1614 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1615 new_name, errors.ECODE_EXISTS)
1616
  def Exec(self, feedback_fn):
    """Rename the instance.

    """
1621 old_name = self.instance.name
1622
1623 rename_file_storage = False
1624 if (self.instance.disk_template in (constants.DT_FILE,
1625 constants.DT_SHARED_FILE) and
1626 self.op.new_name != self.instance.name):
1627 old_file_storage_dir = os.path.dirname(
1628 self.instance.disks[0].logical_id[1])
1629 rename_file_storage = True
1630
1631 self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1632
1633
1634 assert self.REQ_BGL
1635 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1636 self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1637 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1638
1639
1640 renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1641
1642 if rename_file_storage:
1643 new_file_storage_dir = os.path.dirname(
1644 renamed_inst.disks[0].logical_id[1])
1645 result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1646 old_file_storage_dir,
1647 new_file_storage_dir)
1648 result.Raise("Could not rename on node %s directory '%s' to '%s'"
1649 " (but the instance has been renamed in Ganeti)" %
1650 (self.cfg.GetNodeName(renamed_inst.primary_node),
1651 old_file_storage_dir, new_file_storage_dir))
1652
1653 StartInstanceDisks(self, renamed_inst, None)
1654
1655 info = GetInstanceInfoText(renamed_inst)
1656 for (idx, disk) in enumerate(renamed_inst.disks):
1657 for node_uuid in renamed_inst.all_nodes:
1658 result = self.rpc.call_blockdev_setinfo(node_uuid,
1659 (disk, renamed_inst), info)
1660 result.Warn("Error setting info on node %s for disk %s" %
1661 (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1662 try:
1663 result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1664 renamed_inst, old_name,
1665 self.op.debug_level)
1666 result.Warn("Could not run OS rename script for instance %s on node %s"
1667 " (but the instance has been renamed in Ganeti)" %
1668 (renamed_inst.name,
1669 self.cfg.GetNodeName(renamed_inst.primary_node)),
1670 self.LogWarning)
1671 finally:
1672 ShutdownInstanceDisks(self, renamed_inst)
1673
1674 return renamed_inst.name
1675
1678 """Remove an instance.
1679
1680 """
1681 HPATH = "instance-remove"
1682 HTYPE = constants.HTYPE_INSTANCE
1683 REQ_BGL = False
1684
1690
1698
1700 """Build hooks env.
1701
1702 This runs on master, primary and secondary nodes of the instance.
1703
1704 """
1705 env = BuildInstanceHookEnvByObject(self, self.instance)
1706 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1707 return env
1708
1710 """Build hooks nodes.
1711
1712 """
1713 nl = [self.cfg.GetMasterNode()]
1714 nl_post = list(self.instance.all_nodes) + nl
1715 return (nl, nl_post)
1716
1718 """Check prerequisites.
1719
1720 This checks that the instance is in the cluster.
1721
1722 """
1723 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1724 assert self.instance is not None, \
1725 "Cannot retrieve locked instance %s" % self.op.instance_name
1726
  def Exec(self, feedback_fn):
    """Remove the instance.

    """
1731 logging.info("Shutting down instance %s on node %s", self.instance.name,
1732 self.cfg.GetNodeName(self.instance.primary_node))
1733
1734 result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1735 self.instance,
1736 self.op.shutdown_timeout,
1737 self.op.reason)
1738 if self.op.ignore_failures:
1739 result.Warn("Warning: can't shutdown instance", feedback_fn)
1740 else:
1741 result.Raise("Could not shutdown instance %s on node %s" %
1742 (self.instance.name,
1743 self.cfg.GetNodeName(self.instance.primary_node)))
1744
1745 assert (self.owned_locks(locking.LEVEL_NODE) ==
1746 self.owned_locks(locking.LEVEL_NODE_RES))
1747 assert not (set(self.instance.all_nodes) -
1748 self.owned_locks(locking.LEVEL_NODE)), \
1749 "Not owning correct locks"
1750
1751 RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1752
1755 """Move an instance by data-copying.
1756
1757 """
1758 HPATH = "instance-move"
1759 HTYPE = constants.HTYPE_INSTANCE
1760 REQ_BGL = False
1761
1770
1778
1780 """Build hooks env.
1781
1782 This runs on master, primary and target nodes of the instance.
1783
1784 """
1785 env = {
1786 "TARGET_NODE": self.op.target_node,
1787 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1788 }
1789 env.update(BuildInstanceHookEnvByObject(self, self.instance))
1790 return env
1791
1793 """Build hooks nodes.
1794
1795 """
1796 nl = [
1797 self.cfg.GetMasterNode(),
1798 self.instance.primary_node,
1799 self.op.target_node_uuid,
1800 ]
1801 return (nl, nl)
1802
1804 """Check prerequisites.
1805
1806 This checks that the instance is in the cluster.
1807
1808 """
1809 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1810 assert self.instance is not None, \
1811 "Cannot retrieve locked instance %s" % self.op.instance_name
1812
1813 if self.instance.disk_template not in constants.DTS_COPYABLE:
1814 raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1815 self.instance.disk_template,
1816 errors.ECODE_STATE)
1817
1818 target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1819 assert target_node is not None, \
1820 "Cannot retrieve locked node %s" % self.op.target_node
1821
1822 self.target_node_uuid = target_node.uuid
1823 if target_node.uuid == self.instance.primary_node:
1824 raise errors.OpPrereqError("Instance %s is already on the node %s" %
1825 (self.instance.name, target_node.name),
1826 errors.ECODE_STATE)
1827
1828 cluster = self.cfg.GetClusterInfo()
1829 bep = cluster.FillBE(self.instance)
1830
1831 for idx, dsk in enumerate(self.instance.disks):
1832 if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1833 constants.DT_SHARED_FILE, constants.DT_GLUSTER):
1834 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1835 " cannot copy" % idx, errors.ECODE_STATE)
1836
1837 CheckNodeOnline(self, target_node.uuid)
1838 CheckNodeNotDrained(self, target_node.uuid)
1839 CheckNodeVmCapable(self, target_node.uuid)
1840 group_info = self.cfg.GetNodeGroup(target_node.group)
1841 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1842 CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1843 ignore=self.op.ignore_ipolicy)
1844
1845 if self.instance.admin_state == constants.ADMINST_UP:
1846
1847 CheckNodeFreeMemory(
1848 self, target_node.uuid, "failing over instance %s" %
1849 self.instance.name, bep[constants.BE_MAXMEM],
1850 self.instance.hypervisor,
1851 cluster.hvparams[self.instance.hypervisor])
1852 else:
1853 self.LogInfo("Not checking memory on the secondary node as"
1854 " instance will not be started")
1855
1856
1857 CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1858
  def Exec(self, feedback_fn):
1860 """Move an instance.
1861
1862 The move is done by shutting it down on its present node, copying
1863 the data over (slow) and starting it on the new node.
1864
1865 """
1866 source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1867 target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1868
1869 self.LogInfo("Shutting down instance %s on source node %s",
1870 self.instance.name, source_node.name)
1871
1872 assert (self.owned_locks(locking.LEVEL_NODE) ==
1873 self.owned_locks(locking.LEVEL_NODE_RES))
1874
1875 result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1876 self.op.shutdown_timeout,
1877 self.op.reason)
1878 if self.op.ignore_consistency:
1879 result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1880 " anyway. Please make sure node %s is down. Error details" %
1881 (self.instance.name, source_node.name, source_node.name),
1882 self.LogWarning)
1883 else:
1884 result.Raise("Could not shutdown instance %s on node %s" %
1885 (self.instance.name, source_node.name))
1886
1887
1888 try:
1889 CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1890 except errors.OpExecError:
1891 self.LogWarning("Device creation failed")
1892 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1893 raise
1894
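    # Copy the data disk by disk: each disk is transferred raw from the
    # source node to the newly created disk on the target node.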
1895 errs = []
1896 transfers = []
1897
1898 for idx, disk in enumerate(self.instance.disks):
1899
1900 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1901 constants.IEIO_RAW_DISK,
1902 (disk, self.instance),
1903 constants.IEIO_RAW_DISK,
1904 (disk, self.instance),
1905 None)
1906 transfers.append(dt)
1907
1908 import_result = \
1909 masterd.instance.TransferInstanceData(self, feedback_fn,
1910 source_node.uuid,
1911 target_node.uuid,
1912 target_node.secondary_ip,
1913 self.op.compress,
1914 self.instance, transfers)
1915 if not compat.all(import_result):
1916 errs.append("Failed to transfer instance data")
1917
1918 if errs:
1919 self.LogWarning("Some disks failed to copy, aborting")
1920 try:
1921 RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1922 finally:
1923 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1924 raise errors.OpExecError("Errors during disk copy: %s" %
1925 (",".join(errs),))
1926
1927 self.instance.primary_node = target_node.uuid
1928 self.cfg.Update(self.instance, feedback_fn)
1929
1930 self.LogInfo("Removing the disks on the original node")
1931 RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1932
1933
1934 if self.instance.admin_state == constants.ADMINST_UP:
1935 self.LogInfo("Starting instance %s on node %s",
1936 self.instance.name, target_node.name)
1937
1938 disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1939 ignore_secondaries=True)
1940 if not disks_ok:
1941 ShutdownInstanceDisks(self, self.instance)
1942 raise errors.OpExecError("Can't activate the instance's disks")
1943
1944 result = self.rpc.call_instance_start(target_node.uuid,
1945 (self.instance, None, None), False,
1946 self.op.reason)
1947 msg = result.fail_msg
1948 if msg:
1949 ShutdownInstanceDisks(self, self.instance)
1950 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1951 (self.instance.name, target_node.name, msg))
1952
1955 """Allocates multiple instances at the same time.
1956
1957 """
1958 REQ_BGL = False
1959
1961 """Check arguments.
1962
1963 """
1964 nodes = []
1965 for inst in self.op.instances:
1966 if inst.iallocator is not None:
1967 raise errors.OpPrereqError("iallocator are not allowed to be set on"
1968 " instance objects", errors.ECODE_INVAL)
1969 nodes.append(bool(inst.pnode))
1970 if inst.disk_template in constants.DTS_INT_MIRROR:
1971 nodes.append(bool(inst.snode))
1972
1973 has_nodes = compat.any(nodes)
1974 if compat.all(nodes) ^ has_nodes:
1975 raise errors.OpPrereqError("There are instance objects providing"
1976 " pnode/snode while others do not",
1977 errors.ECODE_INVAL)
1978
1979 if not has_nodes and self.op.iallocator is None:
1980 default_iallocator = self.cfg.GetDefaultIAllocator()
1981 if default_iallocator:
1982 self.op.iallocator = default_iallocator
1983 else:
1984 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1985 " given and no cluster-wide default"
1986 " iallocator found; please specify either"
1987 " an iallocator or nodes on the instances"
1988 " or set a cluster-wide default iallocator",
1989 errors.ECODE_INVAL)
1990
1991 _CheckOpportunisticLocking(self.op)
1992
1993 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1994 if dups:
1995 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1996 utils.CommaJoin(dups), errors.ECODE_INVAL)
1997
1999 """Calculate the locks.
2000
2001 """
2002 self.share_locks = ShareAll()
2003 self.needed_locks = {
2004
2005
2006 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
2007 }
2008
2009 if self.op.iallocator:
2010 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2011 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
2012
2013 if self.op.opportunistic_locking:
2014 self.opportunistic_locks[locking.LEVEL_NODE] = True
2015 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
2016 else:
2017 nodeslist = []
2018 for inst in self.op.instances:
2019 (inst.pnode_uuid, inst.pnode) = \
2020 ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
2021 nodeslist.append(inst.pnode_uuid)
2022 if inst.snode is not None:
2023 (inst.snode_uuid, inst.snode) = \
2024 ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
2025 nodeslist.append(inst.snode_uuid)
2026
2027 self.needed_locks[locking.LEVEL_NODE] = nodeslist
2028
2029
2030 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
2031
2033 """Check prerequisite.
2034
2035 """
2036 if self.op.iallocator:
2037 cluster = self.cfg.GetClusterInfo()
2038 default_vg = self.cfg.GetVGName()
2039 ec_id = self.proc.GetECId()
2040
2041 if self.op.opportunistic_locking:
2042
2043 node_whitelist = self.cfg.GetNodeNames(
2044 set(self.owned_locks(locking.LEVEL_NODE)) &
2045 set(self.owned_locks(locking.LEVEL_NODE_RES)))
2046 else:
2047 node_whitelist = None
2048
2049 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
2050 _ComputeNics(op, cluster, None,
2051 self.cfg, ec_id),
2052 _ComputeFullBeParams(op, cluster),
2053 node_whitelist)
2054 for op in self.op.instances]
2055
2056 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
2057 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
2058
2059 ial.Run(self.op.iallocator)
2060
2061 if not ial.success:
2062 raise errors.OpPrereqError("Can't compute nodes using"
2063 " iallocator '%s': %s" %
2064 (self.op.iallocator, ial.info),
2065 errors.ECODE_NORES)
2066
2067 self.ia_result = ial.result
2068
2069 if self.op.dry_run:
2070 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2071 constants.JOB_IDS_KEY: [],
2072 })
2073
2075 """Contructs the partial result.
2076
2077 """
2078 if self.op.iallocator:
2079 (allocatable, failed_insts) = self.ia_result
2080 allocatable_insts = map(compat.fst, allocatable)
2081 else:
2082 allocatable_insts = [op.instance_name for op in self.op.instances]
2083 failed_insts = []
2084
2085 return {
2086 constants.ALLOCATABLE_KEY: allocatable_insts,
2087 constants.FAILED_KEY: failed_insts,
2088 }
2089
2090 def Exec(self, feedback_fn):
2091 """Executes the opcode.
2092
2093 """
2094 jobs = []
2095 if self.op.iallocator:
2096 op2inst = dict((op.instance_name, op) for op in self.op.instances)
2097 (allocatable, failed) = self.ia_result
2098
2099 for (name, node_names) in allocatable:
2100 op = op2inst.pop(name)
2101
2102 (op.pnode_uuid, op.pnode) = \
2103 ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2104 if len(node_names) > 1:
2105 (op.snode_uuid, op.snode) = \
2106 ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2107
2108 jobs.append([op])
2109
2110 missing = set(op2inst.keys()) - set(failed)
2111 assert not missing, \
2112 "Iallocator did return incomplete result: %s" % \
2113 utils.CommaJoin(missing)
2114 else:
2115 jobs.extend([op] for op in self.op.instances)
2116
2117 return ResultWithJobs(jobs, **self._ConstructPartialResult())
2118
2121 """Data structure for network interface modifications.
2122
2123 Used by L{LUInstanceSetParams}.
2124
2125 """
2126 def __init__(self):
2127 self.params = None
2128 self.filled = None
2129
2132 """Prepares a list of container modifications by adding a private data field.
2133
2134 @type mods: list of tuples; (operation, index, parameters)
2135 @param mods: List of modifications
2136 @type private_fn: callable or None
2137 @param private_fn: Callable for constructing a private data field for a
2138 modification
2139 @rtype: list
2140
2141 """
2142 if private_fn is None:
2143 fn = lambda: None
2144 else:
2145 fn = private_fn
2146
2147 return [(op, idx, params, fn()) for (op, idx, params) in mods]
2148
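# Illustrative sketch (editor's note): the hypothetical mods list below shows
# the shape _PrepareContainerMods expects and what it returns when no
# private_fn is given.
#
#   mods = [(constants.DDM_ADD, -1, {"size": 1024}),
#           (constants.DDM_MODIFY, 0, {"mode": constants.DISK_RDONLY})]
#   _PrepareContainerMods(mods, None)
#   # -> [(constants.DDM_ADD, -1, {"size": 1024}, None),
#   #     (constants.DDM_MODIFY, 0, {"mode": constants.DISK_RDONLY}, None)]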
2151 """Checks if nodes have enough physical CPUs
2152
2153 This function checks if all given nodes have the needed number of
2154 physical CPUs. In case any node has fewer CPUs or we cannot get the
2155 information from the node, this function raises an OpPrereqError
2156 exception.
2157
2158 @type lu: C{LogicalUnit}
2159 @param lu: a logical unit from which we get configuration data
2160 @type node_uuids: C{list}
2161 @param node_uuids: the list of node UUIDs to check
2162 @type requested: C{int}
2163 @param requested: the minimum acceptable number of physical CPUs
2164 @type hypervisor_specs: list of pairs (string, dict of strings)
2165 @param hypervisor_specs: list of hypervisor specifications in
2166 pairs (hypervisor_name, hvparams)
2167 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2168 or we cannot check the node
2169
2170 """
2171 nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2172 for node_uuid in node_uuids:
2173 info = nodeinfo[node_uuid]
2174 node_name = lu.cfg.GetNodeName(node_uuid)
2175 info.Raise("Cannot get current information from node %s" % node_name,
2176 prereq=True, ecode=errors.ECODE_ENVIRON)
2177 (_, _, (hv_info, )) = info.payload
2178 num_cpus = hv_info.get("cpu_total", None)
2179 if not isinstance(num_cpus, int):
2180 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2181 " on node %s, result was '%s'" %
2182 (node_name, num_cpus), errors.ECODE_ENVIRON)
2183 if requested > num_cpus:
2184 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2185 "required" % (node_name, num_cpus, requested),
2186 errors.ECODE_NORES)
2187
2190 """Return the item refered by the identifier.
2191
2192 @type identifier: string
2193 @param identifier: Item index or name or UUID
2194 @type kind: string
2195 @param kind: One-word item description
2196 @type container: list
2197 @param container: Container to get the item from
2198
2199 """
2200
2201 try:
2202 idx = int(identifier)
2203 if idx == -1:
2204
2205 absidx = len(container) - 1
2206 elif idx < 0:
2207 raise IndexError("Not accepting negative indices other than -1")
2208 elif idx > len(container):
2209 raise IndexError("Got %s index %s, but there are only %s" %
2210 (kind, idx, len(container)))
2211 else:
2212 absidx = idx
2213 return (absidx, container[idx])
2214 except ValueError:
2215 pass
2216
2217 for idx, item in enumerate(container):
2218 if item.uuid == identifier or item.name == identifier:
2219 return (idx, item)
2220
2221 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2222 (kind, identifier), errors.ECODE_NOENT)
2223
2224
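# Illustrative usage (editor's note): GetItemFromContainer accepts a numeric
# index (-1 meaning the last item) or a name/UUID and returns the pair
# (absolute_index, item); "data-disk" below is a hypothetical disk name.
#
#   (idx, nic) = GetItemFromContainer("-1", "nic", instance.nics)
#   # idx == len(instance.nics) - 1
#   (idx, disk) = GetItemFromContainer("data-disk", "disk", instance.disks)
#   # looks the disk up by its name or UUID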
2225 def _ApplyContainerMods(kind, container, chgdesc, mods,
2226 create_fn, modify_fn, remove_fn,
2227 post_add_fn=None):
2228 """Applies descriptions in C{mods} to C{container}.
2229
2230 @type kind: string
2231 @param kind: One-word item description
2232 @type container: list
2233 @param container: Container to modify
2234 @type chgdesc: None or list
2235 @param chgdesc: List of applied changes
2236 @type mods: list
2237 @param mods: Modifications as returned by L{_PrepareContainerMods}
2238 @type create_fn: callable
2239 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2240 receives absolute item index, parameters and private data object as added
2241 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2242 as list
2243 @type modify_fn: callable
2244 @param modify_fn: Callback for modifying an existing item
2245 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2246 and private data object as added by L{_PrepareContainerMods}, returns
2247 changes as list
2248 @type remove_fn: callable
2249 @param remove_fn: Callback on removing item; receives absolute item index,
2250 item and private data object as added by L{_PrepareContainerMods}
2251 @type post_add_fn: callable
2252 @param post_add_fn: Callable for post-processing a newly created item after
2253 it has been put into the container. It receives the index of the new item
2254 and the new item as parameters.
2255
2256 """
2257 for (op, identifier, params, private) in mods:
2258 changes = None
2259
2260 if op == constants.DDM_ADD:
2261
2262
2263 try:
2264 idx = int(identifier)
2265 except ValueError:
2266 raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2267 " identifier for %s" % constants.DDM_ADD,
2268 errors.ECODE_INVAL)
2269 if idx == -1:
2270 addidx = len(container)
2271 else:
2272 if idx < 0:
2273 raise IndexError("Not accepting negative indices other than -1")
2274 elif idx > len(container):
2275 raise IndexError("Got %s index %s, but there are only %s" %
2276 (kind, idx, len(container)))
2277 addidx = idx
2278
2279 if create_fn is None:
2280 item = params
2281 else:
2282 (item, changes) = create_fn(addidx, params, private)
2283
2284 if idx == -1:
2285 container.append(item)
2286 else:
2287 assert idx >= 0
2288 assert idx <= len(container)
2289
2290 container.insert(idx, item)
2291
2292 if post_add_fn is not None:
2293 post_add_fn(addidx, item)
2294
2295 else:
2296
2297 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2298
2299 if op == constants.DDM_REMOVE:
2300 assert not params
2301
2302 changes = [("%s/%s" % (kind, absidx), "remove")]
2303
2304 if remove_fn is not None:
2305 msg = remove_fn(absidx, item, private)
2306 if msg:
2307 changes.append(("%s/%s" % (kind, absidx), msg))
2308
2309 assert container[absidx] == item
2310 del container[absidx]
2311 elif op == constants.DDM_MODIFY:
2312 if modify_fn is not None:
2313 changes = modify_fn(absidx, item, params, private)
2314 else:
2315 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2316
2317 assert _TApplyContModsCbChanges(changes)
2318
2319 if not (chgdesc is None or changes is None):
2320 chgdesc.extend(changes)
2321
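# Illustrative sketch (editor's note): a minimal _ApplyContainerMods call with
# no callbacks, appending a raw parameter dict to an empty container:
#
#   container = []
#   mods = _PrepareContainerMods([(constants.DDM_ADD, -1, {"size": 1024})],
#                                None)
#   _ApplyContainerMods("disk", container, None, mods, None, None, None)
#   # container == [{"size": 1024}]; with a create_fn/modify_fn/remove_fn the
#   # callbacks build the real objects and report the change descriptions.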
2324 """Updates the C{iv_name} attribute of disks.
2325
2326 @type disks: list of L{objects.Disk}
2327
2328 """
2329 for (idx, disk) in enumerate(disks):
2330 disk.iv_name = "disk/%s" % (base_index + idx, )
2331
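# Editor's note: after _UpdateIvNames(0, instance.disks) the disks carry
# consecutive iv_name values "disk/0", "disk/1", ... starting at base_index.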
2334 """Modifies an instances's parameters.
2335
2336 """
2337 HPATH = "instance-modify"
2338 HTYPE = constants.HTYPE_INSTANCE
2339 REQ_BGL = False
2340
2341 @staticmethod
2342 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2343 assert ht.TList(mods)
2344 assert not mods or len(mods[0]) in (2, 3)
2345
2346 if mods and len(mods[0]) == 2:
2347 result = []
2348
2349 addremove = 0
2350 for op, params in mods:
2351 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2352 result.append((op, -1, params))
2353 addremove += 1
2354
2355 if addremove > 1:
2356 raise errors.OpPrereqError("Only one %s add or remove operation is"
2357 " supported at a time" % kind,
2358 errors.ECODE_INVAL)
2359 else:
2360 result.append((constants.DDM_MODIFY, op, params))
2361
2362 assert verify_fn(result)
2363 else:
2364 result = mods
2365
2366 return result
2367
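# Illustrative note (editor's): _UpgradeDiskNicMods upgrades legacy 2-tuple
# modifications such as
#   [(constants.DDM_ADD, {"link": "br0"})]
# to the indexed 3-tuple form
#   [(constants.DDM_ADD, -1, {"link": "br0"})]
# while lists that already use 3-tuples are returned unchanged; "br0" is a
# hypothetical link name.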
2368 @staticmethod
2390
2392 """Verifies a disk modification.
2393
2394 """
2395 if op == constants.DDM_ADD:
2396 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2397 if mode not in constants.DISK_ACCESS_SET:
2398 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2399 errors.ECODE_INVAL)
2400
2401 size = params.get(constants.IDISK_SIZE, None)
2402 if size is None:
2403 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2404 constants.IDISK_SIZE, errors.ECODE_INVAL)
2405 size = int(size)
2406
2407 params[constants.IDISK_SIZE] = size
2408 name = params.get(constants.IDISK_NAME, None)
2409 if name is not None and name.lower() == constants.VALUE_NONE:
2410 params[constants.IDISK_NAME] = None
2411
2412 CheckSpindlesExclusiveStorage(params, excl_stor, True)
2413
2414 elif op == constants.DDM_MODIFY:
2415 if constants.IDISK_SIZE in params:
2416 raise errors.OpPrereqError("Disk size change not possible, use"
2417 " grow-disk", errors.ECODE_INVAL)
2418
2419
2420
2421 if self.instance.disk_template != constants.DT_EXT:
2422 utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2423
2424 name = params.get(constants.IDISK_NAME, None)
2425 if name is not None and name.lower() == constants.VALUE_NONE:
2426 params[constants.IDISK_NAME] = None
2427
2428 @staticmethod
2430 """Verifies a network interface modification.
2431
2432 """
2433 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2434 ip = params.get(constants.INIC_IP, None)
2435 name = params.get(constants.INIC_NAME, None)
2436 req_net = params.get(constants.INIC_NETWORK, None)
2437 link = params.get(constants.NIC_LINK, None)
2438 mode = params.get(constants.NIC_MODE, None)
2439 if name is not None and name.lower() == constants.VALUE_NONE:
2440 params[constants.INIC_NAME] = None
2441 if req_net is not None:
2442 if req_net.lower() == constants.VALUE_NONE:
2443 params[constants.INIC_NETWORK] = None
2444 req_net = None
2445 elif link is not None or mode is not None:
2446 raise errors.OpPrereqError("If network is given"
2447 " mode or link should not",
2448 errors.ECODE_INVAL)
2449
2450 if op == constants.DDM_ADD:
2451 macaddr = params.get(constants.INIC_MAC, None)
2452 if macaddr is None:
2453 params[constants.INIC_MAC] = constants.VALUE_AUTO
2454
2455 if ip is not None:
2456 if ip.lower() == constants.VALUE_NONE:
2457 params[constants.INIC_IP] = None
2458 else:
2459 if ip.lower() == constants.NIC_IP_POOL:
2460 if op == constants.DDM_ADD and req_net is None:
2461 raise errors.OpPrereqError("If ip=pool, parameter network"
2462 " cannot be none",
2463 errors.ECODE_INVAL)
2464 else:
2465 if not netutils.IPAddress.IsValid(ip):
2466 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2467 errors.ECODE_INVAL)
2468
2469 if constants.INIC_MAC in params:
2470 macaddr = params[constants.INIC_MAC]
2471 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2472 macaddr = utils.NormalizeAndValidateMac(macaddr)
2473
2474 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2475 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2476 " modifying an existing NIC",
2477 errors.ECODE_INVAL)
2478
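# Illustrative sketch (editor's note): _VerifyNicModification normalizes the
# parameters in place; for an add operation the hypothetical input
#   {constants.INIC_IP: "none", constants.INIC_NETWORK: "none"}
# ends up as
#   {constants.INIC_IP: None, constants.INIC_NETWORK: None,
#    constants.INIC_MAC: constants.VALUE_AUTO}
# i.e. "none" strings are turned into None and a MAC default is filled in.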
2479 def CheckArguments(self):
2480 if not (self.op.nics or self.op.disks or self.op.disk_template or
2481 self.op.hvparams or self.op.beparams or self.op.os_name or
2482 self.op.osparams or self.op.offline is not None or
2483 self.op.runtime_mem or self.op.pnode):
2484 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2485
2486 if self.op.hvparams:
2487 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2488 "hypervisor", "instance", "cluster")
2489
2490 self.op.disks = self._UpgradeDiskNicMods(
2491 "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
2492 self.op.nics = self._UpgradeDiskNicMods(
2493 "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
2494
2495 if self.op.disks and self.op.disk_template is not None:
2496 raise errors.OpPrereqError("Disk template conversion and other disk"
2497 " changes not supported at the same time",
2498 errors.ECODE_INVAL)
2499
2500 if (self.op.disk_template and
2501 self.op.disk_template in constants.DTS_INT_MIRROR and
2502 self.op.remote_node is None):
2503 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2504 " one requires specifying a secondary node",
2505 errors.ECODE_INVAL)
2506
2507
2508 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2509 self._VerifyNicModification)
2510
2511 if self.op.pnode:
2512 (self.op.pnode_uuid, self.op.pnode) = \
2513 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2514
2525
2526 def DeclareLocks(self, level):
2527 if level == locking.LEVEL_NODEGROUP:
2528 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2529
2530
2531 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2532 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2533 elif level == locking.LEVEL_NODE:
2534 self._LockInstancesNodes()
2535 if self.op.disk_template and self.op.remote_node:
2536 (self.op.remote_node_uuid, self.op.remote_node) = \
2537 ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2538 self.op.remote_node)
2539 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2540 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2541
2542 self.needed_locks[locking.LEVEL_NODE_RES] = \
2543 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2544
2579
2586
2587 def _PrepareNicModification(self, params, private, old_ip, old_net_uuid,
2588 old_params, cluster, pnode_uuid):
2589
2590 update_params_dict = dict([(key, params[key])
2591 for key in constants.NICS_PARAMETERS
2592 if key in params])
2593
2594 req_link = update_params_dict.get(constants.NIC_LINK, None)
2595 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2596
2597 new_net_uuid = None
2598 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2599 if new_net_uuid_or_name:
2600 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2601 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2602
2603 if old_net_uuid:
2604 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2605
2606 if new_net_uuid:
2607 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2608 if not netparams:
2609 raise errors.OpPrereqError("No netparams found for the network"
2610 " %s, probably not connected" %
2611 new_net_obj.name, errors.ECODE_INVAL)
2612 new_params = dict(netparams)
2613 else:
2614 new_params = GetUpdatedParams(old_params, update_params_dict)
2615
2616 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2617
2618 new_filled_params = cluster.SimpleFillNIC(new_params)
2619 objects.NIC.CheckParameterSyntax(new_filled_params)
2620
2621 new_mode = new_filled_params[constants.NIC_MODE]
2622 if new_mode == constants.NIC_MODE_BRIDGED:
2623 bridge = new_filled_params[constants.NIC_LINK]
2624 msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2625 if msg:
2626 msg = "Error checking bridges on node '%s': %s" % \
2627 (self.cfg.GetNodeName(pnode_uuid), msg)
2628 if self.op.force:
2629 self.warn.append(msg)
2630 else:
2631 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2632
2633 elif new_mode == constants.NIC_MODE_ROUTED:
2634 ip = params.get(constants.INIC_IP, old_ip)
2635 if ip is None:
2636 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2637 " on a routed NIC", errors.ECODE_INVAL)
2638
2639 elif new_mode == constants.NIC_MODE_OVS:
2640
2641 self.LogInfo("OVS links are currently not checked for correctness")
2642
2643 if constants.INIC_MAC in params:
2644 mac = params[constants.INIC_MAC]
2645 if mac is None:
2646 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2647 errors.ECODE_INVAL)
2648 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2649
2650 params[constants.INIC_MAC] = \
2651 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2652 else:
2653
2654 try:
2655 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2656 except errors.ReservationError:
2657 raise errors.OpPrereqError("MAC address '%s' already in use"
2658 " in cluster" % mac,
2659 errors.ECODE_NOTUNIQUE)
2660 elif new_net_uuid != old_net_uuid:
2661
2662 def get_net_prefix(net_uuid):
2663 mac_prefix = None
2664 if net_uuid:
2665 nobj = self.cfg.GetNetwork(net_uuid)
2666 mac_prefix = nobj.mac_prefix
2667
2668 return mac_prefix
2669
2670 new_prefix = get_net_prefix(new_net_uuid)
2671 old_prefix = get_net_prefix(old_net_uuid)
2672 if old_prefix != new_prefix:
2673 params[constants.INIC_MAC] = \
2674 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2675
2676
2677 new_ip = params.get(constants.INIC_IP, old_ip)
2678 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2679 if new_ip:
2680
2681 if new_ip.lower() == constants.NIC_IP_POOL:
2682 if new_net_uuid:
2683 try:
2684 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2685 except errors.ReservationError:
2686 raise errors.OpPrereqError("Unable to get a free IP"
2687 " from the address pool",
2688 errors.ECODE_STATE)
2689 self.LogInfo("Chose IP %s from network %s",
2690 new_ip,
2691 new_net_obj.name)
2692 params[constants.INIC_IP] = new_ip
2693 else:
2694 raise errors.OpPrereqError("ip=pool, but no network found",
2695 errors.ECODE_INVAL)
2696
2697 elif new_net_uuid:
2698 try:
2699 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
2700 check=self.op.conflicts_check)
2701 self.LogInfo("Reserving IP %s in network %s",
2702 new_ip, new_net_obj.name)
2703 except errors.ReservationError:
2704 raise errors.OpPrereqError("IP %s not available in network %s" %
2705 (new_ip, new_net_obj.name),
2706 errors.ECODE_NOTUNIQUE)
2707
2708 elif self.op.conflicts_check:
2709 _CheckForConflictingIp(self, new_ip, pnode_uuid)
2710
2711
2712 if old_ip and old_net_uuid:
2713 try:
2714 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2715 except errors.AddressPoolError:
2716 logging.warning("Release IP %s not contained in network %s",
2717 old_ip, old_net_obj.name)
2718
2719
2720 elif (old_net_uuid is not None and
2721 (req_link is not None or req_mode is not None)):
2722 raise errors.OpPrereqError("Not allowed to change link or mode of"
2723 " a NIC that is connected to a network",
2724 errors.ECODE_INVAL)
2725
2726 private.params = new_params
2727 private.filled = new_filled_params
2728
2730 """CheckPrereq checks related to a new disk template."""
2731
2732 pnode_uuid = self.instance.primary_node
2733 if self.instance.disk_template == self.op.disk_template:
2734 raise errors.OpPrereqError("Instance already has disk template %s" %
2735 self.instance.disk_template,
2736 errors.ECODE_INVAL)
2737
2738 if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
2739 raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
2740 " cluster." % self.op.disk_template)
2741
2742 if (self.instance.disk_template,
2743 self.op.disk_template) not in self._DISK_CONVERSIONS:
2744 raise errors.OpPrereqError("Unsupported disk template conversion from"
2745 " %s to %s" % (self.instance.disk_template,
2746 self.op.disk_template),
2747 errors.ECODE_INVAL)
2748 CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2749 msg="cannot change disk template")
2750 if self.op.disk_template in constants.DTS_INT_MIRROR:
2751 if self.op.remote_node_uuid == pnode_uuid:
2752 raise errors.OpPrereqError("Given new secondary node %s is the same"
2753 " as the primary node of the instance" %
2754 self.op.remote_node, errors.ECODE_STATE)
2755 CheckNodeOnline(self, self.op.remote_node_uuid)
2756 CheckNodeNotDrained(self, self.op.remote_node_uuid)
2757
2758 assert self.instance.disk_template == constants.DT_PLAIN
2759 disks = [{constants.IDISK_SIZE: d.size,
2760 constants.IDISK_VG: d.logical_id[0]}
2761 for d in self.instance.disks]
2762 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2763 CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2764
2765 snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2766 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2767 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2768 snode_group)
2769 CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2770 ignore=self.op.ignore_ipolicy)
2771 if pnode_info.group != snode_info.group:
2772 self.LogWarning("The primary and secondary nodes are in two"
2773 " different node groups; the disk parameters"
2774 " from the first disk's node group will be"
2775 " used")
2776
2777 if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2778
2779 nodes = [pnode_info]
2780 if self.op.disk_template in constants.DTS_INT_MIRROR:
2781 assert snode_info
2782 nodes.append(snode_info)
2783 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2784 if compat.any(map(has_es, nodes)):
2785 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2786 " storage is enabled" % (self.instance.disk_template,
2787 self.op.disk_template))
2788 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2789
2791 """CheckPrereq checks related to disk changes.
2792
2793 @type ispec: dict
2794 @param ispec: instance specs to be updated with the new disks
2795
2796 """
2797 self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2798
2799 excl_stor = compat.any(
2800 rpc.GetExclusiveStorageForNodes(self.cfg,
2801 self.instance.all_nodes).values()
2802 )
2803
2804
2805
2806 ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2807 if self.instance.disk_template == constants.DT_EXT:
2808 self._CheckMods("disk", self.op.disks, {}, ver_fn)
2809 else:
2810 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2811 ver_fn)
2812
2813 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2814
2815
2816 if self.instance.disk_template == constants.DT_EXT:
2817 for mod in self.diskmod:
2818 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2819 if mod[0] == constants.DDM_ADD:
2820 if ext_provider is None:
2821 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2822 " '%s' missing, during disk add" %
2823 (constants.DT_EXT,
2824 constants.IDISK_PROVIDER),
2825 errors.ECODE_NOENT)
2826 elif mod[0] == constants.DDM_MODIFY:
2827 if ext_provider:
2828 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2829 " modification" %
2830 constants.IDISK_PROVIDER,
2831 errors.ECODE_INVAL)
2832 else:
2833 for mod in self.diskmod:
2834 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2835 if ext_provider is not None:
2836 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2837 " instances of type '%s'" %
2838 (constants.IDISK_PROVIDER,
2839 constants.DT_EXT),
2840 errors.ECODE_INVAL)
2841
2842 if not self.op.wait_for_sync and not self.instance.disks_active:
2843 for mod in self.diskmod:
2844 if mod[0] == constants.DDM_ADD:
2845 raise errors.OpPrereqError("Can't add a disk to an instance with"
2846 " deactivated disks and"
2847 " --no-wait-for-sync given.",
2848 errors.ECODE_INVAL)
2849
2850 if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
2851 raise errors.OpPrereqError("Disk operations not supported for"
2852 " diskless instances", errors.ECODE_INVAL)
2853
2854 def _PrepareDiskMod(_, disk, params, __):
2855 disk.name = params.get(constants.IDISK_NAME, None)
2856
2857
2858 disks = copy.deepcopy(self.instance.disks)
2859 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2860 _PrepareDiskMod, None)
2861 utils.ValidateDeviceNames("disk", disks)
2862 if len(disks) > constants.MAX_DISKS:
2863 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2864 " more" % constants.MAX_DISKS,
2865 errors.ECODE_STATE)
2866 disk_sizes = [disk.size for disk in self.instance.disks]
2867 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2868 self.diskmod if op == constants.DDM_ADD)
2869 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2870 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2871
2872
2873 if self.op.offline is not None:
2874 if self.op.offline:
2875 msg = "can't change to offline without being down first"
2876 else:
2877 msg = "can't change to online (down) without being offline first"
2878 CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
2879 msg=msg)
2880
2882 """Check prerequisites.
2883
2884 This only checks the instance list against the existing names.
2885
2886 """
2887 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2888 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
2889 self.cluster = self.cfg.GetClusterInfo()
2890 cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]
2891
2892 assert self.instance is not None, \
2893 "Cannot retrieve locked instance %s" % self.op.instance_name
2894
2895 pnode_uuid = self.instance.primary_node
2896
2897 self.warn = []
2898
2899 if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
2900 not self.op.force):
2901
2902 instance_info = self.rpc.call_instance_info(
2903 pnode_uuid, self.instance.name, self.instance.hypervisor,
2904 cluster_hvparams)
2905 if instance_info.fail_msg:
2906 self.warn.append("Can't get instance runtime information: %s" %
2907 instance_info.fail_msg)
2908 elif instance_info.payload:
2909 raise errors.OpPrereqError("Instance is still running on %s" %
2910 self.cfg.GetNodeName(pnode_uuid),
2911 errors.ECODE_STATE)
2912
2913 assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
2914 node_uuids = list(self.instance.all_nodes)
2915 pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
2916
2917
2918 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2919 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2920
2921
2922 ispec = {}
2923
2924 if self.op.hotplug or self.op.hotplug_if_possible:
2925 result = self.rpc.call_hotplug_supported(self.instance.primary_node,
2926 self.instance)
2927 if result.fail_msg:
2928 if self.op.hotplug:
2929 result.Raise("Hotplug is not possible: %s" % result.fail_msg,
2930 prereq=True)
2931 else:
2932 self.LogWarning(result.fail_msg)
2933 self.op.hotplug = False
2934 self.LogInfo("Modification will take place without hotplugging.")
2935 else:
2936 self.op.hotplug = True
2937
2938
2939 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2940
2941
2942 if self.op.os_name and not self.op.force:
2943 CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
2944 self.op.force_variant)
2945 instance_os = self.op.os_name
2946 else:
2947 instance_os = self.instance.os
2948
2949 assert not (self.op.disk_template and self.op.disks), \
2950 "Can't modify disk template and apply disk changes at the same time"
2951
2952 if self.op.disk_template:
2953 self._PreCheckDiskTemplate(pnode_info)
2954
2955 self._PreCheckDisks(ispec)
2956
2957
2958 if self.op.hvparams:
2959 hv_type = self.instance.hypervisor
2960 i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
2961 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2962 hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
2963
2964
2965 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2966 CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
2967 self.hv_proposed = self.hv_new = hv_new
2968 self.hv_inst = i_hvdict
2969 else:
2970 self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
2971 self.instance.os,
2972 self.instance.hvparams)
2973 self.hv_new = self.hv_inst = {}
2974
2975
2976 if self.op.beparams:
2977 i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
2978 use_none=True)
2979 objects.UpgradeBeParams(i_bedict)
2980 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2981 be_new = self.cluster.SimpleFillBE(i_bedict)
2982 self.be_proposed = self.be_new = be_new
2983 self.be_inst = i_bedict
2984 else:
2985 self.be_new = self.be_inst = {}
2986 self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
2987 be_old = self.cluster.FillBE(self.instance)
2988
2989
2990
2991
2992 if (constants.BE_VCPUS in self.be_proposed and
2993 constants.HV_CPU_MASK in self.hv_proposed):
2994 cpu_list = \
2995 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2996
2997
2998
2999 if (len(cpu_list) > 1 and
3000 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
3001 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
3002 " CPU mask [%s]" %
3003 (self.be_proposed[constants.BE_VCPUS],
3004 self.hv_proposed[constants.HV_CPU_MASK]),
3005 errors.ECODE_INVAL)
3006
3007
3008 if constants.HV_CPU_MASK in self.hv_new:
3009
3010 max_requested_cpu = max(map(max, cpu_list))
3011
3012
3013 hvspecs = [(self.instance.hypervisor,
3014 self.cfg.GetClusterInfo()
3015 .hvparams[self.instance.hypervisor])]
3016 _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
3017 max_requested_cpu + 1,
3018 hvspecs)
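# Editor's note (assuming the usual multi-CPU mask syntax): a mask such as
# "0-1:2-3" is parsed by utils.ParseMultiCpuMask into [[0, 1], [2, 3]], so it
# only matches BE_VCPUS == 2, and the check above then requires nodes with at
# least max(0, 1, 2, 3) + 1 = 4 physical CPUs.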
3019
3020
3021 if self.op.osparams:
3022 i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
3023 CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
3024 self.os_inst = i_osdict
3025 else:
3026 self.os_inst = {}
3027
3028
3029 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
3030 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
3031 mem_check_list = [pnode_uuid]
3032 if be_new[constants.BE_AUTO_BALANCE]:
3033
3034 mem_check_list.extend(self.instance.secondary_nodes)
3035 instance_info = self.rpc.call_instance_info(
3036 pnode_uuid, self.instance.name, self.instance.hypervisor,
3037 cluster_hvparams)
3038 hvspecs = [(self.instance.hypervisor,
3039 cluster_hvparams)]
3040 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
3041 hvspecs)
3042 pninfo = nodeinfo[pnode_uuid]
3043 msg = pninfo.fail_msg
3044 if msg:
3045
3046 self.warn.append("Can't get info from primary node %s: %s" %
3047 (self.cfg.GetNodeName(pnode_uuid), msg))
3048 else:
3049 (_, _, (pnhvinfo, )) = pninfo.payload
3050 if not isinstance(pnhvinfo.get("memory_free", None), int):
3051 self.warn.append("Node data from primary node %s doesn't contain"
3052 " free memory information" %
3053 self.cfg.GetNodeName(pnode_uuid))
3054 elif instance_info.fail_msg:
3055 self.warn.append("Can't get instance runtime information: %s" %
3056 instance_info.fail_msg)
3057 else:
3058 if instance_info.payload:
3059 current_mem = int(instance_info.payload["memory"])
3060 else:
3061
3062
3063
3064
3065 current_mem = 0
3066
3067 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
3068 pnhvinfo["memory_free"])
3069 if miss_mem > 0:
3070 raise errors.OpPrereqError("This change will prevent the instance"
3071 " from starting, due to %d MB of memory"
3072 " missing on its primary node" %
3073 miss_mem, errors.ECODE_NORES)
3074
3075 if be_new[constants.BE_AUTO_BALANCE]:
3076 for node_uuid, nres in nodeinfo.items():
3077 if node_uuid not in self.instance.secondary_nodes:
3078 continue
3079 nres.Raise("Can't get info from secondary node %s" %
3080 self.cfg.GetNodeName(node_uuid), prereq=True,
3081 ecode=errors.ECODE_STATE)
3082 (_, _, (nhvinfo, )) = nres.payload
3083 if not isinstance(nhvinfo.get("memory_free", None), int):
3084 raise errors.OpPrereqError("Secondary node %s didn't return free"
3085 " memory information" %
3086 self.cfg.GetNodeName(node_uuid),
3087 errors.ECODE_STATE)
3088
3089 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
3090 raise errors.OpPrereqError("This change will prevent the instance"
3091 " from failover to its secondary node"
3092 " %s, due to not enough memory" %
3093 self.cfg.GetNodeName(node_uuid),
3094 errors.ECODE_STATE)
3095
3096 if self.op.runtime_mem:
3097 remote_info = self.rpc.call_instance_info(
3098 self.instance.primary_node, self.instance.name,
3099 self.instance.hypervisor,
3100 cluster_hvparams)
3101 remote_info.Raise("Error checking node %s" %
3102 self.cfg.GetNodeName(self.instance.primary_node),
3103 prereq=True)
3104 if not remote_info.payload:
3105 raise errors.OpPrereqError("Instance %s is not running" %
3106 self.instance.name, errors.ECODE_STATE)
3107
3108 current_memory = remote_info.payload["memory"]
3109 if (not self.op.force and
3110 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
3111 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
3112 raise errors.OpPrereqError("Instance %s must have memory between %d"
3113 " and %d MB of memory unless --force is"
3114 " given" %
3115 (self.instance.name,
3116 self.be_proposed[constants.BE_MINMEM],
3117 self.be_proposed[constants.BE_MAXMEM]),
3118 errors.ECODE_INVAL)
3119
3120 delta = self.op.runtime_mem - current_memory
3121 if delta > 0:
3122 CheckNodeFreeMemory(
3123 self, self.instance.primary_node,
3124 "ballooning memory for instance %s" % self.instance.name, delta,
3125 self.instance.hypervisor,
3126 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
3127
3128
3129 cluster = self.cluster
3130
3131 def _PrepareNicCreate(_, params, private):
3132 self._PrepareNicModification(params, private, None, None,
3133 {}, cluster, pnode_uuid)
3134 return (None, None)
3135
3136 def _PrepareNicMod(_, nic, params, private):
3137 self._PrepareNicModification(params, private, nic.ip, nic.network,
3138 nic.nicparams, cluster, pnode_uuid)
3139 return None
3140
3141 def _PrepareNicRemove(_, params, __):
3142 ip = params.ip
3143 net = params.network
3144 if net is not None and ip is not None:
3145 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
3146
3147
3148 nics = self.instance.nics[:]
3149 _ApplyContainerMods("NIC", nics, None, self.nicmod,
3150 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
3151 if len(nics) > constants.MAX_NICS:
3152 raise errors.OpPrereqError("Instance has too many network interfaces"
3153 " (%d), cannot add more" % constants.MAX_NICS,
3154 errors.ECODE_STATE)
3155
3156
3157 self._nic_chgdesc = []
3158 if self.nicmod:
3159
3160 nics = [nic.Copy() for nic in self.instance.nics]
3161 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3162 self._CreateNewNic, self._ApplyNicMods,
3163 self._RemoveNic)
3164
3165 utils.ValidateDeviceNames("NIC", nics)
3166 self._new_nics = nics
3167 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3168 else:
3169 self._new_nics = None
3170 ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
3171
3172 if not self.op.ignore_ipolicy:
3173 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
3174 group_info)
3175
3176
3177 ispec[constants.ISPEC_SPINDLE_USE] = \
3178 self.be_new.get(constants.BE_SPINDLE_USE, None)
3179 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3180 None)
3181
3182
3183 if self.op.disk_template:
3184 new_disk_template = self.op.disk_template
3185 else:
3186 new_disk_template = self.instance.disk_template
3187 ispec_max = ispec.copy()
3188 ispec_max[constants.ISPEC_MEM_SIZE] = \
3189 self.be_new.get(constants.BE_MAXMEM, None)
3190 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3191 new_disk_template)
3192 ispec_min = ispec.copy()
3193 ispec_min[constants.ISPEC_MEM_SIZE] = \
3194 self.be_new.get(constants.BE_MINMEM, None)
3195 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3196 new_disk_template)
3197
3198 if (res_max or res_min):
3199
3200
3201 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3202 (group_info, group_info.name,
3203 utils.CommaJoin(set(res_max + res_min))))
3204 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3205
3207 """Converts an instance from plain to drbd.
3208
3209 """
3210 feedback_fn("Converting template to drbd")
3211 pnode_uuid = self.instance.primary_node
3212 snode_uuid = self.op.remote_node_uuid
3213
3214 assert self.instance.disk_template == constants.DT_PLAIN
3215
3216
3217 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3218 constants.IDISK_VG: d.logical_id[0],
3219 constants.IDISK_NAME: d.name}
3220 for d in self.instance.disks]
3221 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3222 self.instance.uuid, pnode_uuid,
3223 [snode_uuid], disk_info, None, None, 0,
3224 feedback_fn, self.diskparams)
3225 anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
3226 p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
3227 s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
3228 info = GetInstanceInfoText(self.instance)
3229 feedback_fn("Creating additional volumes...")
3230
3231 for disk in anno_disks:
3232
3233 CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
3234 info, True, p_excl_stor)
3235 for child in disk.children:
3236 CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
3237 s_excl_stor)
3238
3239
3240 feedback_fn("Renaming original volumes...")
3241 rename_list = [(o, n.children[0].logical_id)
3242 for (o, n) in zip(self.instance.disks, new_disks)]
3243 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
3244 result.Raise("Failed to rename original LVs")
3245
3246 feedback_fn("Initializing DRBD devices...")
3247
3248 try:
3249 for disk in anno_disks:
3250 for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
3251 (snode_uuid, s_excl_stor)]:
3252 f_create = node_uuid == pnode_uuid
3253 CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
3254 f_create, excl_stor)
3255 except errors.GenericError as e:
3256 feedback_fn("Initializing of DRBD devices failed;"
3257 " renaming back original volumes...")
3258 rename_back_list = [(n.children[0], o.logical_id)
3259 for (n, o) in zip(new_disks, self.instance.disks)]
3260 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
3261 result.Raise("Failed to rename LVs back after error %s" % str(e))
3262 raise
3263
3264
3265 self.instance.disk_template = constants.DT_DRBD8
3266 self.instance.disks = new_disks
3267 self.cfg.Update(self.instance, feedback_fn)
3268
3269
3270 ReleaseLocks(self, locking.LEVEL_NODE)
3271
3272
3273 disk_abort = not WaitForSync(self, self.instance,
3274 oneshot=not self.op.wait_for_sync)
3275 if disk_abort:
3276 raise errors.OpExecError("There are some degraded disks for"
3277 " this instance, please cleanup manually")
3278
3279
3280
3282 """Converts an instance from drbd to plain.
3283
3284 """
3285 assert self.instance.disk_template == constants.DT_DRBD8
3286 assert len(self.instance.secondary_nodes) == 1 or not self.instance.disks
3287
3288 pnode_uuid = self.instance.primary_node
3289
3290
3291 snode_uuid = None
3292 if self.instance.secondary_nodes:
3293 snode_uuid = self.instance.secondary_nodes[0]
3294
3295 feedback_fn("Converting template to plain")
3296
3297 old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
3298 new_disks = [d.children[0] for d in self.instance.disks]
3299
3300
3301 for parent, child in zip(old_disks, new_disks):
3302 child.size = parent.size
3303 child.mode = parent.mode
3304 child.name = parent.name
3305
3306
3307
3308 for disk in old_disks:
3309 tcp_port = disk.logical_id[2]
3310 self.cfg.AddTcpUdpPort(tcp_port)
3311
3312
3313 self.instance.disks = new_disks
3314 self.instance.disk_template = constants.DT_PLAIN
3315 _UpdateIvNames(0, self.instance.disks)
3316 self.cfg.Update(self.instance, feedback_fn)
3317
3318
3319 ReleaseLocks(self, locking.LEVEL_NODE)
3320
3321 feedback_fn("Removing volumes on the secondary node...")
3322 for disk in old_disks:
3323 result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
3324 result.Warn("Could not remove block device %s on node %s,"
3325 " continuing anyway" %
3326 (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
3327 self.LogWarning)
3328
3329 feedback_fn("Removing unneeded volumes on the primary node...")
3330 for idx, disk in enumerate(old_disks):
3331 meta = disk.children[1]
3332 result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
3333 result.Warn("Could not remove metadata for disk %d on node %s,"
3334 " continuing anyway" %
3335 (idx, self.cfg.GetNodeName(pnode_uuid)),
3336 self.LogWarning)
3337
3339 self.LogInfo("Trying to hotplug device...")
3340 msg = "hotplug:"
3341 result = self.rpc.call_hotplug_device(self.instance.primary_node,
3342 self.instance, action, dev_type,
3343 (device, self.instance),
3344 extra, seq)
3345 if result.fail_msg:
3346 self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
3347 self.LogInfo("Continuing execution..")
3348 msg += "failed"
3349 else:
3350 self.LogInfo("Hotplug done.")
3351 msg += "done"
3352 return msg
3353
3355 """Creates a new disk.
3356
3357 """
3358
3359 if self.instance.disk_template in constants.DTS_FILEBASED:
3360 (file_driver, file_path) = self.instance.disks[0].logical_id
3361 file_path = os.path.dirname(file_path)
3362 else:
3363 file_driver = file_path = None
3364
3365 disk = \
3366 GenerateDiskTemplate(self, self.instance.disk_template,
3367 self.instance.uuid, self.instance.primary_node,
3368 self.instance.secondary_nodes, [params], file_path,
3369 file_driver, idx, self.Log, self.diskparams)[0]
3370
3371 new_disks = CreateDisks(self, self.instance, disks=[disk])
3372
3373 if self.cluster.prealloc_wipe_disks:
3374
3375 WipeOrCleanupDisks(self, self.instance,
3376 disks=[(idx, disk, 0)],
3377 cleanup=new_disks)
3378
3379 changes = [
3380 ("disk/%d" % idx,
3381 "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3382 ]
3383 if self.op.hotplug:
3384 result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
3385 (disk, self.instance),
3386 self.instance, True, idx)
3387 if result.fail_msg:
3388 changes.append(("disk/%d" % idx, "assemble:failed"))
3389 self.LogWarning("Can't assemble newly created disk %d: %s",
3390 idx, result.fail_msg)
3391 else:
3392 _, link_name, uri = result.payload
3393 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3394 constants.HOTPLUG_TARGET_DISK,
3395 disk, (link_name, uri), idx)
3396 changes.append(("disk/%d" % idx, msg))
3397
3398 return (disk, changes)
3399
3400 def _PostAddDisk(self, _, disk):
3401 if not WaitForSync(self, self.instance, disks=[disk],
3402 oneshot=not self.op.wait_for_sync):
3403 raise errors.OpExecError("Failed to sync disks of %s" %
3404 self.instance.name)
3405
3406
3407
3408 if not self.instance.disks_active:
3409 ShutdownInstanceDisks(self, self.instance, disks=[disk])
3410
3439
3441 """Removes a disk.
3442
3443 """
3444 hotmsg = ""
3445 if self.op.hotplug:
3446 hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3447 constants.HOTPLUG_TARGET_DISK,
3448 root, None, idx)
3449 ShutdownInstanceDisks(self, self.instance, [root])
3450
3451 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3452 for node_uuid, disk in anno_disk.ComputeNodeTree(
3453 self.instance.primary_node):
3454 msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
3455 .fail_msg
3456 if msg:
3457 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3458 " continuing anyway", idx,
3459 self.cfg.GetNodeName(node_uuid), msg)
3460
3461
3462 if root.dev_type in constants.DTS_DRBD:
3463 self.cfg.AddTcpUdpPort(root.logical_id[2])
3464
3465 return hotmsg
3466
3468 """Creates data structure for a new network interface.
3469
3470 """
3471 mac = params[constants.INIC_MAC]
3472 ip = params.get(constants.INIC_IP, None)
3473 net = params.get(constants.INIC_NETWORK, None)
3474 name = params.get(constants.INIC_NAME, None)
3475 net_uuid = self.cfg.LookupNetwork(net)
3476
3477 nicparams = private.filled
3478 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3479 nicparams=nicparams)
3480 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3481
3482 changes = [
3483 ("nic.%d" % idx,
3484 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3485 (mac, ip, private.filled[constants.NIC_MODE],
3486 private.filled[constants.NIC_LINK], net)),
3487 ]
3488
3489 if self.op.hotplug:
3490 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3491 constants.HOTPLUG_TARGET_NIC,
3492 nobj, None, idx)
3493 changes.append(("nic.%d" % idx, msg))
3494
3495 return (nobj, changes)
3496
3498 """Modifies a network interface.
3499
3500 """
3501 changes = []
3502
3503 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3504 if key in params:
3505 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3506 setattr(nic, key, params[key])
3507
3508 new_net = params.get(constants.INIC_NETWORK, nic.network)
3509 new_net_uuid = self.cfg.LookupNetwork(new_net)
3510 if new_net_uuid != nic.network:
3511 changes.append(("nic.network/%d" % idx, new_net))
3512 nic.network = new_net_uuid
3513
3514 if private.filled:
3515 nic.nicparams = private.filled
3516
3517 for (key, val) in nic.nicparams.items():
3518 changes.append(("nic.%s/%d" % (key, idx), val))
3519
3520 if self.op.hotplug:
3521 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
3522 constants.HOTPLUG_TARGET_NIC,
3523 nic, None, idx)
3524 changes.append(("nic/%d" % idx, msg))
3525
3526 return changes
3527
3533
3534 def Exec(self, feedback_fn):
3535 """Modifies an instance.
3536
3537 All parameters take effect only at the next restart of the instance.
3538
3539 """
3540
3541
3542
3543 for warn in self.warn:
3544 feedback_fn("WARNING: %s" % warn)
3545
3546 assert ((self.op.disk_template is None) ^
3547 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3548 "Not owning any node resource locks"
3549
3550 result = []
3551
3552
3553 if self.op.pnode_uuid:
3554 self.instance.primary_node = self.op.pnode_uuid
3555
3556
3557 if self.op.runtime_mem:
3558 rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
3559 self.instance,
3560 self.op.runtime_mem)
3561 rpcres.Raise("Cannot modify instance runtime memory")
3562 result.append(("runtime_memory", self.op.runtime_mem))
3563
3564
3565 _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
3566 self._CreateNewDisk, self._ModifyDisk,
3567 self._RemoveDisk, post_add_fn=self._PostAddDisk)
3568 _UpdateIvNames(0, self.instance.disks)
3569
3570 if self.op.disk_template:
3571 if __debug__:
3572 check_nodes = set(self.instance.all_nodes)
3573 if self.op.remote_node_uuid:
3574 check_nodes.add(self.op.remote_node_uuid)
3575 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3576 owned = self.owned_locks(level)
3577 assert not (check_nodes - owned), \
3578 ("Not owning the correct locks, owning %r, expected at least %r" %
3579 (owned, check_nodes))
3580
3581 r_shut = ShutdownInstanceDisks(self, self.instance)
3582 if not r_shut:
3583 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3584 " proceed with disk template conversion")
3585 mode = (self.instance.disk_template, self.op.disk_template)
3586 try:
3587 self._DISK_CONVERSIONS[mode](self, feedback_fn)
3588 except:
3589 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
3590 raise
3591 result.append(("disk_template", self.op.disk_template))
3592
3593 assert self.instance.disk_template == self.op.disk_template, \
3594 ("Expected disk template '%s', found '%s'" %
3595 (self.op.disk_template, self.instance.disk_template))
3596
3597
3598
3599 ReleaseLocks(self, locking.LEVEL_NODE)
3600 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3601
3602
3603 if self._new_nics is not None:
3604 self.instance.nics = self._new_nics
3605 result.extend(self._nic_chgdesc)
3606
3607
3608 if self.op.hvparams:
3609 self.instance.hvparams = self.hv_inst
3610 for key, val in self.op.hvparams.iteritems():
3611 result.append(("hv/%s" % key, val))
3612
3613
3614 if self.op.beparams:
3615 self.instance.beparams = self.be_inst
3616 for key, val in self.op.beparams.iteritems():
3617 result.append(("be/%s" % key, val))
3618
3619
3620 if self.op.os_name:
3621 self.instance.os = self.op.os_name
3622
3623
3624 if self.op.osparams:
3625 self.instance.osparams = self.os_inst
3626 for key, val in self.op.osparams.iteritems():
3627 result.append(("os/%s" % key, val))
3628
3629 if self.op.offline is None:
3630
3631 pass
3632 elif self.op.offline:
3633
3634 self.cfg.MarkInstanceOffline(self.instance.uuid)
3635 result.append(("admin_state", constants.ADMINST_OFFLINE))
3636 else:
3637
3638 self.cfg.MarkInstanceDown(self.instance.uuid)
3639 result.append(("admin_state", constants.ADMINST_DOWN))
3640
3641 self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
3642
3643 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3644 self.owned_locks(locking.LEVEL_NODE)), \
3645 "All node locks should have been released by now"
3646
3647 return result
3648
3649 _DISK_CONVERSIONS = {
3650 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3651 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3652 }
3653
3655 class LUInstanceChangeGroup(LogicalUnit):
3656 HPATH = "instance-change-group"
3657 HTYPE = constants.HTYPE_INSTANCE
3658 REQ_BGL = False
3659
3678
3679 def DeclareLocks(self, level):
3680 if level == locking.LEVEL_NODEGROUP:
3681 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3682
3683 if self.req_target_uuids:
3684 lock_groups = set(self.req_target_uuids)
3685
3686
3687
3688 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
3689 lock_groups.update(instance_groups)
3690 else:
3691
3692 lock_groups = locking.ALL_SET
3693
3694 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3695
3696 elif level == locking.LEVEL_NODE:
3697 if self.req_target_uuids:
3698
3699 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3700 self._LockInstancesNodes()
3701
3702
3703 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3704 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
3705 member_nodes = [node_uuid
3706 for group in lock_groups
3707 for node_uuid in self.cfg.GetNodeGroup(group).members]
3708 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3709 else:
3710
3711 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3712
3713 def CheckPrereq(self):
3714 owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3715 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3716 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3717
3718 assert (self.req_target_uuids is None or
3719 owned_groups.issuperset(self.req_target_uuids))
3720 assert owned_instance_names == set([self.op.instance_name])
3721
3722
3723 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
3724
3725
3726 assert owned_nodes.issuperset(self.instance.all_nodes), \
3727 ("Instance %s's nodes changed while we kept the lock" %
3728 self.op.instance_name)
3729
3730 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
3731 owned_groups)
3732
3733 if self.req_target_uuids:
3734
3735 self.target_uuids = frozenset(self.req_target_uuids)
3736 else:
3737
3738 self.target_uuids = owned_groups - inst_groups
3739
3740 conflicting_groups = self.target_uuids & inst_groups
3741 if conflicting_groups:
3742 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3743 " used by the instance '%s'" %
3744 (utils.CommaJoin(conflicting_groups),
3745 self.op.instance_name),
3746 errors.ECODE_INVAL)
3747
3748 if not self.target_uuids:
3749 raise errors.OpPrereqError("There are no possible target groups",
3750 errors.ECODE_INVAL)
3751
3753 """Build hooks env.
3754
3755 """
3756 assert self.target_uuids
3757
3758 env = {
3759 "TARGET_GROUPS": " ".join(self.target_uuids),
3760 }
3761
3762 env.update(BuildInstanceHookEnvByObject(self, self.instance))
3763
3764 return env
3765
3767 """Build hooks nodes.
3768
3769 """
3770 mn = self.cfg.GetMasterNode()
3771 return ([mn], [mn])
3772
3773 def Exec(self, feedback_fn):
3774 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3775
3776 assert instances == [self.op.instance_name], "Instance not locked"
3777
3778 req = iallocator.IAReqGroupChange(instances=instances,
3779 target_groups=list(self.target_uuids))
3780 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3781
3782 ial.Run(self.op.iallocator)
3783
3784 if not ial.success:
3785 raise errors.OpPrereqError("Can't compute solution for changing group of"
3786 " instance '%s' using iallocator '%s': %s" %
3787 (self.op.instance_name, self.op.iallocator,
3788 ial.info), errors.ECODE_NORES)
3789
3790 jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3791
3792 self.LogInfo("Iallocator returned %s job(s) for changing group of"
3793 " instance '%s'", len(jobs), self.op.instance_name)
3794
3795 return ResultWithJobs(jobs)
3796