31 """Logical units dealing with instances."""
32
33 import OpenSSL
34 import copy
35 import logging
36 import os
37
38 from ganeti import compat
39 from ganeti import constants
40 from ganeti import errors
41 from ganeti import ht
42 from ganeti import hypervisor
43 from ganeti import locking
44 from ganeti.masterd import iallocator
45 from ganeti import masterd
46 from ganeti import netutils
47 from ganeti import objects
48 from ganeti import pathutils
49 from ganeti import rpc
50 from ganeti import utils
51
52 from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
53
54 from ganeti.cmdlib.common import INSTANCE_DOWN, \
55 INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
56 ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
57 LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
58 IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
59 AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
60 ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
61 CheckDiskTemplateEnabled, IsValidDiskAccessModeCombination
62 from ganeti.cmdlib.instance_storage import CreateDisks, \
63 CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
64 IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
65 CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
66 StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
67 CheckSpindlesExclusiveStorage
68 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
69 GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
70 NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
71 ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
72 GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
73 CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
74
75 import ganeti.masterd.instance
76
77
78
79
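# Type check for callback change lists (per the name, presumably used by the
# container-modification helpers): an optional list of (name, value) pairs.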
80 _TApplyContModsCbChanges = \
81 ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
82 ht.TNonEmptyString,
83 ht.TAny,
84 ])))
88 """Ensures that a given hostname resolves to a 'sane' name.
89
90 The given name is required to be a prefix of the resolved hostname,
91 to prevent accidental mismatches.
92
93 @param lu: the logical unit on behalf of which we're checking
94 @param name: the name we should resolve and check
95 @return: the resolved hostname object
96
97 """
98 hostname = netutils.GetHostname(name=name)
99 if hostname.name != name:
100 lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
101 if not utils.MatchNameComponent(name, [hostname.name]):
102 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
103 " same as given hostname '%s'") %
104 (hostname.name, name), errors.ECODE_INVAL)
105 return hostname
106
109 """Generate error if opportunistic locking is not possible.
110
111 """
112 if op.opportunistic_locking and not op.iallocator:
113 raise errors.OpPrereqError("Opportunistic locking is only available in"
114 " combination with an instance allocator",
115 errors.ECODE_INVAL)
116
119 """Wrapper around IAReqInstanceAlloc.
120
121 @param op: The instance opcode
122 @param disks: The computed disks
123 @param nics: The computed nics
124 @param beparams: The full filled beparams
125 @param node_name_whitelist: List of nodes which should appear as online to the
126 allocator (unless the node is already marked offline)
127
128 @returns: A filled L{iallocator.IAReqInstanceAlloc}
129
130 """
131 spindle_use = beparams[constants.BE_SPINDLE_USE]
132 return iallocator.IAReqInstanceAlloc(name=op.instance_name,
133 disk_template=op.disk_template,
134 tags=op.tags,
135 os=op.os_type,
136 vcpus=beparams[constants.BE_VCPUS],
137 memory=beparams[constants.BE_MAXMEM],
138 spindle_use=spindle_use,
139 disks=disks,
140 nics=[n.ToDict() for n in nics],
141 hypervisor=op.hypervisor,
142 node_whitelist=node_name_whitelist)
143
def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
164 """Computes the nics.
165
166 @param op: The instance opcode
167 @param cluster: Cluster configuration object
168 @param default_ip: The default ip to assign
169 @param cfg: An instance of the configuration object
170 @param ec_id: Execution context ID
171
172 @returns: The built-up NICs
173
174 """
175 nics = []
176 for nic in op.nics:
177 nic_mode_req = nic.get(constants.INIC_MODE, None)
178 nic_mode = nic_mode_req
179 if nic_mode is None or nic_mode == constants.VALUE_AUTO:
180 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
181
182 net = nic.get(constants.INIC_NETWORK, None)
183 link = nic.get(constants.NIC_LINK, None)
184 ip = nic.get(constants.INIC_IP, None)
185 vlan = nic.get(constants.INIC_VLAN, None)
186
187 if net is None or net.lower() == constants.VALUE_NONE:
188 net = None
189 else:
190 if nic_mode_req is not None or link is not None:
191 raise errors.OpPrereqError("If network is given, no mode or link"
192 " is allowed to be passed",
193 errors.ECODE_INVAL)
194
195
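# ip=none clears the address, ip=auto uses the resolved instance IP (requires
# a name check), ip=pool draws from the NIC's network, anything else must be
# a valid IP address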
196 if ip is None or ip.lower() == constants.VALUE_NONE:
197 nic_ip = None
198 elif ip.lower() == constants.VALUE_AUTO:
199 if not op.name_check:
200 raise errors.OpPrereqError("IP address set to auto but name checks"
201 " have been skipped",
202 errors.ECODE_INVAL)
203 nic_ip = default_ip
204 else:
205
206
207 if ip.lower() == constants.NIC_IP_POOL:
208 if net is None:
209 raise errors.OpPrereqError("if ip=pool, parameter network"
210 " must be passed too",
211 errors.ECODE_INVAL)
212
213 elif not netutils.IPAddress.IsValid(ip):
214 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
215 errors.ECODE_INVAL)
216
217 nic_ip = ip
218
219
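# NICs in routed mode need an IP address to route traffic to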
220 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
221 raise errors.OpPrereqError("Routed nic mode requires an ip address",
222 errors.ECODE_INVAL)
223
224
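# an explicitly requested MAC is normalized and reserved here; auto/generate
# MACs are assigned later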
225 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
226 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
227 mac = utils.NormalizeAndValidateMac(mac)
228
229 try:
230
231 cfg.ReserveMAC(mac, ec_id)
232 except errors.ReservationError:
233 raise errors.OpPrereqError("MAC address %s already in use"
234 " in cluster" % mac,
235 errors.ECODE_NOTUNIQUE)
236
237
238 nicparams = {}
239 if nic_mode_req:
240 nicparams[constants.NIC_MODE] = nic_mode
241 if link:
242 nicparams[constants.NIC_LINK] = link
243 if vlan:
244 nicparams[constants.NIC_VLAN] = vlan
245
246 check_params = cluster.SimpleFillNIC(nicparams)
247 objects.NIC.CheckParameterSyntax(check_params)
248 net_uuid = cfg.LookupNetwork(net)
249 name = nic.get(constants.INIC_NAME, None)
250 if name is not None and name.lower() == constants.VALUE_NONE:
251 name = None
252 nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
253 network=net_uuid, nicparams=nicparams)
254 nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
255 nics.append(nic_obj)
256
257 return nics
258
261 """In case of conflicting IP address raise error.
262
263 @type ip: string
264 @param ip: IP address
265 @type node_uuid: string
266 @param node_uuid: node UUID
267
268 """
269 (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
270 if conf_net is not None:
271 raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
272 " network %s, but the target NIC does not." %
273 (ip, conf_net)),
274 errors.ECODE_STATE)
275
276 return (None, None)
277
282 """Compute if instance specs meets the specs of ipolicy.
283
284 @type ipolicy: dict
285 @param ipolicy: The ipolicy to verify against
286 @type instance_spec: dict
287 @param instance_spec: The instance spec to verify
288 @type disk_template: string
289 @param disk_template: the disk template of the instance
290 @param _compute_fn: The function to verify ipolicy (unittest only)
291 @see: L{ComputeIPolicySpecViolation}
292
293 """
294 mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
295 cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
296 disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
297 disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
298 nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
299 spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)
300
301 return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
302 disk_sizes, spindle_use, disk_template)
303
306 """Check whether an OS name conforms to the os variants specification.
307
308 @type os_obj: L{objects.OS}
309 @param os_obj: OS object to check
310 @type name: string
311 @param name: OS name passed by the user, to check for validity
312
313 """
314 variant = objects.OS.GetVariant(name)
315 if not os_obj.supported_variants:
316 if variant:
317 raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
318 " passed)" % (os_obj.name, variant),
319 errors.ECODE_INVAL)
320 return
321 if not variant:
322 raise errors.OpPrereqError("OS name must include a variant",
323 errors.ECODE_INVAL)
324
325 if variant not in os_obj.supported_variants:
326 raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
327
330 """Create an instance.
331
332 """
333 HPATH = "instance-add"
334 HTYPE = constants.HTYPE_INSTANCE
335 REQ_BGL = False
336
394 """ Check validity of VLANs if given
395
396 """
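# Accepted VLAN forms, as enforced below:
#   "100"       - bare VLAN ID, normalized to ".100"
#   ".100"      - single VLAN ID
#   ".100:200"  - VLAN ID followed by further colon-separated IDs
#   ":100:200"  - colon-separated list of VLAN IDs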
397 for nic in self.op.nics:
398 vlan = nic.get(constants.INIC_VLAN, None)
399 if vlan:
400 if vlan[0] == ".":
401
402
403 if not vlan[1:].isdigit():
404 vlanlist = vlan[1:].split(':')
405 for vl in vlanlist:
406 if not vl.isdigit():
407 raise errors.OpPrereqError("Specified VLAN parameter is "
408 "invalid : %s" % vlan,
409 errors.ECODE_INVAL)
410 elif vlan[0] == ":":
411
412 vlanlist = vlan[1:].split(':')
413 for vl in vlanlist:
414 if not vl.isdigit():
415 raise errors.OpPrereqError("Specified VLAN parameter is invalid"
416 " : %s" % vlan, errors.ECODE_INVAL)
417 elif vlan.isdigit():
418
419
420 nic[constants.INIC_VLAN] = "." + vlan
421 else:
422 raise errors.OpPrereqError("Specified VLAN parameter is invalid"
423 " : %s" % vlan, errors.ECODE_INVAL)
424
426 """Check arguments.
427
428 """
429
430
431 if self.op.no_install and self.op.start:
432 self.LogInfo("No-installation mode selected, disabling startup")
433 self.op.start = False
434
435 self.op.instance_name = \
436 netutils.Hostname.GetNormalizedName(self.op.instance_name)
437
438 if self.op.ip_check and not self.op.name_check:
439
440 raise errors.OpPrereqError("Cannot do IP address check without a name"
441 " check", errors.ECODE_INVAL)
442
443
444 for nic in self.op.nics:
445 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
446
447 utils.ValidateDeviceNames("NIC", self.op.nics)
448
449 self._CheckVLANArguments()
450
451 self._CheckDiskArguments()
452 assert self.op.disk_template is not None
453
454
455 if self.op.name_check:
456 self.hostname = _CheckHostnameSane(self, self.op.instance_name)
457 self.op.instance_name = self.hostname.name
458
459 self.check_ip = self.hostname.ip
460 else:
461 self.check_ip = None
462
463
464 if (self.op.file_driver and
465 not self.op.file_driver in constants.FILE_DRIVER):
466 raise errors.OpPrereqError("Invalid file driver name '%s'" %
467 self.op.file_driver, errors.ECODE_INVAL)
468
469
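# file-based disk templates fall back to the cluster's default file driver
# if none was given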
470 if (not self.op.file_driver and
471 self.op.disk_template in [constants.DT_FILE,
472 constants.DT_SHARED_FILE]):
473 self.op.file_driver = constants.FD_DEFAULT
474
475
476 CheckIAllocatorOrNode(self, "iallocator", "pnode")
477
478 if self.op.pnode is not None:
479 if self.op.disk_template in constants.DTS_INT_MIRROR:
480 if self.op.snode is None:
481 raise errors.OpPrereqError("The networked disk templates need"
482 " a mirror node", errors.ECODE_INVAL)
483 elif self.op.snode:
484 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
485 " template")
486 self.op.snode = None
487
488 _CheckOpportunisticLocking(self.op)
489
490 if self.op.mode == constants.INSTANCE_IMPORT:
491
492
493
494 self.op.force_variant = True
495
496 if self.op.no_install:
497 self.LogInfo("No-installation mode has no effect during import")
498
499 elif self.op.mode == constants.INSTANCE_CREATE:
500 if self.op.os_type is None:
501 raise errors.OpPrereqError("No guest OS specified",
502 errors.ECODE_INVAL)
503 if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
504 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
505 " installation" % self.op.os_type,
506 errors.ECODE_STATE)
507 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
508 self._cds = GetClusterDomainSecret()
509
510
511 src_handshake = self.op.source_handshake
512 if not src_handshake:
513 raise errors.OpPrereqError("Missing source handshake",
514 errors.ECODE_INVAL)
515
516 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
517 src_handshake)
518 if errmsg:
519 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
520 errors.ECODE_INVAL)
521
522
523 self.source_x509_ca_pem = self.op.source_x509_ca
524 if not self.source_x509_ca_pem:
525 raise errors.OpPrereqError("Missing source X509 CA",
526 errors.ECODE_INVAL)
527
528 try:
529 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
530 self._cds)
531 except OpenSSL.crypto.Error, err:
532 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
533 (err, ), errors.ECODE_INVAL)
534
535 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
536 if errcode is not None:
537 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
538 errors.ECODE_INVAL)
539
540 self.source_x509_ca = cert
541
542 src_instance_name = self.op.source_instance_name
543 if not src_instance_name:
544 raise errors.OpPrereqError("Missing source instance name",
545 errors.ECODE_INVAL)
546
547 self.source_instance_name = \
548 netutils.GetHostname(name=src_instance_name).name
549
550 else:
551 raise errors.OpPrereqError("Invalid instance creation mode %r" %
552 self.op.mode, errors.ECODE_INVAL)
553
555 """ExpandNames for CreateInstance.
556
557 Figure out the right locks for instance creation.
558
559 """
560 self.needed_locks = {}
561
562
563
564 if self.op.instance_name in\
565 [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
566 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
567 self.op.instance_name, errors.ECODE_EXISTS)
568
569 self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
570
571 if self.op.iallocator:
572
573
574
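# with an iallocator the target nodes are not known yet, so all node locks
# are requested; opportunistic locking relaxes this to whatever can actually
# be acquired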
575 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
576 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
577
578 if self.op.opportunistic_locking:
579 self.opportunistic_locks[locking.LEVEL_NODE] = True
580 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
581 else:
582 (self.op.pnode_uuid, self.op.pnode) = \
583 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
584 nodelist = [self.op.pnode_uuid]
585 if self.op.snode is not None:
586 (self.op.snode_uuid, self.op.snode) = \
587 ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
588 nodelist.append(self.op.snode_uuid)
589 self.needed_locks[locking.LEVEL_NODE] = nodelist
590
591
592 if self.op.mode == constants.INSTANCE_IMPORT:
593 src_node = self.op.src_node
594 src_path = self.op.src_path
595
596 if src_path is None:
597 self.op.src_path = src_path = self.op.instance_name
598
599 if src_node is None:
600 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
601 self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
602 self.op.src_node = None
603 if os.path.isabs(src_path):
604 raise errors.OpPrereqError("Importing an instance from a path"
605 " requires a source node option",
606 errors.ECODE_INVAL)
607 else:
608 (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
609 ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
610 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
611 self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
612 if not os.path.isabs(src_path):
613 self.op.src_path = \
614 utils.PathJoin(pathutils.EXPORT_DIR, src_path)
615
616 self.needed_locks[locking.LEVEL_NODE_RES] = \
617 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
618
619
620
621
622
623 if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
624
625
626
627
628 self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
629 else:
630 self.needed_locks[locking.LEVEL_NODEGROUP] = \
631 list(self.cfg.GetNodeGroupsFromNodes(
632 self.needed_locks[locking.LEVEL_NODE]))
633 self.share_locks[locking.LEVEL_NODEGROUP] = 1
634
636 """Run the allocator based on input opcode.
637
638 """
639 if self.op.opportunistic_locking:
640
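# only nodes whose regular and resource locks were actually acquired may be
# offered to the allocator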
641 node_name_whitelist = self.cfg.GetNodeNames(
642 set(self.owned_locks(locking.LEVEL_NODE)) &
643 set(self.owned_locks(locking.LEVEL_NODE_RES)))
644 else:
645 node_name_whitelist = None
646
647 req = _CreateInstanceAllocRequest(self.op, self.disks,
648 self.nics, self.be_full,
649 node_name_whitelist)
650 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
651
652 ial.Run(self.op.iallocator)
653
654 if not ial.success:
655
656 if self.op.opportunistic_locking:
657 ecode = errors.ECODE_TEMP_NORES
658 else:
659 ecode = errors.ECODE_NORES
660
661 raise errors.OpPrereqError("Can't compute nodes using"
662 " iallocator '%s': %s" %
663 (self.op.iallocator, ial.info),
664 ecode)
665
666 (self.op.pnode_uuid, self.op.pnode) = \
667 ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
668 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
669 self.op.instance_name, self.op.iallocator,
670 utils.CommaJoin(ial.result))
671
672 assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
673
674 if req.RequiredNodes() == 2:
675 (self.op.snode_uuid, self.op.snode) = \
676 ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
677
679 """Build hooks env.
680
681 This runs on master, primary and secondary nodes of the instance.
682
683 """
684 env = {
685 "ADD_MODE": self.op.mode,
686 }
687 if self.op.mode == constants.INSTANCE_IMPORT:
688 env["SRC_NODE"] = self.op.src_node
689 env["SRC_PATH"] = self.op.src_path
690 env["SRC_IMAGES"] = self.src_images
691
692 env.update(BuildInstanceHookEnv(
693 name=self.op.instance_name,
694 primary_node_name=self.op.pnode,
695 secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
696 status=self.op.start,
697 os_type=self.op.os_type,
698 minmem=self.be_full[constants.BE_MINMEM],
699 maxmem=self.be_full[constants.BE_MAXMEM],
700 vcpus=self.be_full[constants.BE_VCPUS],
701 nics=NICListToTuple(self, self.nics),
702 disk_template=self.op.disk_template,
703 disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
704 d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
705 for d in self.disks],
706 bep=self.be_full,
707 hvp=self.hv_full,
708 hypervisor_name=self.op.hypervisor,
709 tags=self.op.tags,
710 ))
711
712 return env
713
715 """Build hooks nodes.
716
717 """
718 nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
719 return nl, nl
720
def _ReadExportParams(self, einfo):
768 """Use export parameters as defaults.
769
770 In case the opcode doesn't specify (as in override) some instance
771 parameters, then try to use them from the export information, if
772 that declares them.
773
774 """
775 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
776
777 if not self.op.disks:
778 disks = []
779
780 for idx in range(constants.MAX_DISKS):
781 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
782 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
783 disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
784 disk = {
785 constants.IDISK_SIZE: disk_sz,
786 constants.IDISK_NAME: disk_name
787 }
788 disks.append(disk)
789 self.op.disks = disks
790 if not disks and self.op.disk_template != constants.DT_DISKLESS:
791 raise errors.OpPrereqError("No disk info specified and the export"
792 " is missing the disk information",
793 errors.ECODE_INVAL)
794
795 if not self.op.nics:
796 nics = []
797 for idx in range(constants.MAX_NICS):
798 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
799 ndict = {}
800 for name in [constants.INIC_IP,
801 constants.INIC_MAC, constants.INIC_NAME]:
802 nic_param_name = "nic%d_%s" % (idx, name)
803 if einfo.has_option(constants.INISECT_INS, nic_param_name):
804 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
805 ndict[name] = v
806 network = einfo.get(constants.INISECT_INS,
807 "nic%d_%s" % (idx, constants.INIC_NETWORK))
808
809
810 if network:
811 ndict[constants.INIC_NETWORK] = network
812 else:
813 for name in list(constants.NICS_PARAMETERS):
814 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
815 ndict[name] = v
816 nics.append(ndict)
817 else:
818 break
819 self.op.nics = nics
820
821 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
822 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
823
824 if (self.op.hypervisor is None and
825 einfo.has_option(constants.INISECT_INS, "hypervisor")):
826 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
827
828 if einfo.has_section(constants.INISECT_HYP):
829
830
831 for name, value in einfo.items(constants.INISECT_HYP):
832 if name not in self.op.hvparams:
833 self.op.hvparams[name] = value
834
835 if einfo.has_section(constants.INISECT_BEP):
836
837 for name, value in einfo.items(constants.INISECT_BEP):
838 if name not in self.op.beparams:
839 self.op.beparams[name] = value
840
841 if name == constants.BE_MEMORY:
842 if constants.BE_MAXMEM not in self.op.beparams:
843 self.op.beparams[constants.BE_MAXMEM] = value
844 if constants.BE_MINMEM not in self.op.beparams:
845 self.op.beparams[constants.BE_MINMEM] = value
846 else:
847
848 for name in constants.BES_PARAMETERS:
849 if (name not in self.op.beparams and
850 einfo.has_option(constants.INISECT_INS, name)):
851 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
852
853 if einfo.has_section(constants.INISECT_OSP):
854
855 for name, value in einfo.items(constants.INISECT_OSP):
856 if name not in self.op.osparams:
857 self.op.osparams[name] = value
858
def _CalculateFileStorageDir(self):
886 """Calculate final instance file storage dir.
887
888 """
889
890 self.instance_file_storage_dir = None
891 if self.op.disk_template in constants.DTS_FILEBASED:
892
893 joinargs = []
894
895 if self.op.disk_template == constants.DT_SHARED_FILE:
896 get_fsd_fn = self.cfg.GetSharedFileStorageDir
897 else:
898 get_fsd_fn = self.cfg.GetFileStorageDir
899
900 cfg_storagedir = get_fsd_fn()
901 if not cfg_storagedir:
902 raise errors.OpPrereqError("Cluster file storage dir not defined",
903 errors.ECODE_STATE)
904 joinargs.append(cfg_storagedir)
905
906 if self.op.file_storage_dir is not None:
907 joinargs.append(self.op.file_storage_dir)
908
909 joinargs.append(self.op.instance_name)
910
911
912 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
913
915 """Check prerequisites.
916
917 """
918
919
920 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
921 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
922 cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
923 if not owned_groups.issuperset(cur_groups):
924 raise errors.OpPrereqError("New instance %s's node groups changed since"
925 " locks were acquired, current groups are"
926 " are '%s', owning groups '%s'; retry the"
927 " operation" %
928 (self.op.instance_name,
929 utils.CommaJoin(cur_groups),
930 utils.CommaJoin(owned_groups)),
931 errors.ECODE_STATE)
932
933 self._CalculateFileStorageDir()
934
935 if self.op.mode == constants.INSTANCE_IMPORT:
936 export_info = self._ReadExportInfo()
937 self._ReadExportParams(export_info)
938 self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
939 else:
940 self._old_instance_name = None
941
942 if (not self.cfg.GetVGName() and
943 self.op.disk_template not in constants.DTS_NOT_LVM):
944 raise errors.OpPrereqError("Cluster does not support lvm-based"
945 " instances", errors.ECODE_STATE)
946
947 if (self.op.hypervisor is None or
948 self.op.hypervisor == constants.VALUE_AUTO):
949 self.op.hypervisor = self.cfg.GetHypervisorType()
950
951 cluster = self.cfg.GetClusterInfo()
952 enabled_hvs = cluster.enabled_hypervisors
953 if self.op.hypervisor not in enabled_hvs:
954 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
955 " cluster (%s)" %
956 (self.op.hypervisor, ",".join(enabled_hvs)),
957 errors.ECODE_STATE)
958
959
960 for tag in self.op.tags:
961 objects.TaggableObject.ValidateTag(tag)
962
963
964 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
965 filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
966 self.op.hvparams)
967 hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
968 hv_type.CheckParameterSyntax(filled_hvp)
969 self.hv_full = filled_hvp
970
971 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
972 "instance", "cluster")
973
974
975 self.be_full = _ComputeFullBeParams(self.op, cluster)
976
977
978 self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)
979
980
981
982 if self.op.identify_defaults:
983 self._RevertToDefaults(cluster)
984
985
986 self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
987 self.proc.GetECId())
988
989
990 default_vg = self.cfg.GetVGName()
991 self.disks = ComputeDisks(self.op, default_vg)
992
993 if self.op.mode == constants.INSTANCE_IMPORT:
994 disk_images = []
995 for idx in range(len(self.disks)):
996 option = "disk%d_dump" % idx
997 if export_info.has_option(constants.INISECT_INS, option):
998
999 export_name = export_info.get(constants.INISECT_INS, option)
1000 image = utils.PathJoin(self.op.src_path, export_name)
1001 disk_images.append(image)
1002 else:
1003 disk_images.append(False)
1004
1005 self.src_images = disk_images
1006
1007 if self.op.instance_name == self._old_instance_name:
1008 for idx, nic in enumerate(self.nics):
1009 if nic.mac == constants.VALUE_AUTO:
1010 nic_mac_ini = "nic%d_mac" % idx
1011 nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)
1012
1013
1014
1015
1016 if self.op.ip_check:
1017 if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
1018 raise errors.OpPrereqError("IP %s of instance %s already in use" %
1019 (self.check_ip, self.op.instance_name),
1020 errors.ECODE_NOTUNIQUE)
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030 for nic in self.nics:
1031 if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
1032 nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())
1033
1034
1035
1036 if self.op.iallocator is not None:
1037 self._RunAllocator()
1038
1039
1040 keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
1041 self.op.src_node_uuid])
1042 ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
1043 ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
1044 ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
1045
1046 ReleaseLocks(self, locking.LEVEL_NODEGROUP,
1047 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))
1048
1049 assert (self.owned_locks(locking.LEVEL_NODE) ==
1050 self.owned_locks(locking.LEVEL_NODE_RES)), \
1051 "Node locks differ from node resource locks"
1052
1053
1054
1055
1056 self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1057 assert self.pnode is not None, \
1058 "Cannot retrieve locked node %s" % self.op.pnode_uuid
1059 if pnode.offline:
1060 raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
1061 pnode.name, errors.ECODE_STATE)
1062 if pnode.drained:
1063 raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
1064 pnode.name, errors.ECODE_STATE)
1065 if not pnode.vm_capable:
1066 raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
1067 " '%s'" % pnode.name, errors.ECODE_STATE)
1068
1069 self.secondaries = []
1070
1071
1072
1073 for idx, nic in enumerate(self.nics):
1074 net_uuid = nic.network
1075 if net_uuid is not None:
1076 nobj = self.cfg.GetNetwork(net_uuid)
1077 netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
1078 if netparams is None:
1079 raise errors.OpPrereqError("No netparams found for network"
1080 " %s. Probably not connected to"
1081 " node's %s nodegroup" %
1082 (nobj.name, self.pnode.name),
1083 errors.ECODE_INVAL)
1084 self.LogInfo("NIC/%d inherits netparams %s" %
1085 (idx, netparams.values()))
1086 nic.nicparams = dict(netparams)
1087 if nic.ip is not None:
1088 if nic.ip.lower() == constants.NIC_IP_POOL:
1089 try:
1090 nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
1091 except errors.ReservationError:
1092 raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
1093 " from the address pool" % idx,
1094 errors.ECODE_STATE)
1095 self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
1096 else:
1097 try:
1098 self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
1099 check=self.op.conflicts_check)
1100 except errors.ReservationError:
1101 raise errors.OpPrereqError("IP address %s already in use"
1102 " or does not belong to network %s" %
1103 (nic.ip, nobj.name),
1104 errors.ECODE_NOTUNIQUE)
1105
1106
1107 elif self.op.conflicts_check:
1108 _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)
1109
1110
1111 if self.op.disk_template in constants.DTS_INT_MIRROR:
1112 if self.op.snode_uuid == pnode.uuid:
1113 raise errors.OpPrereqError("The secondary node cannot be the"
1114 " primary node", errors.ECODE_INVAL)
1115 CheckNodeOnline(self, self.op.snode_uuid)
1116 CheckNodeNotDrained(self, self.op.snode_uuid)
1117 CheckNodeVmCapable(self, self.op.snode_uuid)
1118 self.secondaries.append(self.op.snode_uuid)
1119
1120 snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
1121 if pnode.group != snode.group:
1122 self.LogWarning("The primary and secondary nodes are in two"
1123 " different node groups; the disk parameters"
1124 " from the first disk's node group will be"
1125 " used")
1126
1127 nodes = [pnode]
1128 if self.op.disk_template in constants.DTS_INT_MIRROR:
1129 nodes.append(snode)
1130 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
1131 excl_stor = compat.any(map(has_es, nodes))
1132 if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
1133 raise errors.OpPrereqError("Disk template %s not supported with"
1134 " exclusive storage" % self.op.disk_template,
1135 errors.ECODE_STATE)
1136 for disk in self.disks:
1137 CheckSpindlesExclusiveStorage(disk, excl_stor, True)
1138
1139 node_uuids = [pnode.uuid] + self.secondaries
1140
1141 if not self.adopt_disks:
1142 if self.op.disk_template == constants.DT_RBD:
1143
1144
1145
1146 CheckRADOSFreeSpace()
1147 elif self.op.disk_template == constants.DT_EXT:
1148
1149 pass
1150 elif self.op.disk_template in constants.DTS_LVM:
1151
1152 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
1153 CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
1154 else:
1155
1156 pass
1157
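# adoption of existing LVs (plain template): volume names must be unique,
# the LVs must exist on the primary node and must not be online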
1158 elif self.op.disk_template == constants.DT_PLAIN:
1159 all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
1160 disk[constants.IDISK_ADOPT])
1161 for disk in self.disks])
1162 if len(all_lvs) != len(self.disks):
1163 raise errors.OpPrereqError("Duplicate volume names given for adoption",
1164 errors.ECODE_INVAL)
1165 for lv_name in all_lvs:
1166 try:
1167
1168
1169 self.cfg.ReserveLV(lv_name, self.proc.GetECId())
1170 except errors.ReservationError:
1171 raise errors.OpPrereqError("LV named %s used by another instance" %
1172 lv_name, errors.ECODE_NOTUNIQUE)
1173
1174 vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
1175 vg_names.Raise("Cannot get VG information from node %s" % pnode.name,
1176 prereq=True)
1177
1178 node_lvs = self.rpc.call_lv_list([pnode.uuid],
1179 vg_names.payload.keys())[pnode.uuid]
1180 node_lvs.Raise("Cannot get LV information from node %s" % pnode.name,
1181 prereq=True)
1182 node_lvs = node_lvs.payload
1183
1184 delta = all_lvs.difference(node_lvs.keys())
1185 if delta:
1186 raise errors.OpPrereqError("Missing logical volume(s): %s" %
1187 utils.CommaJoin(delta),
1188 errors.ECODE_INVAL)
1189 online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
1190 if online_lvs:
1191 raise errors.OpPrereqError("Online logical volumes found, cannot"
1192 " adopt: %s" % utils.CommaJoin(online_lvs),
1193 errors.ECODE_STATE)
1194
1195 for dsk in self.disks:
1196 dsk[constants.IDISK_SIZE] = \
1197 int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
1198 dsk[constants.IDISK_ADOPT])][0]))
1199
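# adoption of existing block devices: they must live under the adoptable
# block device root and be visible on the primary node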
1200 elif self.op.disk_template == constants.DT_BLOCK:
1201
1202 all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
1203 for disk in self.disks])
1204 if len(all_disks) != len(self.disks):
1205 raise errors.OpPrereqError("Duplicate disk names given for adoption",
1206 errors.ECODE_INVAL)
1207 baddisks = [d for d in all_disks
1208 if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
1209 if baddisks:
1210 raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
1211 " cannot be adopted" %
1212 (utils.CommaJoin(baddisks),
1213 constants.ADOPTABLE_BLOCKDEV_ROOT),
1214 errors.ECODE_INVAL)
1215
1216 node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
1217 list(all_disks))[pnode.uuid]
1218 node_disks.Raise("Cannot get block device information from node %s" %
1219 pnode.name, prereq=True)
1220 node_disks = node_disks.payload
1221 delta = all_disks.difference(node_disks.keys())
1222 if delta:
1223 raise errors.OpPrereqError("Missing block device(s): %s" %
1224 utils.CommaJoin(delta),
1225 errors.ECODE_INVAL)
1226 for dsk in self.disks:
1227 dsk[constants.IDISK_SIZE] = \
1228 int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
1229
1230
1231 node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
1232 node_group = self.cfg.GetNodeGroup(node_info.group)
1233 disk_params = self.cfg.GetGroupDiskParams(node_group)
1234 access_type = disk_params[self.op.disk_template].get(
1235 constants.RBD_ACCESS, constants.DISK_KERNELSPACE
1236 )
1237
1238 if not IsValidDiskAccessModeCombination(self.op.hypervisor,
1239 self.op.disk_template,
1240 access_type):
1241 raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
1242 " used with %s disk access param" %
1243 (self.op.hypervisor, access_type),
1244 errors.ECODE_STATE)
1245
1246
1247 spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
1248 ispec = {
1249 constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
1250 constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
1251 constants.ISPEC_DISK_COUNT: len(self.disks),
1252 constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
1253 for disk in self.disks],
1254 constants.ISPEC_NIC_COUNT: len(self.nics),
1255 constants.ISPEC_SPINDLE_USE: spindle_use,
1256 }
1257
1258 group_info = self.cfg.GetNodeGroup(pnode.group)
1259 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1260 res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
1261 self.op.disk_template)
1262 if not self.op.ignore_ipolicy and res:
1263 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1264 (pnode.group, group_info.name, utils.CommaJoin(res)))
1265 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1266
1267 CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)
1268
1269 CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
1270
1271 CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)
1272
1273 CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)
1274
1275
1276
1277
1278
1279
1280 if self.op.start:
1281 hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
1282 self.op.hvparams)
1283 CheckNodeFreeMemory(self, self.pnode.uuid,
1284 "creating instance %s" % self.op.instance_name,
1285 self.be_full[constants.BE_MAXMEM],
1286 self.op.hypervisor, hvfull)
1287
1288 self.dry_run_result = list(node_uuids)
1289
1290 def Exec(self, feedback_fn):
1291 """Create and add the instance to the cluster.
1292
1293 """
1294 assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
1295 self.owned_locks(locking.LEVEL_NODE)), \
1296 "Node locks differ from node resource locks"
1297 assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)
1298
1299 ht_kind = self.op.hypervisor
1300 if ht_kind in constants.HTS_REQ_PORT:
1301 network_port = self.cfg.AllocatePort()
1302 else:
1303 network_port = None
1304
1305 instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1306
1307
1308
1309
1310 nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
1311 disks = GenerateDiskTemplate(self,
1312 self.op.disk_template,
1313 instance_uuid, self.pnode.uuid,
1314 self.secondaries,
1315 self.disks,
1316 self.instance_file_storage_dir,
1317 self.op.file_driver,
1318 0,
1319 feedback_fn,
1320 self.cfg.GetGroupDiskParams(nodegroup))
1321
1322 iobj = objects.Instance(name=self.op.instance_name,
1323 uuid=instance_uuid,
1324 os=self.op.os_type,
1325 primary_node=self.pnode.uuid,
1326 nics=self.nics, disks=disks,
1327 disk_template=self.op.disk_template,
1328 disks_active=False,
1329 admin_state=constants.ADMINST_DOWN,
1330 network_port=network_port,
1331 beparams=self.op.beparams,
1332 hvparams=self.op.hvparams,
1333 hypervisor=self.op.hypervisor,
1334 osparams=self.op.osparams,
1335 )
1336
1337 if self.op.tags:
1338 for tag in self.op.tags:
1339 iobj.AddTag(tag)
1340
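# adopted plain LVs are renamed to the generated names (block devices need
# no change); otherwise fresh disks are created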
1341 if self.adopt_disks:
1342 if self.op.disk_template == constants.DT_PLAIN:
1343
1344
1345 tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
1346 rename_to = []
1347 for t_dsk, a_dsk in zip(tmp_disks, self.disks):
1348 rename_to.append(t_dsk.logical_id)
1349 t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
1350 result = self.rpc.call_blockdev_rename(self.pnode.uuid,
1351 zip(tmp_disks, rename_to))
1352 result.Raise("Failed to rename adopted LVs")
1353 else:
1354 feedback_fn("* creating instance disks...")
1355 try:
1356 CreateDisks(self, iobj)
1357 except errors.OpExecError:
1358 self.LogWarning("Device creation failed")
1359 self.cfg.ReleaseDRBDMinors(instance_uuid)
1360 raise
1361
1362 feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
1363
1364 self.cfg.AddInstance(iobj, self.proc.GetECId())
1365
1366
1367
1368 del self.remove_locks[locking.LEVEL_INSTANCE]
1369
1370 if self.op.mode == constants.INSTANCE_IMPORT:
1371
1372 ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
1373 else:
1374
1375 ReleaseLocks(self, locking.LEVEL_NODE)
1376
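# optionally wipe the new disks, then wait for the initial sync (or just
# check the mirror status)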
1377 disk_abort = False
1378 if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
1379 feedback_fn("* wiping instance disks...")
1380 try:
1381 WipeDisks(self, iobj)
1382 except errors.OpExecError, err:
1383 logging.exception("Wiping disks failed")
1384 self.LogWarning("Wiping instance disks failed (%s)", err)
1385 disk_abort = True
1386
1387 if disk_abort:
1388
1389 pass
1390 elif self.op.wait_for_sync:
1391 disk_abort = not WaitForSync(self, iobj)
1392 elif iobj.disk_template in constants.DTS_INT_MIRROR:
1393
1394 feedback_fn("* checking mirrors status")
1395 disk_abort = not WaitForSync(self, iobj, oneshot=True)
1396 else:
1397 disk_abort = False
1398
1399 if disk_abort:
1400 RemoveDisks(self, iobj)
1401 self.cfg.RemoveInstance(iobj.uuid)
1402
1403 self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
1404 raise errors.OpExecError("There are some degraded disks for"
1405 " this instance")
1406
1407
1408 iobj.disks_active = True
1409
1410
1411 ReleaseLocks(self, locking.LEVEL_NODE_RES)
1412
1413 if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
1414 if self.op.mode == constants.INSTANCE_CREATE:
1415 if not self.op.no_install:
1416 pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
1417 not self.op.wait_for_sync)
1418 if pause_sync:
1419 feedback_fn("* pausing disk sync to install instance OS")
1420 result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1421 (iobj.disks,
1422 iobj), True)
1423 for idx, success in enumerate(result.payload):
1424 if not success:
1425 logging.warn("pause-sync of instance %s for disk %d failed",
1426 self.op.instance_name, idx)
1427
1428 feedback_fn("* running the instance OS create scripts...")
1429
1430 os_add_result = \
1431 self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
1432 self.op.debug_level)
1433 if pause_sync:
1434 feedback_fn("* resuming disk sync")
1435 result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1436 (iobj.disks,
1437 iobj), False)
1438 for idx, success in enumerate(result.payload):
1439 if not success:
1440 logging.warn("resume-sync of instance %s for disk %d failed",
1441 self.op.instance_name, idx)
1442
1443 os_add_result.Raise("Could not add os for instance %s"
1444 " on node %s" % (self.op.instance_name,
1445 self.pnode.name))
1446
1447 else:
1448 if self.op.mode == constants.INSTANCE_IMPORT:
1449 feedback_fn("* running the instance OS import scripts...")
1450
1451 transfers = []
1452
1453 for idx, image in enumerate(self.src_images):
1454 if not image:
1455 continue
1456
1457
1458 dt = masterd.instance.DiskTransfer("disk/%s" % idx,
1459 constants.IEIO_FILE, (image, ),
1460 constants.IEIO_SCRIPT,
1461 ((iobj.disks[idx], iobj), idx),
1462 None)
1463 transfers.append(dt)
1464
1465 import_result = \
1466 masterd.instance.TransferInstanceData(self, feedback_fn,
1467 self.op.src_node_uuid,
1468 self.pnode.uuid,
1469 self.pnode.secondary_ip,
1470 iobj, transfers)
1471 if not compat.all(import_result):
1472 self.LogWarning("Some disks for instance %s on node %s were not"
1473 " imported successfully" % (self.op.instance_name,
1474 self.pnode.name))
1475
1476 rename_from = self._old_instance_name
1477
1478 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
1479 feedback_fn("* preparing remote import...")
1480
1481
1482
1483
1484 connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
1485 self.op.source_shutdown_timeout)
1486 timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)
1487
1488 assert iobj.primary_node == self.pnode.uuid
1489 disk_results = \
1490 masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
1491 self.source_x509_ca,
1492 self._cds, timeouts)
1493 if not compat.all(disk_results):
1494
1495
1496 self.LogWarning("Some disks for instance %s on node %s were not"
1497 " imported successfully" % (self.op.instance_name,
1498 self.pnode.name))
1499
1500 rename_from = self.source_instance_name
1501
1502 else:
1503
1504 raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
1505 % self.op.mode)
1506
1507
1508 assert iobj.name == self.op.instance_name
1509 feedback_fn("Running rename script for %s" % self.op.instance_name)
1510 result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
1511 rename_from,
1512 self.op.debug_level)
1513 result.Warn("Failed to run rename script for %s on node %s" %
1514 (self.op.instance_name, self.pnode.name), self.LogWarning)
1515
1516 assert not self.owned_locks(locking.LEVEL_NODE_RES)
1517
1518 if self.op.start:
1519 iobj.admin_state = constants.ADMINST_UP
1520 self.cfg.Update(iobj, feedback_fn)
1521 logging.info("Starting instance %s on node %s", self.op.instance_name,
1522 self.pnode.name)
1523 feedback_fn("* starting instance...")
1524 result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
1525 False, self.op.reason)
1526 result.Raise("Could not start instance")
1527
1528 return self.cfg.GetNodeNames(list(iobj.all_nodes))
1529
1532 """Rename an instance.
1533
1534 """
1535 HPATH = "instance-rename"
1536 HTYPE = constants.HTYPE_INSTANCE
1537
1539 """Check arguments.
1540
1541 """
1542 if self.op.ip_check and not self.op.name_check:
1543
1544 raise errors.OpPrereqError("IP address check requires a name check",
1545 errors.ECODE_INVAL)
1546
1548 """Build hooks env.
1549
1550 This runs on master, primary and secondary nodes of the instance.
1551
1552 """
1553 env = BuildInstanceHookEnvByObject(self, self.instance)
1554 env["INSTANCE_NEW_NAME"] = self.op.new_name
1555 return env
1556
def CheckPrereq(self):
1565 """Check prerequisites.
1566
1567 This checks that the instance is in the cluster and is not running.
1568
1569 """
1570 (self.op.instance_uuid, self.op.instance_name) = \
1571 ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
1572 self.op.instance_name)
1573 instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1574 assert instance is not None
1575
1576
1577
1578
1579 if (instance.disk_template in constants.DTS_FILEBASED and
1580 self.op.new_name != instance.name):
1581 CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
1582 instance.disk_template)
1583
1584 CheckNodeOnline(self, instance.primary_node)
1585 CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1586 msg="cannot rename")
1587 self.instance = instance
1588
1589 new_name = self.op.new_name
1590 if self.op.name_check:
1591 hostname = _CheckHostnameSane(self, new_name)
1592 new_name = self.op.new_name = hostname.name
1593 if (self.op.ip_check and
1594 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1595 raise errors.OpPrereqError("IP %s of instance %s already in use" %
1596 (hostname.ip, new_name),
1597 errors.ECODE_NOTUNIQUE)
1598
1599 instance_names = [inst.name for
1600 inst in self.cfg.GetAllInstancesInfo().values()]
1601 if new_name in instance_names and new_name != instance.name:
1602 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1603 new_name, errors.ECODE_EXISTS)
1604
1605 def Exec(self, feedback_fn):
1606 """Rename the instance.
1607
1608 """
1609 old_name = self.instance.name
1610
1611 rename_file_storage = False
1612 if (self.instance.disk_template in constants.DTS_FILEBASED and
1613 self.op.new_name != self.instance.name):
1614 old_file_storage_dir = os.path.dirname(
1615 self.instance.disks[0].logical_id[1])
1616 rename_file_storage = True
1617
1618 self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)
1619
1620
1621 assert self.REQ_BGL
1622 assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
1623 self.glm.remove(locking.LEVEL_INSTANCE, old_name)
1624 self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)
1625
1626
1627 renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)
1628
1629 if rename_file_storage:
1630 new_file_storage_dir = os.path.dirname(
1631 renamed_inst.disks[0].logical_id[1])
1632 result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
1633 old_file_storage_dir,
1634 new_file_storage_dir)
1635 result.Raise("Could not rename on node %s directory '%s' to '%s'"
1636 " (but the instance has been renamed in Ganeti)" %
1637 (self.cfg.GetNodeName(renamed_inst.primary_node),
1638 old_file_storage_dir, new_file_storage_dir))
1639
1640 StartInstanceDisks(self, renamed_inst, None)
1641
1642 info = GetInstanceInfoText(renamed_inst)
1643 for (idx, disk) in enumerate(renamed_inst.disks):
1644 for node_uuid in renamed_inst.all_nodes:
1645 result = self.rpc.call_blockdev_setinfo(node_uuid,
1646 (disk, renamed_inst), info)
1647 result.Warn("Error setting info on node %s for disk %s" %
1648 (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
1649 try:
1650 result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
1651 renamed_inst, old_name,
1652 self.op.debug_level)
1653 result.Warn("Could not run OS rename script for instance %s on node %s"
1654 " (but the instance has been renamed in Ganeti)" %
1655 (renamed_inst.name,
1656 self.cfg.GetNodeName(renamed_inst.primary_node)),
1657 self.LogWarning)
1658 finally:
1659 ShutdownInstanceDisks(self, renamed_inst)
1660
1661 return renamed_inst.name
1662
1665 """Remove an instance.
1666
1667 """
1668 HPATH = "instance-remove"
1669 HTYPE = constants.HTYPE_INSTANCE
1670 REQ_BGL = False
1671
1687 """Build hooks env.
1688
1689 This runs on master, primary and secondary nodes of the instance.
1690
1691 """
1692 env = BuildInstanceHookEnvByObject(self, self.instance)
1693 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1694 return env
1695
1697 """Build hooks nodes.
1698
1699 """
1700 nl = [self.cfg.GetMasterNode()]
1701 nl_post = list(self.instance.all_nodes) + nl
1702 return (nl, nl_post)
1703
1705 """Check prerequisites.
1706
1707 This checks that the instance is in the cluster.
1708
1709 """
1710 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1711 assert self.instance is not None, \
1712 "Cannot retrieve locked instance %s" % self.op.instance_name
1713
1714 def Exec(self, feedback_fn):
1715 """Remove the instance.
1716
1717 """
1718 logging.info("Shutting down instance %s on node %s", self.instance.name,
1719 self.cfg.GetNodeName(self.instance.primary_node))
1720
1721 result = self.rpc.call_instance_shutdown(self.instance.primary_node,
1722 self.instance,
1723 self.op.shutdown_timeout,
1724 self.op.reason)
1725 if self.op.ignore_failures:
1726 result.Warn("Warning: can't shutdown instance", feedback_fn)
1727 else:
1728 result.Raise("Could not shutdown instance %s on node %s" %
1729 (self.instance.name,
1730 self.cfg.GetNodeName(self.instance.primary_node)))
1731
1732 assert (self.owned_locks(locking.LEVEL_NODE) ==
1733 self.owned_locks(locking.LEVEL_NODE_RES))
1734 assert not (set(self.instance.all_nodes) -
1735 self.owned_locks(locking.LEVEL_NODE)), \
1736 "Not owning correct locks"
1737
1738 RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)
1739
1742 """Move an instance by data-copying.
1743
1744 """
1745 HPATH = "instance-move"
1746 HTYPE = constants.HTYPE_INSTANCE
1747 REQ_BGL = False
1748
1767 """Build hooks env.
1768
1769 This runs on master, primary and secondary nodes of the instance.
1770
1771 """
1772 env = {
1773 "TARGET_NODE": self.op.target_node,
1774 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1775 }
1776 env.update(BuildInstanceHookEnvByObject(self, self.instance))
1777 return env
1778
1780 """Build hooks nodes.
1781
1782 """
1783 nl = [
1784 self.cfg.GetMasterNode(),
1785 self.instance.primary_node,
1786 self.op.target_node_uuid,
1787 ]
1788 return (nl, nl)
1789
1791 """Check prerequisites.
1792
1793 This checks that the instance is in the cluster.
1794
1795 """
1796 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1797 assert self.instance is not None, \
1798 "Cannot retrieve locked instance %s" % self.op.instance_name
1799
1800 if self.instance.disk_template not in constants.DTS_COPYABLE:
1801 raise errors.OpPrereqError("Disk template %s not suitable for copying" %
1802 self.instance.disk_template,
1803 errors.ECODE_STATE)
1804
1805 target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
1806 assert target_node is not None, \
1807 "Cannot retrieve locked node %s" % self.op.target_node
1808
1809 self.target_node_uuid = target_node.uuid
1810 if target_node.uuid == self.instance.primary_node:
1811 raise errors.OpPrereqError("Instance %s is already on the node %s" %
1812 (self.instance.name, target_node.name),
1813 errors.ECODE_STATE)
1814
1815 bep = self.cfg.GetClusterInfo().FillBE(self.instance)
1816
1817 for idx, dsk in enumerate(self.instance.disks):
1818 if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
1819 constants.DT_SHARED_FILE):
1820 raise errors.OpPrereqError("Instance disk %d has a complex layout,"
1821 " cannot copy" % idx, errors.ECODE_STATE)
1822
1823 CheckNodeOnline(self, target_node.uuid)
1824 CheckNodeNotDrained(self, target_node.uuid)
1825 CheckNodeVmCapable(self, target_node.uuid)
1826 cluster = self.cfg.GetClusterInfo()
1827 group_info = self.cfg.GetNodeGroup(target_node.group)
1828 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
1829 CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
1830 ignore=self.op.ignore_ipolicy)
1831
1832 if self.instance.admin_state == constants.ADMINST_UP:
1833
1834 CheckNodeFreeMemory(
1835 self, target_node.uuid, "failing over instance %s" %
1836 self.instance.name, bep[constants.BE_MAXMEM],
1837 self.instance.hypervisor,
1838 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1839 else:
1840 self.LogInfo("Not checking memory on the secondary node as"
1841 " instance will not be started")
1842
1843
1844 CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)
1845
1846 def Exec(self, feedback_fn):
1847 """Move an instance.
1848
1849 The move is done by shutting it down on its present node, copying
1850 the data over (slow) and starting it on the new node.
1851
1852 """
1853 source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
1854 target_node = self.cfg.GetNodeInfo(self.target_node_uuid)
1855
1856 self.LogInfo("Shutting down instance %s on source node %s",
1857 self.instance.name, source_node.name)
1858
1859 assert (self.owned_locks(locking.LEVEL_NODE) ==
1860 self.owned_locks(locking.LEVEL_NODE_RES))
1861
1862 result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
1863 self.op.shutdown_timeout,
1864 self.op.reason)
1865 if self.op.ignore_consistency:
1866 result.Warn("Could not shutdown instance %s on node %s. Proceeding"
1867 " anyway. Please make sure node %s is down. Error details" %
1868 (self.instance.name, source_node.name, source_node.name),
1869 self.LogWarning)
1870 else:
1871 result.Raise("Could not shutdown instance %s on node %s" %
1872 (self.instance.name, source_node.name))
1873
1874
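# create the disks on the target node first, then copy the data device by
# device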
1875 try:
1876 CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
1877 except errors.OpExecError:
1878 self.LogWarning("Device creation failed")
1879 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1880 raise
1881
1882 cluster_name = self.cfg.GetClusterInfo().cluster_name
1883
1884 errs = []
1885
1886 for idx, disk in enumerate(self.instance.disks):
1887 self.LogInfo("Copying data for disk %d", idx)
1888 result = self.rpc.call_blockdev_assemble(
1889 target_node.uuid, (disk, self.instance), self.instance.name,
1890 True, idx)
1891 if result.fail_msg:
1892 self.LogWarning("Can't assemble newly created disk %d: %s",
1893 idx, result.fail_msg)
1894 errs.append(result.fail_msg)
1895 break
1896 dev_path, _, __ = result.payload
1897 result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
1898 self.instance),
1899 target_node.secondary_ip,
1900 dev_path, cluster_name)
1901 if result.fail_msg:
1902 self.LogWarning("Can't copy data over for disk %d: %s",
1903 idx, result.fail_msg)
1904 errs.append(result.fail_msg)
1905 break
1906
1907 if errs:
1908 self.LogWarning("Some disks failed to copy, aborting")
1909 try:
1910 RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
1911 finally:
1912 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
1913 raise errors.OpExecError("Errors during disk copy: %s" %
1914 (",".join(errs),))
1915
1916 self.instance.primary_node = target_node.uuid
1917 self.cfg.Update(self.instance, feedback_fn)
1918
1919 self.LogInfo("Removing the disks on the original node")
1920 RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)
1921
1922
1923 if self.instance.admin_state == constants.ADMINST_UP:
1924 self.LogInfo("Starting instance %s on node %s",
1925 self.instance.name, target_node.name)
1926
1927 disks_ok, _ = AssembleInstanceDisks(self, self.instance,
1928 ignore_secondaries=True)
1929 if not disks_ok:
1930 ShutdownInstanceDisks(self, self.instance)
1931 raise errors.OpExecError("Can't activate the instance's disks")
1932
1933 result = self.rpc.call_instance_start(target_node.uuid,
1934 (self.instance, None, None), False,
1935 self.op.reason)
1936 msg = result.fail_msg
1937 if msg:
1938 ShutdownInstanceDisks(self, self.instance)
1939 raise errors.OpExecError("Could not start instance %s on node %s: %s" %
1940 (self.instance.name, target_node.name, msg))
1941
1944 """Allocates multiple instances at the same time.
1945
1946 """
1947 REQ_BGL = False
1948
1950 """Check arguments.
1951
1952 """
1953 nodes = []
1954 for inst in self.op.instances:
1955 if inst.iallocator is not None:
1956 raise errors.OpPrereqError("iallocator are not allowed to be set on"
1957 " instance objects", errors.ECODE_INVAL)
1958 nodes.append(bool(inst.pnode))
1959 if inst.disk_template in constants.DTS_INT_MIRROR:
1960 nodes.append(bool(inst.snode))
1961
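# either every instance specifies its nodes or none of them does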
1962 has_nodes = compat.any(nodes)
1963 if compat.all(nodes) ^ has_nodes:
1964 raise errors.OpPrereqError("There are instance objects providing"
1965 " pnode/snode while others do not",
1966 errors.ECODE_INVAL)
1967
1968 if not has_nodes and self.op.iallocator is None:
1969 default_iallocator = self.cfg.GetDefaultIAllocator()
1970 if default_iallocator:
1971 self.op.iallocator = default_iallocator
1972 else:
1973 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1974 " given and no cluster-wide default"
1975 " iallocator found; please specify either"
1976 " an iallocator or nodes on the instances"
1977 " or set a cluster-wide default iallocator",
1978 errors.ECODE_INVAL)
1979
1980 _CheckOpportunisticLocking(self.op)
1981
1982 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1983 if dups:
1984 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1985 utils.CommaJoin(dups), errors.ECODE_INVAL)
1986
1988 """Calculate the locks.
1989
1990 """
1991 self.share_locks = ShareAll()
1992 self.needed_locks = {
1993
1994
1995 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1996 }
1997
1998 if self.op.iallocator:
1999 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
2000 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
2001
2002 if self.op.opportunistic_locking:
2003 self.opportunistic_locks[locking.LEVEL_NODE] = True
2004 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
2005 else:
2006 nodeslist = []
2007 for inst in self.op.instances:
2008 (inst.pnode_uuid, inst.pnode) = \
2009 ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
2010 nodeslist.append(inst.pnode_uuid)
2011 if inst.snode is not None:
2012 (inst.snode_uuid, inst.snode) = \
2013 ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
2014 nodeslist.append(inst.snode_uuid)
2015
2016 self.needed_locks[locking.LEVEL_NODE] = nodeslist
2017
2018
2019 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
2020
2022 """Check prerequisite.
2023
2024 """
2025 if self.op.iallocator:
2026 cluster = self.cfg.GetClusterInfo()
2027 default_vg = self.cfg.GetVGName()
2028 ec_id = self.proc.GetECId()
2029
2030 if self.op.opportunistic_locking:
2031
2032 node_whitelist = self.cfg.GetNodeNames(
2033 set(self.owned_locks(locking.LEVEL_NODE)) &
2034 set(self.owned_locks(locking.LEVEL_NODE_RES)))
2035 else:
2036 node_whitelist = None
2037
2038 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
2039 _ComputeNics(op, cluster, None,
2040 self.cfg, ec_id),
2041 _ComputeFullBeParams(op, cluster),
2042 node_whitelist)
2043 for op in self.op.instances]
2044
2045 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
2046 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
2047
2048 ial.Run(self.op.iallocator)
2049
2050 if not ial.success:
2051 raise errors.OpPrereqError("Can't compute nodes using"
2052 " iallocator '%s': %s" %
2053 (self.op.iallocator, ial.info),
2054 errors.ECODE_NORES)
2055
2056 self.ia_result = ial.result
2057
2058 if self.op.dry_run:
2059 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2060 constants.JOB_IDS_KEY: [],
2061 })
2062
2064 """Contructs the partial result.
2065
2066 """
2067 if self.op.iallocator:
2068 (allocatable, failed_insts) = self.ia_result
2069 allocatable_insts = map(compat.fst, allocatable)
2070 else:
2071 allocatable_insts = [op.instance_name for op in self.op.instances]
2072 failed_insts = []
2073
2074 return {
2075 constants.ALLOCATABLE_KEY: allocatable_insts,
2076 constants.FAILED_KEY: failed_insts,
2077 }
2078
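# Illustrative sketch (hypothetical instance names, not part of the original
# module): the partial result only maps which requested instances could be
# allocated and which could not, e.g.
#
#   {constants.ALLOCATABLE_KEY: ["inst1.example.com"],
#    constants.FAILED_KEY: ["inst2.example.com"]}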
2079 def Exec(self, feedback_fn):
2080 """Executes the opcode.
2081
2082 """
2083 jobs = []
2084 if self.op.iallocator:
2085 op2inst = dict((op.instance_name, op) for op in self.op.instances)
2086 (allocatable, failed) = self.ia_result
2087
2088 for (name, node_names) in allocatable:
2089 op = op2inst.pop(name)
2090
2091 (op.pnode_uuid, op.pnode) = \
2092 ExpandNodeUuidAndName(self.cfg, None, node_names[0])
2093 if len(node_names) > 1:
2094 (op.snode_uuid, op.snode) = \
2095 ExpandNodeUuidAndName(self.cfg, None, node_names[1])
2096
2097 jobs.append([op])
2098
2099 missing = set(op2inst.keys()) - set(failed)
2100 assert not missing, \
2101 "Iallocator did return incomplete result: %s" % \
2102 utils.CommaJoin(missing)
2103 else:
2104 jobs.extend([op] for op in self.op.instances)
2105
2106 return ResultWithJobs(jobs, **self._ConstructPartialResult())
2107
2110 """Data structure for network interface modifications.
2111
2112 Used by L{LUInstanceSetParams}.
2113
2114 """
2115 def __init__(self):
2116 self.params = None
2117 self.filled = None
2118
2121 """Prepares a list of container modifications by adding a private data field.
2122
2123 @type mods: list of tuples; (operation, index, parameters)
2124 @param mods: List of modifications
2125 @type private_fn: callable or None
2126 @param private_fn: Callable for constructing a private data field for a
2127 modification
2128 @rtype: list
2129
2130 """
2131 if private_fn is None:
2132 fn = lambda: None
2133 else:
2134 fn = private_fn
2135
2136 return [(op, idx, params, fn()) for (op, idx, params) in mods]
2137
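# Illustrative sketch (hypothetical values, not part of the original module):
# _PrepareContainerMods extends each (op, index, params) tuple with one private
# data object, here None because no private_fn is given.
#
#   mods = [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024})]
#   _PrepareContainerMods(mods, None)
#   # -> [(constants.DDM_ADD, -1, {constants.IDISK_SIZE: 1024}, None)]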
2140 """Checks if nodes have enough physical CPUs
2141
2142 This function checks if all given nodes have the needed number of
2143 physical CPUs. In case any node has less CPUs or we cannot get the
2144 information from the node, this function raises an OpPrereqError
2145 exception.
2146
2147 @type lu: C{LogicalUnit}
2148 @param lu: a logical unit from which we get configuration data
2149 @type node_uuids: C{list}
2150 @param node_uuids: the list of node UUIDs to check
2151 @type requested: C{int}
2152 @param requested: the minimum acceptable number of physical CPUs
2153 @type hypervisor_specs: list of pairs (string, dict of strings)
2154 @param hypervisor_specs: list of hypervisor specifications in
2155 pairs (hypervisor_name, hvparams)
2156 @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
2157 or we cannot check the node
2158
2159 """
2160 nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
2161 for node_uuid in node_uuids:
2162 info = nodeinfo[node_uuid]
2163 node_name = lu.cfg.GetNodeName(node_uuid)
2164 info.Raise("Cannot get current information from node %s" % node_name,
2165 prereq=True, ecode=errors.ECODE_ENVIRON)
2166 (_, _, (hv_info, )) = info.payload
2167 num_cpus = hv_info.get("cpu_total", None)
2168 if not isinstance(num_cpus, int):
2169 raise errors.OpPrereqError("Can't compute the number of physical CPUs"
2170 " on node %s, result was '%s'" %
2171 (node_name, num_cpus), errors.ECODE_ENVIRON)
2172 if requested > num_cpus:
2173 raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
2174 "required" % (node_name, num_cpus, requested),
2175 errors.ECODE_NORES)
2176
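# Illustrative sketch (assumed "lu" and "instance" objects): hypervisor_specs
# is a list of (hypervisor_name, hvparams) pairs, mirroring how the check is
# invoked from LUInstanceSetParams further below.
#
#   hvspecs = [(instance.hypervisor,
#               lu.cfg.GetClusterInfo().hvparams[instance.hypervisor])]
#   _CheckNodesPhysicalCPUs(lu, instance.all_nodes, 4, hvspecs)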
2179 """Return the item refered by the identifier.
2180
2181 @type identifier: string
2182 @param identifier: Item index or name or UUID
2183 @type kind: string
2184 @param kind: One-word item description
2185 @type container: list
2186 @param container: Container to get the item from
2187
2188 """
2189
2190 try:
2191 idx = int(identifier)
2192 if idx == -1:
2193
2194 absidx = len(container) - 1
2195 elif idx < 0:
2196 raise IndexError("Not accepting negative indices other than -1")
2197 elif idx >= len(container):
2198 raise IndexError("Got %s index %s, but there are only %s" %
2199 (kind, idx, len(container)))
2200 else:
2201 absidx = idx
2202 return (absidx, container[idx])
2203 except ValueError:
2204 pass
2205
2206 for idx, item in enumerate(container):
2207 if item.uuid == identifier or item.name == identifier:
2208 return (idx, item)
2209
2210 raise errors.OpPrereqError("Cannot find %s with identifier %s" %
2211 (kind, identifier), errors.ECODE_NOENT)
2212
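# Illustrative examples (hypothetical "nics" container): the identifier can be
# a numeric index, -1 for the last item, or a name/UUID.
#
#   GetItemFromContainer("0", "nic", nics)    # -> (0, nics[0])
#   GetItemFromContainer("-1", "nic", nics)   # -> (len(nics) - 1, nics[-1])
#   GetItemFromContainer("eth0", "nic", nics) # -> match by name or UUID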
2213
2214 def _ApplyContainerMods(kind, container, chgdesc, mods,
2215 create_fn, modify_fn, remove_fn,
2216 post_add_fn=None):
2217 """Applies descriptions in C{mods} to C{container}.
2218
2219 @type kind: string
2220 @param kind: One-word item description
2221 @type container: list
2222 @param container: Container to modify
2223 @type chgdesc: None or list
2224 @param chgdesc: List of applied changes
2225 @type mods: list
2226 @param mods: Modifications as returned by L{_PrepareContainerMods}
2227 @type create_fn: callable
2228 @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
2229 receives absolute item index, parameters and private data object as added
2230 by L{_PrepareContainerMods}, returns tuple containing new item and changes
2231 as list
2232 @type modify_fn: callable
2233 @param modify_fn: Callback for modifying an existing item
2234 (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
2235 and private data object as added by L{_PrepareContainerMods}, returns
2236 changes as list
2237 @type remove_fn: callable
2238 @param remove_fn: Callback on removing item; receives absolute item index,
2239 item and private data object as added by L{_PrepareContainerMods}
2240 @type post_add_fn: callable
2241 @param post_add_fn: Callable for post-processing a newly created item after
2242 it has been put into the container. It receives the index of the new item
2243 and the new item as parameters.
2244
2245 """
2246 for (op, identifier, params, private) in mods:
2247 changes = None
2248
2249 if op == constants.DDM_ADD:
2250
2251
2252 try:
2253 idx = int(identifier)
2254 except ValueError:
2255 raise errors.OpPrereqError("Only possitive integer or -1 is accepted as"
2256 " identifier for %s" % constants.DDM_ADD,
2257 errors.ECODE_INVAL)
2258 if idx == -1:
2259 addidx = len(container)
2260 else:
2261 if idx < 0:
2262 raise IndexError("Not accepting negative indices other than -1")
2263 elif idx > len(container):
2264 raise IndexError("Got %s index %s, but there are only %s" %
2265 (kind, idx, len(container)))
2266 addidx = idx
2267
2268 if create_fn is None:
2269 item = params
2270 else:
2271 (item, changes) = create_fn(addidx, params, private)
2272
2273 if idx == -1:
2274 container.append(item)
2275 else:
2276 assert idx >= 0
2277 assert idx <= len(container)
2278
2279 container.insert(idx, item)
2280
2281 if post_add_fn is not None:
2282 post_add_fn(addidx, item)
2283
2284 else:
2285
2286 (absidx, item) = GetItemFromContainer(identifier, kind, container)
2287
2288 if op == constants.DDM_REMOVE:
2289 assert not params
2290
2291 changes = [("%s/%s" % (kind, absidx), "remove")]
2292
2293 if remove_fn is not None:
2294 msg = remove_fn(absidx, item, private)
2295 if msg:
2296 changes.append(("%s/%s" % (kind, absidx), msg))
2297
2298 assert container[absidx] == item
2299 del container[absidx]
2300 elif op == constants.DDM_MODIFY:
2301 if modify_fn is not None:
2302 changes = modify_fn(absidx, item, params, private)
2303 else:
2304 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
2305
2306 assert _TApplyContModsCbChanges(changes)
2307
2308 if not (chgdesc is None or changes is None):
2309 chgdesc.extend(changes)
2310
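# Illustrative sketch (hypothetical NIC list and name): modifications prepared
# by _PrepareContainerMods are applied in place and the human-readable change
# pairs are collected in chgdesc.
#
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_REMOVE, "eth1", {})], None)
#   _ApplyContainerMods("NIC", nics, chgdesc, mods, None, None, None)
#   # chgdesc now holds [("NIC/<idx>", "remove")] and the NIC is gone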
2313 """Updates the C{iv_name} attribute of disks.
2314
2315 @type disks: list of L{objects.Disk}
2316
2317 """
2318 for (idx, disk) in enumerate(disks):
2319 disk.iv_name = "disk/%s" % (base_index + idx, )
2320
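# Illustrative sketch (assumed "instance" object): renumbering always starts at
# base_index, so after a disk removal the remaining disks become "disk/0",
# "disk/1", ... again.
#
#   _UpdateIvNames(0, instance.disks)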
2323 """Modifies an instances's parameters.
2324
2325 """
2326 HPATH = "instance-modify"
2327 HTYPE = constants.HTYPE_INSTANCE
2328 REQ_BGL = False
2329
2330 @staticmethod
2331 def _UpgradeDiskNicMods(kind, mods, verify_fn):
2332 assert ht.TList(mods)
2333 assert not mods or len(mods[0]) in (2, 3)
2334
2335 if mods and len(mods[0]) == 2:
2336 result = []
2337
2338 addremove = 0
2339 for op, params in mods:
2340 if op in (constants.DDM_ADD, constants.DDM_REMOVE):
2341 result.append((op, -1, params))
2342 addremove += 1
2343
2344 if addremove > 1:
2345 raise errors.OpPrereqError("Only one %s add or remove operation is"
2346 " supported at a time" % kind,
2347 errors.ECODE_INVAL)
2348 else:
2349 result.append((constants.DDM_MODIFY, op, params))
2350
2351 assert verify_fn(result)
2352 else:
2353 result = mods
2354
2355 return result
2356
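# Illustrative sketch (hypothetical NIC modification): legacy two-element mods
# are upgraded to the (op, identifier, params) form used by the rest of this
# module.
#
#   mods = [(constants.DDM_ADD, {constants.INIC_IP: "198.51.100.10"})]
#   LUInstanceSetParams._UpgradeDiskNicMods(
#       "NIC", mods, ht.TSetParamsMods(ht.TINicParams))
#   # -> [(constants.DDM_ADD, -1, {constants.INIC_IP: "198.51.100.10"})]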
2357 @staticmethod
2379
2381 """Verifies a disk modification.
2382
2383 """
2384 if op == constants.DDM_ADD:
2385 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2386 if mode not in constants.DISK_ACCESS_SET:
2387 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2388 errors.ECODE_INVAL)
2389
2390 size = params.get(constants.IDISK_SIZE, None)
2391 if size is None:
2392 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2393 constants.IDISK_SIZE, errors.ECODE_INVAL)
2394 size = int(size)
2395
2396 params[constants.IDISK_SIZE] = size
2397 name = params.get(constants.IDISK_NAME, None)
2398 if name is not None and name.lower() == constants.VALUE_NONE:
2399 params[constants.IDISK_NAME] = None
2400
2401 CheckSpindlesExclusiveStorage(params, excl_stor, True)
2402
2403 elif op == constants.DDM_MODIFY:
2404 if constants.IDISK_SIZE in params:
2405 raise errors.OpPrereqError("Disk size change not possible, use"
2406 " grow-disk", errors.ECODE_INVAL)
2407
2408
2409
2410 if self.instance.disk_template != constants.DT_EXT:
2411 utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2412
2413 name = params.get(constants.IDISK_NAME, None)
2414 if name is not None and name.lower() == constants.VALUE_NONE:
2415 params[constants.IDISK_NAME] = None
2416
2417 @staticmethod
2419 """Verifies a network interface modification.
2420
2421 """
2422 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
2423 ip = params.get(constants.INIC_IP, None)
2424 name = params.get(constants.INIC_NAME, None)
2425 req_net = params.get(constants.INIC_NETWORK, None)
2426 link = params.get(constants.NIC_LINK, None)
2427 mode = params.get(constants.NIC_MODE, None)
2428 if name is not None and name.lower() == constants.VALUE_NONE:
2429 params[constants.INIC_NAME] = None
2430 if req_net is not None:
2431 if req_net.lower() == constants.VALUE_NONE:
2432 params[constants.INIC_NETWORK] = None
2433 req_net = None
2434 elif link is not None or mode is not None:
2435 raise errors.OpPrereqError("If network is given"
2436 " mode or link should not",
2437 errors.ECODE_INVAL)
2438
2439 if op == constants.DDM_ADD:
2440 macaddr = params.get(constants.INIC_MAC, None)
2441 if macaddr is None:
2442 params[constants.INIC_MAC] = constants.VALUE_AUTO
2443
2444 if ip is not None:
2445 if ip.lower() == constants.VALUE_NONE:
2446 params[constants.INIC_IP] = None
2447 else:
2448 if ip.lower() == constants.NIC_IP_POOL:
2449 if op == constants.DDM_ADD and req_net is None:
2450 raise errors.OpPrereqError("If ip=pool, parameter network"
2451 " cannot be none",
2452 errors.ECODE_INVAL)
2453 else:
2454 if not netutils.IPAddress.IsValid(ip):
2455 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
2456 errors.ECODE_INVAL)
2457
2458 if constants.INIC_MAC in params:
2459 macaddr = params[constants.INIC_MAC]
2460 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2461 macaddr = utils.NormalizeAndValidateMac(macaddr)
2462
2463 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
2464 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
2465 " modifying an existing NIC",
2466 errors.ECODE_INVAL)
2467
2467
2468 def CheckArguments(self):
2469 if not (self.op.nics or self.op.disks or self.op.disk_template or
2470 self.op.hvparams or self.op.beparams or self.op.os_name or
2471 self.op.osparams or self.op.offline is not None or
2472 self.op.runtime_mem or self.op.pnode):
2473 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
2474
2475 if self.op.hvparams:
2476 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
2477 "hypervisor", "instance", "cluster")
2478
2479 self.op.disks = self._UpgradeDiskNicMods(
2480 "disk", self.op.disks, ht.TSetParamsMods(ht.TIDiskParams))
2481 self.op.nics = self._UpgradeDiskNicMods(
2482 "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
2483
2484 if self.op.disks and self.op.disk_template is not None:
2485 raise errors.OpPrereqError("Disk template conversion and other disk"
2486 " changes not supported at the same time",
2487 errors.ECODE_INVAL)
2488
2489 if (self.op.disk_template and
2490 self.op.disk_template in constants.DTS_INT_MIRROR and
2491 self.op.remote_node is None):
2492 raise errors.OpPrereqError("Changing the disk template to a mirrored"
2493 " one requires specifying a secondary node",
2494 errors.ECODE_INVAL)
2495
2496
2497 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
2498 self._VerifyNicModification)
2499
2500 if self.op.pnode:
2501 (self.op.pnode_uuid, self.op.pnode) = \
2502 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
2503
2514
2515 def DeclareLocks(self, level):
2516 if level == locking.LEVEL_NODEGROUP:
2517 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
2518
2519
2520 self.needed_locks[locking.LEVEL_NODEGROUP] = \
2521 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
2522 elif level == locking.LEVEL_NODE:
2523 self._LockInstancesNodes()
2524 if self.op.disk_template and self.op.remote_node:
2525 (self.op.remote_node_uuid, self.op.remote_node) = \
2526 ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
2527 self.op.remote_node)
2528 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
2529 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
2530
2531 self.needed_locks[locking.LEVEL_NODE_RES] = \
2532 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
2533
2568
2575
2578
2579 update_params_dict = dict([(key, params[key])
2580 for key in constants.NICS_PARAMETERS
2581 if key in params])
2582
2583 req_link = update_params_dict.get(constants.NIC_LINK, None)
2584 req_mode = update_params_dict.get(constants.NIC_MODE, None)
2585
2586 new_net_uuid = None
2587 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
2588 if new_net_uuid_or_name:
2589 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
2590 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
2591
2592 if old_net_uuid:
2593 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
2594
2595 if new_net_uuid:
2596 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
2597 if not netparams:
2598 raise errors.OpPrereqError("No netparams found for the network"
2599 " %s, probably not connected" %
2600 new_net_obj.name, errors.ECODE_INVAL)
2601 new_params = dict(netparams)
2602 else:
2603 new_params = GetUpdatedParams(old_params, update_params_dict)
2604
2605 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
2606
2607 new_filled_params = cluster.SimpleFillNIC(new_params)
2608 objects.NIC.CheckParameterSyntax(new_filled_params)
2609
2610 new_mode = new_filled_params[constants.NIC_MODE]
2611 if new_mode == constants.NIC_MODE_BRIDGED:
2612 bridge = new_filled_params[constants.NIC_LINK]
2613 msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
2614 if msg:
2615 msg = "Error checking bridges on node '%s': %s" % \
2616 (self.cfg.GetNodeName(pnode_uuid), msg)
2617 if self.op.force:
2618 self.warn.append(msg)
2619 else:
2620 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
2621
2622 elif new_mode == constants.NIC_MODE_ROUTED:
2623 ip = params.get(constants.INIC_IP, old_ip)
2624 if ip is None:
2625 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
2626 " on a routed NIC", errors.ECODE_INVAL)
2627
2628 elif new_mode == constants.NIC_MODE_OVS:
2629
2630 self.LogInfo("OVS links are currently not checked for correctness")
2631
2632 if constants.INIC_MAC in params:
2633 mac = params[constants.INIC_MAC]
2634 if mac is None:
2635 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
2636 errors.ECODE_INVAL)
2637 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
2638
2639 params[constants.INIC_MAC] = \
2640 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2641 else:
2642
2643 try:
2644 self.cfg.ReserveMAC(mac, self.proc.GetECId())
2645 except errors.ReservationError:
2646 raise errors.OpPrereqError("MAC address '%s' already in use"
2647 " in cluster" % mac,
2648 errors.ECODE_NOTUNIQUE)
2649 elif new_net_uuid != old_net_uuid:
2650
2651 def get_net_prefix(net_uuid):
2652 mac_prefix = None
2653 if net_uuid:
2654 nobj = self.cfg.GetNetwork(net_uuid)
2655 mac_prefix = nobj.mac_prefix
2656
2657 return mac_prefix
2658
2659 new_prefix = get_net_prefix(new_net_uuid)
2660 old_prefix = get_net_prefix(old_net_uuid)
2661 if old_prefix != new_prefix:
2662 params[constants.INIC_MAC] = \
2663 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
2664
2665
2666 new_ip = params.get(constants.INIC_IP, old_ip)
2667 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
2668 if new_ip:
2669
2670 if new_ip.lower() == constants.NIC_IP_POOL:
2671 if new_net_uuid:
2672 try:
2673 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
2674 except errors.ReservationError:
2675 raise errors.OpPrereqError("Unable to get a free IP"
2676 " from the address pool",
2677 errors.ECODE_STATE)
2678 self.LogInfo("Chose IP %s from network %s",
2679 new_ip,
2680 new_net_obj.name)
2681 params[constants.INIC_IP] = new_ip
2682 else:
2683 raise errors.OpPrereqError("ip=pool, but no network found",
2684 errors.ECODE_INVAL)
2685
2686 elif new_net_uuid:
2687 try:
2688 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
2689 check=self.op.conflicts_check)
2690 self.LogInfo("Reserving IP %s in network %s",
2691 new_ip, new_net_obj.name)
2692 except errors.ReservationError:
2693 raise errors.OpPrereqError("IP %s not available in network %s" %
2694 (new_ip, new_net_obj.name),
2695 errors.ECODE_NOTUNIQUE)
2696
2697 elif self.op.conflicts_check:
2698 _CheckForConflictingIp(self, new_ip, pnode_uuid)
2699
2700
2701 if old_ip and old_net_uuid:
2702 try:
2703 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
2704 except errors.AddressPoolError:
2705 logging.warning("Release IP %s not contained in network %s",
2706 old_ip, old_net_obj.name)
2707
2708
2709 elif (old_net_uuid is not None and
2710 (req_link is not None or req_mode is not None)):
2711 raise errors.OpPrereqError("Not allowed to change link or mode of"
2712 " a NIC that is connected to a network",
2713 errors.ECODE_INVAL)
2714
2715 private.params = new_params
2716 private.filled = new_filled_params
2717
2719 """CheckPrereq checks related to a new disk template."""
2720
2721 pnode_uuid = self.instance.primary_node
2722 if self.instance.disk_template == self.op.disk_template:
2723 raise errors.OpPrereqError("Instance already has disk template %s" %
2724 self.instance.disk_template,
2725 errors.ECODE_INVAL)
2726
2727 if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
2728 raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
2729 " cluster." % self.op.disk_template)
2730
2731 if (self.instance.disk_template,
2732 self.op.disk_template) not in self._DISK_CONVERSIONS:
2733 raise errors.OpPrereqError("Unsupported disk template conversion from"
2734 " %s to %s" % (self.instance.disk_template,
2735 self.op.disk_template),
2736 errors.ECODE_INVAL)
2737 CheckInstanceState(self, self.instance, INSTANCE_DOWN,
2738 msg="cannot change disk template")
2739 if self.op.disk_template in constants.DTS_INT_MIRROR:
2740 if self.op.remote_node_uuid == pnode_uuid:
2741 raise errors.OpPrereqError("Given new secondary node %s is the same"
2742 " as the primary node of the instance" %
2743 self.op.remote_node, errors.ECODE_STATE)
2744 CheckNodeOnline(self, self.op.remote_node_uuid)
2745 CheckNodeNotDrained(self, self.op.remote_node_uuid)
2746
2747 assert self.instance.disk_template == constants.DT_PLAIN
2748 disks = [{constants.IDISK_SIZE: d.size,
2749 constants.IDISK_VG: d.logical_id[0]}
2750 for d in self.instance.disks]
2751 required = ComputeDiskSizePerVG(self.op.disk_template, disks)
2752 CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], required)
2753
2754 snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
2755 snode_group = self.cfg.GetNodeGroup(snode_info.group)
2756 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
2757 snode_group)
2758 CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
2759 ignore=self.op.ignore_ipolicy)
2760 if pnode_info.group != snode_info.group:
2761 self.LogWarning("The primary and secondary nodes are in two"
2762 " different node groups; the disk parameters"
2763 " from the first disk's node group will be"
2764 " used")
2765
2766 if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
2767
2768 nodes = [pnode_info]
2769 if self.op.disk_template in constants.DTS_INT_MIRROR:
2770 assert snode_info
2771 nodes.append(snode_info)
2772 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
2773 if compat.any(map(has_es, nodes)):
2774 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
2775 " storage is enabled" % (self.instance.disk_template,
2776 self.op.disk_template))
2777 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
2778
2780 """CheckPrereq checks related to disk changes.
2781
2782 @type ispec: dict
2783 @param ispec: instance specs to be updated with the new disks
2784
2785 """
2786 self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
2787
2788 excl_stor = compat.any(
2789 rpc.GetExclusiveStorageForNodes(self.cfg,
2790 self.instance.all_nodes).values()
2791 )
2792
2793
2794
2795 ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor)
2796 if self.instance.disk_template == constants.DT_EXT:
2797 self._CheckMods("disk", self.op.disks, {}, ver_fn)
2798 else:
2799 self._CheckMods("disk", self.op.disks, constants.IDISK_PARAMS_TYPES,
2800 ver_fn)
2801
2802 self.diskmod = _PrepareContainerMods(self.op.disks, None)
2803
2804
2805 if self.instance.disk_template == constants.DT_EXT:
2806 for mod in self.diskmod:
2807 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2808 if mod[0] == constants.DDM_ADD:
2809 if ext_provider is None:
2810 raise errors.OpPrereqError("Instance template is '%s' and parameter"
2811 " '%s' missing, during disk add" %
2812 (constants.DT_EXT,
2813 constants.IDISK_PROVIDER),
2814 errors.ECODE_NOENT)
2815 elif mod[0] == constants.DDM_MODIFY:
2816 if ext_provider:
2817 raise errors.OpPrereqError("Parameter '%s' is invalid during disk"
2818 " modification" %
2819 constants.IDISK_PROVIDER,
2820 errors.ECODE_INVAL)
2821 else:
2822 for mod in self.diskmod:
2823 ext_provider = mod[2].get(constants.IDISK_PROVIDER, None)
2824 if ext_provider is not None:
2825 raise errors.OpPrereqError("Parameter '%s' is only valid for"
2826 " instances of type '%s'" %
2827 (constants.IDISK_PROVIDER,
2828 constants.DT_EXT),
2829 errors.ECODE_INVAL)
2830
2831 if not self.op.wait_for_sync and not self.instance.disks_active:
2832 for mod in self.diskmod:
2833 if mod[0] == constants.DDM_ADD:
2834 raise errors.OpPrereqError("Can't add a disk to an instance with"
2835 " deactivated disks and"
2836 " --no-wait-for-sync given.",
2837 errors.ECODE_INVAL)
2838
2839 if self.op.disks and self.instance.disk_template == constants.DT_DISKLESS:
2840 raise errors.OpPrereqError("Disk operations not supported for"
2841 " diskless instances", errors.ECODE_INVAL)
2842
2843 def _PrepareDiskMod(_, disk, params, __):
2844 disk.name = params.get(constants.IDISK_NAME, None)
2845
2846
2847 disks = copy.deepcopy(self.instance.disks)
2848 _ApplyContainerMods("disk", disks, None, self.diskmod, None,
2849 _PrepareDiskMod, None)
2850 utils.ValidateDeviceNames("disk", disks)
2851 if len(disks) > constants.MAX_DISKS:
2852 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
2853 " more" % constants.MAX_DISKS,
2854 errors.ECODE_STATE)
2855 disk_sizes = [disk.size for disk in self.instance.disks]
2856 disk_sizes.extend(params["size"] for (op, idx, params, private) in
2857 self.diskmod if op == constants.DDM_ADD)
2858 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
2859 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
2860
2861
2862 if self.op.offline is not None:
2863 if self.op.offline:
2864 msg = "can't change to offline without being down first"
2865 else:
2866 msg = "can't change to online (down) without being offline first"
2867 CheckInstanceState(self, self.instance, CAN_CHANGE_INSTANCE_OFFLINE,
2868 msg=msg)
2869
2871 """Check prerequisites.
2872
2873 This only checks the instance list against the existing names.
2874
2875 """
2876 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
2877 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
2878 self.cluster = self.cfg.GetClusterInfo()
2879 cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]
2880
2881 assert self.instance is not None, \
2882 "Cannot retrieve locked instance %s" % self.op.instance_name
2883
2884 pnode_uuid = self.instance.primary_node
2885
2886 self.warn = []
2887
2888 if (self.op.pnode_uuid is not None and self.op.pnode_uuid != pnode_uuid and
2889 not self.op.force):
2890
2891 instance_info = self.rpc.call_instance_info(
2892 pnode_uuid, self.instance.name, self.instance.hypervisor,
2893 cluster_hvparams)
2894 if instance_info.fail_msg:
2895 self.warn.append("Can't get instance runtime information: %s" %
2896 instance_info.fail_msg)
2897 elif instance_info.payload:
2898 raise errors.OpPrereqError("Instance is still running on %s" %
2899 self.cfg.GetNodeName(pnode_uuid),
2900 errors.ECODE_STATE)
2901
2902 assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
2903 node_uuids = list(self.instance.all_nodes)
2904 pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
2905
2906
2907 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
2908 group_info = self.cfg.GetNodeGroup(pnode_info.group)
2909
2910
2911 ispec = {}
2912
2913 if self.op.hotplug or self.op.hotplug_if_possible:
2914 result = self.rpc.call_hotplug_supported(self.instance.primary_node,
2915 self.instance)
2916 if result.fail_msg:
2917 if self.op.hotplug:
2918 result.Raise("Hotplug is not possible: %s" % result.fail_msg,
2919 prereq=True)
2920 else:
2921 self.LogWarning(result.fail_msg)
2922 self.op.hotplug = False
2923 self.LogInfo("Modification will take place without hotplugging.")
2924 else:
2925 self.op.hotplug = True
2926
2927
2928 self.nicmod = _PrepareContainerMods(self.op.nics, _InstNicModPrivate)
2929
2930
2931 if self.op.os_name and not self.op.force:
2932 CheckNodeHasOS(self, self.instance.primary_node, self.op.os_name,
2933 self.op.force_variant)
2934 instance_os = self.op.os_name
2935 else:
2936 instance_os = self.instance.os
2937
2938 assert not (self.op.disk_template and self.op.disks), \
2939 "Can't modify disk template and apply disk changes at the same time"
2940
2941 if self.op.disk_template:
2942 self._PreCheckDiskTemplate(pnode_info)
2943
2944 self._PreCheckDisks(ispec)
2945
2946
2947 if self.op.hvparams:
2948 hv_type = self.instance.hypervisor
2949 i_hvdict = GetUpdatedParams(self.instance.hvparams, self.op.hvparams)
2950 utils.ForceDictType(i_hvdict, constants.HVS_PARAMETER_TYPES)
2951 hv_new = self.cluster.SimpleFillHV(hv_type, self.instance.os, i_hvdict)
2952
2953
2954 hypervisor.GetHypervisorClass(hv_type).CheckParameterSyntax(hv_new)
2955 CheckHVParams(self, node_uuids, self.instance.hypervisor, hv_new)
2956 self.hv_proposed = self.hv_new = hv_new
2957 self.hv_inst = i_hvdict
2958 else:
2959 self.hv_proposed = self.cluster.SimpleFillHV(self.instance.hypervisor,
2960 self.instance.os,
2961 self.instance.hvparams)
2962 self.hv_new = self.hv_inst = {}
2963
2964
2965 if self.op.beparams:
2966 i_bedict = GetUpdatedParams(self.instance.beparams, self.op.beparams,
2967 use_none=True)
2968 objects.UpgradeBeParams(i_bedict)
2969 utils.ForceDictType(i_bedict, constants.BES_PARAMETER_TYPES)
2970 be_new = self.cluster.SimpleFillBE(i_bedict)
2971 self.be_proposed = self.be_new = be_new
2972 self.be_inst = i_bedict
2973 else:
2974 self.be_new = self.be_inst = {}
2975 self.be_proposed = self.cluster.SimpleFillBE(self.instance.beparams)
2976 be_old = self.cluster.FillBE(self.instance)
2977
2978
2979
2980
2981 if (constants.BE_VCPUS in self.be_proposed and
2982 constants.HV_CPU_MASK in self.hv_proposed):
2983 cpu_list = \
2984 utils.ParseMultiCpuMask(self.hv_proposed[constants.HV_CPU_MASK])
2985
2986
2987
2988 if (len(cpu_list) > 1 and
2989 len(cpu_list) != self.be_proposed[constants.BE_VCPUS]):
2990 raise errors.OpPrereqError("Number of vCPUs [%d] does not match the"
2991 " CPU mask [%s]" %
2992 (self.be_proposed[constants.BE_VCPUS],
2993 self.hv_proposed[constants.HV_CPU_MASK]),
2994 errors.ECODE_INVAL)
2995
2996
2997 if constants.HV_CPU_MASK in self.hv_new:
2998
2999 max_requested_cpu = max(map(max, cpu_list))
3000
3001
3002 hvspecs = [(self.instance.hypervisor,
3003 self.cfg.GetClusterInfo()
3004 .hvparams[self.instance.hypervisor])]
3005 _CheckNodesPhysicalCPUs(self, self.instance.all_nodes,
3006 max_requested_cpu + 1,
3007 hvspecs)
3008
3009
3010 if self.op.osparams:
3011 i_osdict = GetUpdatedParams(self.instance.osparams, self.op.osparams)
3012 CheckOSParams(self, True, node_uuids, instance_os, i_osdict)
3013 self.os_inst = i_osdict
3014 else:
3015 self.os_inst = {}
3016
3017
3018 if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
3019 be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
3020 mem_check_list = [pnode_uuid]
3021 if be_new[constants.BE_AUTO_BALANCE]:
3022
3023 mem_check_list.extend(self.instance.secondary_nodes)
3024 instance_info = self.rpc.call_instance_info(
3025 pnode_uuid, self.instance.name, self.instance.hypervisor,
3026 cluster_hvparams)
3027 hvspecs = [(self.instance.hypervisor,
3028 cluster_hvparams)]
3029 nodeinfo = self.rpc.call_node_info(mem_check_list, None,
3030 hvspecs)
3031 pninfo = nodeinfo[pnode_uuid]
3032 msg = pninfo.fail_msg
3033 if msg:
3034
3035 self.warn.append("Can't get info from primary node %s: %s" %
3036 (self.cfg.GetNodeName(pnode_uuid), msg))
3037 else:
3038 (_, _, (pnhvinfo, )) = pninfo.payload
3039 if not isinstance(pnhvinfo.get("memory_free", None), int):
3040 self.warn.append("Node data from primary node %s doesn't contain"
3041 " free memory information" %
3042 self.cfg.GetNodeName(pnode_uuid))
3043 elif instance_info.fail_msg:
3044 self.warn.append("Can't get instance runtime information: %s" %
3045 instance_info.fail_msg)
3046 else:
3047 if instance_info.payload:
3048 current_mem = int(instance_info.payload["memory"])
3049 else:
3050
3051
3052
3053
3054 current_mem = 0
3055
3056 miss_mem = (be_new[constants.BE_MAXMEM] - current_mem -
3057 pnhvinfo["memory_free"])
3058 if miss_mem > 0:
3059 raise errors.OpPrereqError("This change will prevent the instance"
3060 " from starting, due to %d MB of memory"
3061 " missing on its primary node" %
3062 miss_mem, errors.ECODE_NORES)
3063
3064 if be_new[constants.BE_AUTO_BALANCE]:
3065 for node_uuid, nres in nodeinfo.items():
3066 if node_uuid not in self.instance.secondary_nodes:
3067 continue
3068 nres.Raise("Can't get info from secondary node %s" %
3069 self.cfg.GetNodeName(node_uuid), prereq=True,
3070 ecode=errors.ECODE_STATE)
3071 (_, _, (nhvinfo, )) = nres.payload
3072 if not isinstance(nhvinfo.get("memory_free", None), int):
3073 raise errors.OpPrereqError("Secondary node %s didn't return free"
3074 " memory information" %
3075 self.cfg.GetNodeName(node_uuid),
3076 errors.ECODE_STATE)
3077
3078 elif be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
3079 raise errors.OpPrereqError("This change will prevent the instance"
3080 " from failover to its secondary node"
3081 " %s, due to not enough memory" %
3082 self.cfg.GetNodeName(node_uuid),
3083 errors.ECODE_STATE)
3084
3085 if self.op.runtime_mem:
3086 remote_info = self.rpc.call_instance_info(
3087 self.instance.primary_node, self.instance.name,
3088 self.instance.hypervisor,
3089 cluster_hvparams)
3090 remote_info.Raise("Error checking node %s" %
3091 self.cfg.GetNodeName(self.instance.primary_node),
3092 prereq=True)
3093 if not remote_info.payload:
3094 raise errors.OpPrereqError("Instance %s is not running" %
3095 self.instance.name, errors.ECODE_STATE)
3096
3097 current_memory = remote_info.payload["memory"]
3098 if (not self.op.force and
3099 (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
3100 self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
3101 raise errors.OpPrereqError("Instance %s must have memory between %d"
3102 " and %d MB of memory unless --force is"
3103 " given" %
3104 (self.instance.name,
3105 self.be_proposed[constants.BE_MINMEM],
3106 self.be_proposed[constants.BE_MAXMEM]),
3107 errors.ECODE_INVAL)
3108
3109 delta = self.op.runtime_mem - current_memory
3110 if delta > 0:
3111 CheckNodeFreeMemory(
3112 self, self.instance.primary_node,
3113 "ballooning memory for instance %s" % self.instance.name, delta,
3114 self.instance.hypervisor,
3115 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
3116
3117
3118 cluster = self.cluster
3119
3120 def _PrepareNicCreate(_, params, private):
3121 self._PrepareNicModification(params, private, None, None,
3122 {}, cluster, pnode_uuid)
3123 return (None, None)
3124
3125 def _PrepareNicMod(_, nic, params, private):
3126 self._PrepareNicModification(params, private, nic.ip, nic.network,
3127 nic.nicparams, cluster, pnode_uuid)
3128 return None
3129
3130 def _PrepareNicRemove(_, params, __):
3131 ip = params.ip
3132 net = params.network
3133 if net is not None and ip is not None:
3134 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
3135
3136
3137 nics = self.instance.nics[:]
3138 _ApplyContainerMods("NIC", nics, None, self.nicmod,
3139 _PrepareNicCreate, _PrepareNicMod, _PrepareNicRemove)
3140 if len(nics) > constants.MAX_NICS:
3141 raise errors.OpPrereqError("Instance has too many network interfaces"
3142 " (%d), cannot add more" % constants.MAX_NICS,
3143 errors.ECODE_STATE)
3144
3145
3146 self._nic_chgdesc = []
3147 if self.nicmod:
3148
3149 nics = [nic.Copy() for nic in self.instance.nics]
3150 _ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
3151 self._CreateNewNic, self._ApplyNicMods,
3152 self._RemoveNic)
3153
3154 utils.ValidateDeviceNames("NIC", nics)
3155 self._new_nics = nics
3156 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
3157 else:
3158 self._new_nics = None
3159 ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
3160
3161 if not self.op.ignore_ipolicy:
3162 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
3163 group_info)
3164
3165
3166 ispec[constants.ISPEC_SPINDLE_USE] = \
3167 self.be_new.get(constants.BE_SPINDLE_USE, None)
3168 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
3169 None)
3170
3171
3172 if self.op.disk_template:
3173 new_disk_template = self.op.disk_template
3174 else:
3175 new_disk_template = self.instance.disk_template
3176 ispec_max = ispec.copy()
3177 ispec_max[constants.ISPEC_MEM_SIZE] = \
3178 self.be_new.get(constants.BE_MAXMEM, None)
3179 res_max = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
3180 new_disk_template)
3181 ispec_min = ispec.copy()
3182 ispec_min[constants.ISPEC_MEM_SIZE] = \
3183 self.be_new.get(constants.BE_MINMEM, None)
3184 res_min = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
3185 new_disk_template)
3186
3187 if (res_max or res_min):
3188
3189
3190 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
3191 (group_info, group_info.name,
3192 utils.CommaJoin(set(res_max + res_min))))
3193 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
3194
3196 """Converts an instance from plain to drbd.
3197
3198 """
3199 feedback_fn("Converting template to drbd")
3200 pnode_uuid = self.instance.primary_node
3201 snode_uuid = self.op.remote_node_uuid
3202
3203 assert self.instance.disk_template == constants.DT_PLAIN
3204
3205
3206 disk_info = [{constants.IDISK_SIZE: d.size, constants.IDISK_MODE: d.mode,
3207 constants.IDISK_VG: d.logical_id[0],
3208 constants.IDISK_NAME: d.name}
3209 for d in self.instance.disks]
3210 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
3211 self.instance.uuid, pnode_uuid,
3212 [snode_uuid], disk_info, None, None, 0,
3213 feedback_fn, self.diskparams)
3214 anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
3215 p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
3216 s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
3217 info = GetInstanceInfoText(self.instance)
3218 feedback_fn("Creating additional volumes...")
3219
3220 for disk in anno_disks:
3221
3222 CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
3223 info, True, p_excl_stor)
3224 for child in disk.children:
3225 CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
3226 s_excl_stor)
3227
3228
3229 feedback_fn("Renaming original volumes...")
3230 rename_list = [(o, n.children[0].logical_id)
3231 for (o, n) in zip(self.instance.disks, new_disks)]
3232 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
3233 result.Raise("Failed to rename original LVs")
3234
3235 feedback_fn("Initializing DRBD devices...")
3236
3237 try:
3238 for disk in anno_disks:
3239 for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
3240 (snode_uuid, s_excl_stor)]:
3241 f_create = node_uuid == pnode_uuid
3242 CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
3243 f_create, excl_stor)
3244 except errors.GenericError, e:
3245 feedback_fn("Initializing of DRBD devices failed;"
3246 " renaming back original volumes...")
3247 rename_back_list = [(n.children[0], o.logical_id)
3248 for (n, o) in zip(new_disks, self.instance.disks)]
3249 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
3250 result.Raise("Failed to rename LVs back after error %s" % str(e))
3251 raise
3252
3253
3254 self.instance.disk_template = constants.DT_DRBD8
3255 self.instance.disks = new_disks
3256 self.cfg.Update(self.instance, feedback_fn)
3257
3258
3259 ReleaseLocks(self, locking.LEVEL_NODE)
3260
3261
3262 disk_abort = not WaitForSync(self, self.instance,
3263 oneshot=not self.op.wait_for_sync)
3264 if disk_abort:
3265 raise errors.OpExecError("There are some degraded disks for"
3266 " this instance, please cleanup manually")
3267
3268
3269
3271 """Converts an instance from drbd to plain.
3272
3273 """
3274 assert self.instance.disk_template == constants.DT_DRBD8
3275 assert len(self.instance.secondary_nodes) == 1 or not self.instance.disks
3276
3277 pnode_uuid = self.instance.primary_node
3278
3279
3280 snode_uuid = None
3281 if self.instance.secondary_nodes:
3282 snode_uuid = self.instance.secondary_nodes[0]
3283
3284 feedback_fn("Converting template to plain")
3285
3286 old_disks = AnnotateDiskParams(self.instance, self.instance.disks, self.cfg)
3287 new_disks = [d.children[0] for d in self.instance.disks]
3288
3289
3290 for parent, child in zip(old_disks, new_disks):
3291 child.size = parent.size
3292 child.mode = parent.mode
3293 child.name = parent.name
3294
3295
3296
3297 for disk in old_disks:
3298 tcp_port = disk.logical_id[2]
3299 self.cfg.AddTcpUdpPort(tcp_port)
3300
3301
3302 self.instance.disks = new_disks
3303 self.instance.disk_template = constants.DT_PLAIN
3304 _UpdateIvNames(0, self.instance.disks)
3305 self.cfg.Update(self.instance, feedback_fn)
3306
3307
3308 ReleaseLocks(self, locking.LEVEL_NODE)
3309
3310 feedback_fn("Removing volumes on the secondary node...")
3311 for disk in old_disks:
3312 result = self.rpc.call_blockdev_remove(snode_uuid, (disk, self.instance))
3313 result.Warn("Could not remove block device %s on node %s,"
3314 " continuing anyway" %
3315 (disk.iv_name, self.cfg.GetNodeName(snode_uuid)),
3316 self.LogWarning)
3317
3318 feedback_fn("Removing unneeded volumes on the primary node...")
3319 for idx, disk in enumerate(old_disks):
3320 meta = disk.children[1]
3321 result = self.rpc.call_blockdev_remove(pnode_uuid, (meta, self.instance))
3322 result.Warn("Could not remove metadata for disk %d on node %s,"
3323 " continuing anyway" %
3324 (idx, self.cfg.GetNodeName(pnode_uuid)),
3325 self.LogWarning)
3326
3328 self.LogInfo("Trying to hotplug device...")
3329 msg = "hotplug:"
3330 result = self.rpc.call_hotplug_device(self.instance.primary_node,
3331 self.instance, action, dev_type,
3332 (device, self.instance),
3333 extra, seq)
3334 if result.fail_msg:
3335 self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
3336 self.LogInfo("Continuing execution..")
3337 msg += "failed"
3338 else:
3339 self.LogInfo("Hotplug done.")
3340 msg += "done"
3341 return msg
3342
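# Illustrative usage sketch (assumed NIC object "nic_obj"): the returned string
# is either "hotplug:done" or "hotplug:failed" and is recorded in the change
# description, e.g. when a newly created NIC is plugged in below.
#
#   msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
#                             constants.HOTPLUG_TARGET_NIC, nic_obj, None, idx)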
3344 """Creates a new disk.
3345
3346 """
3347
3348 if self.instance.disk_template in constants.DTS_FILEBASED:
3349 (file_driver, file_path) = self.instance.disks[0].logical_id
3350 file_path = os.path.dirname(file_path)
3351 else:
3352 file_driver = file_path = None
3353
3354 disk = \
3355 GenerateDiskTemplate(self, self.instance.disk_template,
3356 self.instance.uuid, self.instance.primary_node,
3357 self.instance.secondary_nodes, [params], file_path,
3358 file_driver, idx, self.Log, self.diskparams)[0]
3359
3360 new_disks = CreateDisks(self, self.instance, disks=[disk])
3361
3362 if self.cluster.prealloc_wipe_disks:
3363
3364 WipeOrCleanupDisks(self, self.instance,
3365 disks=[(idx, disk, 0)],
3366 cleanup=new_disks)
3367
3368 changes = [
3369 ("disk/%d" % idx,
3370 "add:size=%s,mode=%s" % (disk.size, disk.mode)),
3371 ]
3372 if self.op.hotplug:
3373 result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
3374 (disk, self.instance),
3375 self.instance, True, idx)
3376 if result.fail_msg:
3377 changes.append(("disk/%d" % idx, "assemble:failed"))
3378 self.LogWarning("Can't assemble newly created disk %d: %s",
3379 idx, result.fail_msg)
3380 else:
3381 _, link_name, uri = result.payload
3382 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3383 constants.HOTPLUG_TARGET_DISK,
3384 disk, (link_name, uri), idx)
3385 changes.append(("disk/%d" % idx, msg))
3386
3387 return (disk, changes)
3388
3389 def _PostAddDisk(self, _, disk):
3390 if not WaitForSync(self, self.instance, disks=[disk],
3391 oneshot=not self.op.wait_for_sync):
3392 raise errors.OpExecError("Failed to sync disks of %s" %
3393 self.instance.name)
3394
3395
3396
3397 if not self.instance.disks_active:
3398 ShutdownInstanceDisks(self, self.instance, disks=[disk])
3399
3428
3430 """Removes a disk.
3431
3432 """
3433 hotmsg = ""
3434 if self.op.hotplug:
3435 hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
3436 constants.HOTPLUG_TARGET_DISK,
3437 root, None, idx)
3438 ShutdownInstanceDisks(self, self.instance, [root])
3439
3440 (anno_disk,) = AnnotateDiskParams(self.instance, [root], self.cfg)
3441 for node_uuid, disk in anno_disk.ComputeNodeTree(
3442 self.instance.primary_node):
3443 msg = self.rpc.call_blockdev_remove(node_uuid, (disk, self.instance)) \
3444 .fail_msg
3445 if msg:
3446 self.LogWarning("Could not remove disk/%d on node '%s': %s,"
3447 " continuing anyway", idx,
3448 self.cfg.GetNodeName(node_uuid), msg)
3449
3450
3451 if root.dev_type in constants.DTS_DRBD:
3452 self.cfg.AddTcpUdpPort(root.logical_id[2])
3453
3454 return hotmsg
3455
3457 """Creates data structure for a new network interface.
3458
3459 """
3460 mac = params[constants.INIC_MAC]
3461 ip = params.get(constants.INIC_IP, None)
3462 net = params.get(constants.INIC_NETWORK, None)
3463 name = params.get(constants.INIC_NAME, None)
3464 net_uuid = self.cfg.LookupNetwork(net)
3465
3466 nicparams = private.filled
3467 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
3468 nicparams=nicparams)
3469 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
3470
3471 changes = [
3472 ("nic.%d" % idx,
3473 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
3474 (mac, ip, private.filled[constants.NIC_MODE],
3475 private.filled[constants.NIC_LINK], net)),
3476 ]
3477
3478 if self.op.hotplug:
3479 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
3480 constants.HOTPLUG_TARGET_NIC,
3481 nobj, None, idx)
3482 changes.append(("nic.%d" % idx, msg))
3483
3484 return (nobj, changes)
3485
3487 """Modifies a network interface.
3488
3489 """
3490 changes = []
3491
3492 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
3493 if key in params:
3494 changes.append(("nic.%s/%d" % (key, idx), params[key]))
3495 setattr(nic, key, params[key])
3496
3497 new_net = params.get(constants.INIC_NETWORK, nic.network)
3498 new_net_uuid = self.cfg.LookupNetwork(new_net)
3499 if new_net_uuid != nic.network:
3500 changes.append(("nic.network/%d" % idx, new_net))
3501 nic.network = new_net_uuid
3502
3503 if private.filled:
3504 nic.nicparams = private.filled
3505
3506 for (key, val) in nic.nicparams.items():
3507 changes.append(("nic.%s/%d" % (key, idx), val))
3508
3509 if self.op.hotplug:
3510 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
3511 constants.HOTPLUG_TARGET_NIC,
3512 nic, None, idx)
3513 changes.append(("nic/%d" % idx, msg))
3514
3515 return changes
3516
3522
3523 def Exec(self, feedback_fn):
3524 """Modifies an instance.
3525
3526 All parameters take effect only at the next restart of the instance.
3527
3528 """
3529
3530
3531
3532 for warn in self.warn:
3533 feedback_fn("WARNING: %s" % warn)
3534
3535 assert ((self.op.disk_template is None) ^
3536 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
3537 "Not owning any node resource locks"
3538
3539 result = []
3540
3541
3542 if self.op.pnode_uuid:
3543 self.instance.primary_node = self.op.pnode_uuid
3544
3545
3546 if self.op.runtime_mem:
3547 rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
3548 self.instance,
3549 self.op.runtime_mem)
3550 rpcres.Raise("Cannot modify instance runtime memory")
3551 result.append(("runtime_memory", self.op.runtime_mem))
3552
3553
3554 _ApplyContainerMods("disk", self.instance.disks, result, self.diskmod,
3555 self._CreateNewDisk, self._ModifyDisk,
3556 self._RemoveDisk, post_add_fn=self._PostAddDisk)
3557 _UpdateIvNames(0, self.instance.disks)
3558
3559 if self.op.disk_template:
3560 if __debug__:
3561 check_nodes = set(self.instance.all_nodes)
3562 if self.op.remote_node_uuid:
3563 check_nodes.add(self.op.remote_node_uuid)
3564 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
3565 owned = self.owned_locks(level)
3566 assert not (check_nodes - owned), \
3567 ("Not owning the correct locks, owning %r, expected at least %r" %
3568 (owned, check_nodes))
3569
3570 r_shut = ShutdownInstanceDisks(self, self.instance)
3571 if not r_shut:
3572 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
3573 " proceed with disk template conversion")
3574 mode = (self.instance.disk_template, self.op.disk_template)
3575 try:
3576 self._DISK_CONVERSIONS[mode](self, feedback_fn)
3577 except:
3578 self.cfg.ReleaseDRBDMinors(self.instance.uuid)
3579 raise
3580 result.append(("disk_template", self.op.disk_template))
3581
3582 assert self.instance.disk_template == self.op.disk_template, \
3583 ("Expected disk template '%s', found '%s'" %
3584 (self.op.disk_template, self.instance.disk_template))
3585
3586
3587
3588 ReleaseLocks(self, locking.LEVEL_NODE)
3589 ReleaseLocks(self, locking.LEVEL_NODE_RES)
3590
3591
3592 if self._new_nics is not None:
3593 self.instance.nics = self._new_nics
3594 result.extend(self._nic_chgdesc)
3595
3596
3597 if self.op.hvparams:
3598 self.instance.hvparams = self.hv_inst
3599 for key, val in self.op.hvparams.iteritems():
3600 result.append(("hv/%s" % key, val))
3601
3602
3603 if self.op.beparams:
3604 self.instance.beparams = self.be_inst
3605 for key, val in self.op.beparams.iteritems():
3606 result.append(("be/%s" % key, val))
3607
3608
3609 if self.op.os_name:
3610 self.instance.os = self.op.os_name
3611
3612
3613 if self.op.osparams:
3614 self.instance.osparams = self.os_inst
3615 for key, val in self.op.osparams.iteritems():
3616 result.append(("os/%s" % key, val))
3617
3618 if self.op.offline is None:
3619
3620 pass
3621 elif self.op.offline:
3622
3623 self.cfg.MarkInstanceOffline(self.instance.uuid)
3624 result.append(("admin_state", constants.ADMINST_OFFLINE))
3625 else:
3626
3627 self.cfg.MarkInstanceDown(self.instance.uuid)
3628 result.append(("admin_state", constants.ADMINST_DOWN))
3629
3630 self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
3631
3632 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
3633 self.owned_locks(locking.LEVEL_NODE)), \
3634 "All node locks should have been released by now"
3635
3636 return result
3637
3638 _DISK_CONVERSIONS = {
3639 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
3640 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
3641 }
3642
3645 HPATH = "instance-change-group"
3646 HTYPE = constants.HTYPE_INSTANCE
3647 REQ_BGL = False
3648
3667
3668 def DeclareLocks(self, level):
3669 if level == locking.LEVEL_NODEGROUP:
3670 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
3671
3672 if self.req_target_uuids:
3673 lock_groups = set(self.req_target_uuids)
3674
3675
3676
3677 instance_groups = self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
3678 lock_groups.update(instance_groups)
3679 else:
3680
3681 lock_groups = locking.ALL_SET
3682
3683 self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups
3684
3685 elif level == locking.LEVEL_NODE:
3686 if self.req_target_uuids:
3687
3688 self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
3689 self._LockInstancesNodes()
3690
3691
3692 lock_groups = (frozenset(self.owned_locks(locking.LEVEL_NODEGROUP)) -
3693 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid))
3694 member_nodes = [node_uuid
3695 for group in lock_groups
3696 for node_uuid in self.cfg.GetNodeGroup(group).members]
3697 self.needed_locks[locking.LEVEL_NODE].extend(member_nodes)
3698 else:
3699
3700 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
3701
3702 def CheckPrereq(self):
3703 owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
3704 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
3705 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
3706
3707 assert (self.req_target_uuids is None or
3708 owned_groups.issuperset(self.req_target_uuids))
3709 assert owned_instance_names == set([self.op.instance_name])
3710
3711
3712 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
3713
3714
3715 assert owned_nodes.issuperset(self.instance.all_nodes), \
3716 ("Instance %s's nodes changed while we kept the lock" %
3717 self.op.instance_name)
3718
3719 inst_groups = CheckInstanceNodeGroups(self.cfg, self.op.instance_uuid,
3720 owned_groups)
3721
3722 if self.req_target_uuids:
3723
3724 self.target_uuids = frozenset(self.req_target_uuids)
3725 else:
3726
3727 self.target_uuids = owned_groups - inst_groups
3728
3729 conflicting_groups = self.target_uuids & inst_groups
3730 if conflicting_groups:
3731 raise errors.OpPrereqError("Can't use group(s) '%s' as targets, they are"
3732 " used by the instance '%s'" %
3733 (utils.CommaJoin(conflicting_groups),
3734 self.op.instance_name),
3735 errors.ECODE_INVAL)
3736
3737 if not self.target_uuids:
3738 raise errors.OpPrereqError("There are no possible target groups",
3739 errors.ECODE_INVAL)
3740
3742 """Build hooks env.
3743
3744 """
3745 assert self.target_uuids
3746
3747 env = {
3748 "TARGET_GROUPS": " ".join(self.target_uuids),
3749 }
3750
3751 env.update(BuildInstanceHookEnvByObject(self, self.instance))
3752
3753 return env
3754
3756 """Build hooks nodes.
3757
3758 """
3759 mn = self.cfg.GetMasterNode()
3760 return ([mn], [mn])
3761
3762 def Exec(self, feedback_fn):
3763 instances = list(self.owned_locks(locking.LEVEL_INSTANCE))
3764
3765 assert instances == [self.op.instance_name], "Instance not locked"
3766
3767 req = iallocator.IAReqGroupChange(instances=instances,
3768 target_groups=list(self.target_uuids))
3769 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
3770
3771 ial.Run(self.op.iallocator)
3772
3773 if not ial.success:
3774 raise errors.OpPrereqError("Can't compute solution for changing group of"
3775 " instance '%s' using iallocator '%s': %s" %
3776 (self.op.instance_name, self.op.iallocator,
3777 ial.info), errors.ECODE_NORES)
3778
3779 jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)
3780
3781 self.LogInfo("Iallocator returned %s job(s) for changing group of"
3782 " instance '%s'", len(jobs), self.op.instance_name)
3783
3784 return ResultWithJobs(jobs)
3785