"""Logical units dealing with instances."""

import OpenSSL
import copy
import logging
import os

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import hypervisor
from ganeti import locking
from ganeti.masterd import iallocator
from ganeti import masterd
from ganeti import netutils
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import utils

from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs

from ganeti.cmdlib.common import INSTANCE_DOWN, \
  INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
  ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
  LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
  IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
  AnnotateDiskParams, GetUpdatedParams, ExpandInstanceUuidAndName, \
  ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeUuidAndName, \
  CheckDiskTemplateEnabled
from ganeti.cmdlib.instance_storage import CreateDisks, \
  CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
  IsExclusiveStorageEnabledNodeUuid, CreateSingleBlockDev, ComputeDisks, \
  CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
  StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks, \
  CheckSpindlesExclusiveStorage
from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
  GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
  NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
  ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
  GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
  CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS

import ganeti.masterd.instance


_TApplyContModsCbChanges = \
  ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
    ht.TNonEmptyString,
    ht.TAny,
    ])))


def _CheckHostnameSane(lu, name):
  """Ensures that a given hostname resolves to a 'sane' name.

  The given name is required to be a prefix of the resolved hostname,
  to prevent accidental mismatches.

  @param lu: the logical unit on behalf of which we're checking
  @param name: the name we should resolve and check
  @return: the resolved hostname object

  """
  hostname = netutils.GetHostname(name=name)
  if hostname.name != name:
    lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
  if not utils.MatchNameComponent(name, [hostname.name]):
    raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
                                " same as given hostname '%s'") %
                               (hostname.name, name), errors.ECODE_INVAL)
  return hostname
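
# Illustrative usage (hypothetical names, not from a real cluster): on a
# cluster whose resolver expands "web1" to "web1.example.com",
#
#   hostname = _CheckHostnameSane(lu, "web1")
#
# returns the resolved hostname object, while a name resolving to an
# unrelated FQDN raises OpPrereqError.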


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)


def _CreateInstanceAllocRequest(op, disks, nics, beparams,
                                node_name_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The full filled beparams
  @param node_name_whitelist: List of nodes which should appear as online to
    the allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_name_whitelist)
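
# Minimal sketch of how this wrapper is used (values are assumptions, not
# from a real cluster): with beparams already filled via _ComputeFullBeParams,
#
#   req = _CreateInstanceAllocRequest(op, disks, nics, beparams, None)
#   ial = iallocator.IAllocator(cfg, rpc, req)
#   ial.Run(op.iallocator)
#
# which is the pattern used by LUInstanceCreate._RunAllocator below.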


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_TYPES)
  return cluster.SimpleFillBE(op.beparams)


def _ComputeNics(op, cluster, default_ip, cfg, ec_id):
  """Computes the nics.

  @param op: The instance opcode
  @param cluster: Cluster configuration object
  @param default_ip: The default ip to assign
  @param cfg: An instance of the configuration object
  @param ec_id: Execution context ID

  @returns: The built up nics

  """
  nics = []
  for nic in op.nics:
    nic_mode_req = nic.get(constants.INIC_MODE, None)
    nic_mode = nic_mode_req
    if nic_mode is None or nic_mode == constants.VALUE_AUTO:
      nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]

    net = nic.get(constants.INIC_NETWORK, None)
    link = nic.get(constants.NIC_LINK, None)
    ip = nic.get(constants.INIC_IP, None)

    if net is None or net.lower() == constants.VALUE_NONE:
      net = None
    else:
      if nic_mode_req is not None or link is not None:
        raise errors.OpPrereqError("If network is given, no mode or link"
                                   " is allowed to be passed",
                                   errors.ECODE_INVAL)

    # ip validity checks
    if ip is None or ip.lower() == constants.VALUE_NONE:
      nic_ip = None
    elif ip.lower() == constants.VALUE_AUTO:
      if not op.name_check:
        raise errors.OpPrereqError("IP address set to auto but name checks"
                                   " have been skipped",
                                   errors.ECODE_INVAL)
      nic_ip = default_ip
    else:
      # We defer pool operations until later, so that the iallocator has
      # filled in the instance's node(s)
      if ip.lower() == constants.NIC_IP_POOL:
        if net is None:
          raise errors.OpPrereqError("if ip=pool, parameter network"
                                     " must be passed too",
                                     errors.ECODE_INVAL)

      elif not netutils.IPAddress.IsValid(ip):
        raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                   errors.ECODE_INVAL)

      nic_ip = ip

    # a routed NIC always needs an IP address
    if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
      raise errors.OpPrereqError("Routed nic mode requires an ip address",
                                 errors.ECODE_INVAL)

    # MAC address verification
    mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
    if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
      mac = utils.NormalizeAndValidateMac(mac)

      try:
        # reserve only explicitly given MACs; auto/generate are handled later
        cfg.ReserveMAC(mac, ec_id)
      except errors.ReservationError:
        raise errors.OpPrereqError("MAC address %s already in use"
                                   " in cluster" % mac,
                                   errors.ECODE_NOTUNIQUE)

    # build nic parameters
    nicparams = {}
    if nic_mode_req:
      nicparams[constants.NIC_MODE] = nic_mode
    if link:
      nicparams[constants.NIC_LINK] = link

    check_params = cluster.SimpleFillNIC(nicparams)
    objects.NIC.CheckParameterSyntax(check_params)
    net_uuid = cfg.LookupNetwork(net)
    name = nic.get(constants.INIC_NAME, None)
    if name is not None and name.lower() == constants.VALUE_NONE:
      name = None
    nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
                          network=net_uuid, nicparams=nicparams)
    nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
    nics.append(nic_obj)

  return nics
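
# Example input (illustrative): an opcode whose nics field is
#
#   [{constants.INIC_NETWORK: "net1", constants.INIC_IP: "pool"},
#    {constants.INIC_MAC: constants.VALUE_AUTO}]
#
# yields two NIC objects: the first deferring its address to IP-pool
# allocation on network "net1", the second with mode/link taken from the
# cluster's default nicparams and a MAC generated later in CheckPrereq.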


def _CheckForConflictingIp(lu, ip, node_uuid):
  """In case of conflicting IP address raise error.

  @type ip: string
  @param ip: IP address
  @type node_uuid: string
  @param node_uuid: node UUID

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node_uuid)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute if instance specs meets the specs of ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
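
# Illustrative call (numbers assumed): an instance wanting 4096 MB of memory,
# two VCPUs and one 10 GB disk would be checked as
#
#   res = _ComputeIPolicyInstanceSpecViolation(
#     ipolicy,
#     {constants.ISPEC_MEM_SIZE: 4096,
#      constants.ISPEC_CPU_COUNT: 2,
#      constants.ISPEC_DISK_COUNT: 1,
#      constants.ISPEC_DISK_SIZE: [10240],
#      constants.ISPEC_NIC_COUNT: 1,
#      constants.ISPEC_SPINDLE_USE: 1},
#     constants.DT_PLAIN)
#
# returning a (possibly empty) list of violation messages.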


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
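
# Example (names illustrative): for an OS whose supported_variants is
# ["default", "getty"], the name "debootstrap+getty" is accepted, a plain
# "debootstrap" is rejected with "OS name must include a variant", and any
# "+variant" suffix is rejected if the OS declares no variants at all.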


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False

    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    self._CheckDiskArguments()

    # instance name verification
    if self.op.name_check:
      self.hostname = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_DEFAULT

    # node and iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    # this is just a preliminary check, but the instance name must be
    # unique in the cluster
    if self.op.instance_name in \
        [inst.name for inst in self.cfg.GetAllInstancesInfo().values()]:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 self.op.instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
      nodelist = [self.op.pnode_uuid]
      if self.op.snode is not None:
        (self.op.snode_uuid, self.op.snode) = \
          ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
        nodelist.append(self.op.snode_uuid)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
        if not os.path.isabs(src_path):
          self.op.src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration); the instance doesn't exist yet, so we lock the node
    # groups of all nodes we might use.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # When locking all nodes (e.g. for opportunistic allocation) we have no
      # choice but to lock all groups, because they're allocated before
      # nodes. Unneeded ones are released again in CheckPrereq.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1
  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
        self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)
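
    # Note (explanatory, not upstream code): with opportunistic locking the
    # LU may end up owning only a subset of the node locks requested in
    # ExpandNames; the hook above therefore narrows the NODE_RES request to
    # exactly the NODE locks actually acquired, keeping both levels in sync.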

  def _RunAllocator(self):
    """Run the allocator based on input opcode.

    """
    if self.op.opportunistic_locking:
      # Only consider nodes for which a lock is held
      node_name_whitelist = self.cfg.GetNodeNames(
        self.owned_locks(locking.LEVEL_NODE))
    else:
      node_name_whitelist = None

    req = _CreateInstanceAllocRequest(self.op, self.disks,
                                      self.nics, self.be_full,
                                      node_name_whitelist)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      # With opportunistic locks a failure is only temporary
      if self.op.opportunistic_locking:
        ecode = errors.ECODE_TEMP_NORES
      else:
        ecode = errors.ECODE_NORES

      raise errors.OpPrereqError("Can't compute nodes using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 ecode)

    (self.op.pnode_uuid, self.op.pnode) = \
      ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
    self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
                 self.op.instance_name, self.op.iallocator,
                 utils.CommaJoin(ial.result))

    assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"

    if req.RequiredNodes() == 2:
      (self.op.snode_uuid, self.op.snode) = \
        ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node_name=self.op.pnode,
      secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
    return nl, nl
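
  # For illustration (values assumed; the exact key set is produced by
  # BuildInstanceHookEnv in ganeti.cmdlib.instance_utils), the resulting
  # hooks environment contains entries such as
  #
  #   {"ADD_MODE": "create", "INSTANCE_NAME": "web1.example.com",
  #    "INSTANCE_PRIMARY": "node1.example.com", "INSTANCE_VCPUS": "2", ...}
  #
  # plus the SRC_* keys when importing.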

  def _ReadExportParams(self, einfo):
    """Use export parameters as defaults.

    In case the opcode doesn't specify (as in override) some instance
    parameters, then try to use them from the export information, if
    that declares them.

    """
    self.op.os_type = einfo.get(constants.INISECT_EXP, "os")

    if not self.op.disks:
      disks = []

      for idx in range(constants.MAX_DISKS):
        if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
          disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
          disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
          disk = {
            constants.IDISK_SIZE: disk_sz,
            constants.IDISK_NAME: disk_name
            }
          disks.append(disk)
      self.op.disks = disks
      if not disks and self.op.disk_template != constants.DT_DISKLESS:
        raise errors.OpPrereqError("No disk info specified and the export"
                                   " is missing the disk information",
                                   errors.ECODE_INVAL)

    if not self.op.nics:
      nics = []
      for idx in range(constants.MAX_NICS):
        if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
          ndict = {}
          for name in [constants.INIC_IP,
                       constants.INIC_MAC, constants.INIC_NAME]:
            nic_param_name = "nic%d_%s" % (idx, name)
            if einfo.has_option(constants.INISECT_INS, nic_param_name):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          network = einfo.get(constants.INISECT_INS,
                              "nic%d_%s" % (idx, constants.INIC_NETWORK))
          # in case network is given, link and mode are inherited from the
          # nodegroup's netparams and thus should not be passed here
          if network:
            ndict[constants.INIC_NETWORK] = network
          else:
            for name in list(constants.NICS_PARAMETERS):
              v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
              ndict[name] = v
          nics.append(ndict)
        else:
          break
      self.op.nics = nics

    if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
      self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()

    if (self.op.hypervisor is None and
        einfo.has_option(constants.INISECT_INS, "hypervisor")):
      self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")

    if einfo.has_section(constants.INISECT_HYP):
      # use the export parameters but do not override the ones
      # specified by the user
      for name, value in einfo.items(constants.INISECT_HYP):
        if name not in self.op.hvparams:
          self.op.hvparams[name] = value

    if einfo.has_section(constants.INISECT_BEP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_BEP):
        if name not in self.op.beparams:
          self.op.beparams[name] = value
        # Compatibility for the old "memory" backend parameter
        if name == constants.BE_MEMORY:
          if constants.BE_MAXMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MAXMEM] = value
          if constants.BE_MINMEM not in self.op.beparams:
            self.op.beparams[constants.BE_MINMEM] = value
    else:
      # try to read the parameters old style, from the main section
      for name in constants.BES_PARAMETERS:
        if (name not in self.op.beparams and
            einfo.has_option(constants.INISECT_INS, name)):
          self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)

    if einfo.has_section(constants.INISECT_OSP):
      # use the parameters, without overriding
      for name, value in einfo.items(constants.INISECT_OSP):
        if name not in self.op.osparams:
          self.op.osparams[name] = value

  def _CalculateFileStorageDir(self):
    """Calculate final instance file storage dir.

    """
    # file storage dir calculation/check
    self.instance_file_storage_dir = None
    if self.op.disk_template in constants.DTS_FILEBASED:
      # build the full file storage dir path
      joinargs = []

      if self.op.disk_template == constants.DT_SHARED_FILE:
        get_fsd_fn = self.cfg.GetSharedFileStorageDir
      else:
        get_fsd_fn = self.cfg.GetFileStorageDir

      cfg_storagedir = get_fsd_fn()
      if not cfg_storagedir:
        raise errors.OpPrereqError("Cluster file storage dir not defined",
                                   errors.ECODE_STATE)
      joinargs.append(cfg_storagedir)

      if self.op.file_storage_dir is not None:
        joinargs.append(self.op.file_storage_dir)

      joinargs.append(self.op.instance_name)

      # join all the components into the final path
      self.instance_file_storage_dir = utils.PathJoin(*joinargs)
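
      # Worked example (assumed configuration): with a cluster file storage
      # dir of "/srv/ganeti/file-storage", op.file_storage_dir "group1" and
      # instance name "web1.example.com", the result is
      #   /srv/ganeti/file-storage/group1/web1.example.com
      # utils.PathJoin also rejects components that would escape the base
      # directory (absolute paths or "..").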

  def CheckPrereq(self):
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ip ping checks (we use the same ip that we haven't checked and we will
    # check again in the CheckPrereq phase anyway)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run
    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need
    # to know the NIC's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in utils.GetLvmDiskTemplates():
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN: # Adopt LV disks
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv"; we need to ensure that other
          # calls to ReserveLV use the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.uuid, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    # memory check on primary node
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # We can only take the group disk parameters, as the instance has no
    # disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance_uuid, self.pnode.uuid,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=self.op.os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old names, so that we can use the
        # SetDiskID workaround
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, self.pnode.uuid)
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(self.op.instance_name)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as
    # we've added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.uuid)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID on the primary node, since the preceding
      # code might or might not have done it, depending on disk template
      # and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, self.pnode.uuid)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          feedback_fn("* running the instance OS create scripts...")
          os_add_result = \
            self.rpc.call_instance_os_add(self.pnode.uuid, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             self.op.instance_name, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (self.op.instance_name,
                                               self.pnode.name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to
          # make a connection. In some cases stopping an instance can take a
          # long time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == self.op.instance_name
        feedback_fn("Running rename script for %s" % self.op.instance_name)
        result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        result.Warn("Failed to run rename script for %s on node %s" %
                    (self.op.instance_name, self.pnode.name), self.LogWarning)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return self.cfg.GetNodeNames(list(iobj.all_nodes))
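
    # End-to-end sketch (illustrative names and flags): this LU backs
    # "gnt-instance add", e.g.
    #
    #   gnt-instance add -t drbd -o debootstrap+default -s 10G \
    #     --net 0:network=net1 web1.example.com
    #
    # which flows through CheckArguments, ExpandNames, the optional
    # iallocator run in CheckPrereq and finally this Exec method.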


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster and is not running.

    """
    (self.op.instance_uuid, self.op.instance_name) = \
      ExpandInstanceUuidAndName(self.cfg, self.op.instance_uuid,
                                self.op.instance_name)
    instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert instance is not None

    # It should actually not happen that an instance is running with a
    # disabled disk template, but in case it does, the renaming of file-based
    # instances will fail horribly. Thus, we test it before.
    if (instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != instance.name):
      CheckDiskTemplateEnabled(self.cfg.GetClusterInfo(),
                               instance.disk_template)

    CheckNodeOnline(self, instance.primary_node)
    CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
                       msg="cannot rename")
    self.instance = instance

    new_name = self.op.new_name
    if self.op.name_check:
      hostname = _CheckHostnameSane(self, new_name)
      new_name = self.op.new_name = hostname.name
      if (self.op.ip_check and
          netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (hostname.ip, new_name),
                                   errors.ECODE_NOTUNIQUE)

    instance_names = [inst.name for
                      inst in self.cfg.GetAllInstancesInfo().values()]
    if new_name in instance_names and new_name != instance.name:
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 new_name, errors.ECODE_EXISTS)

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    old_name = self.instance.name

    rename_file_storage = False
    if (self.instance.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != self.instance.name):
      old_file_storage_dir = os.path.dirname(
        self.instance.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(self.instance.uuid, self.op.new_name)

    # Change the instance lock. This is definitely safe while we hold the
    # BGL. Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    renamed_inst = self.cfg.GetInstanceInfo(self.instance.uuid)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(
        renamed_inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(renamed_inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (self.cfg.GetNodeName(renamed_inst.primary_node),
                    old_file_storage_dir, new_file_storage_dir))

    StartInstanceDisks(self, renamed_inst, None)

    # update info on disks
    info = GetInstanceInfoText(renamed_inst)
    for (idx, disk) in enumerate(renamed_inst.disks):
      for node_uuid in renamed_inst.all_nodes:
        self.cfg.SetDiskID(disk, node_uuid)
        result = self.rpc.call_blockdev_setinfo(node_uuid, disk, info)
        result.Warn("Error setting info on node %s for disk %s" %
                    (self.cfg.GetNodeName(node_uuid), idx), self.LogWarning)
    try:
      result = self.rpc.call_instance_run_rename(renamed_inst.primary_node,
                                                 renamed_inst, old_name,
                                                 self.op.debug_level)
      result.Warn("Could not run OS rename script for instance %s on node %s"
                  " (but the instance has been renamed in Ganeti)" %
                  (renamed_inst.name,
                   self.cfg.GetNodeName(renamed_inst.primary_node)),
                  self.LogWarning)
    finally:
      ShutdownInstanceDisks(self, renamed_inst)

    return renamed_inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    logging.info("Shutting down instance %s on node %s", self.instance.name,
                 self.cfg.GetNodeName(self.instance.primary_node))

    result = self.rpc.call_instance_shutdown(self.instance.primary_node,
                                             self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_failures:
      result.Warn("Warning: can't shutdown instance", feedback_fn)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name,
                    self.cfg.GetNodeName(self.instance.primary_node)))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(self.instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, self.instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "TARGET_NODE": self.op.target_node,
      "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
      }
    env.update(BuildInstanceHookEnvByObject(self, self.instance))
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node_uuid,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if self.instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 self.instance.disk_template,
                                 errors.ECODE_STATE)

    target_node = self.cfg.GetNodeInfo(self.op.target_node_uuid)
    assert target_node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node_uuid = target_node.uuid
    if target_node.uuid == self.instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (self.instance.name, target_node.name),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(self.instance)

    for idx, dsk in enumerate(self.instance.disks):
      if dsk.dev_type not in (constants.DT_PLAIN, constants.DT_FILE,
                              constants.DT_SHARED_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node.uuid)
    CheckNodeNotDrained(self, target_node.uuid)
    CheckNodeVmCapable(self, target_node.uuid)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(target_node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    CheckTargetNodeIPolicy(self, ipolicy, self.instance, target_node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if self.instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the target node
      CheckNodeFreeMemory(
        self, target_node.uuid, "failing over instance %s" %
        self.instance.name, bep[constants.BE_MAXMEM],
        self.instance.hypervisor,
        self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, self.instance, node_uuid=target_node.uuid)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    source_node = self.cfg.GetNodeInfo(self.instance.primary_node)
    target_node = self.cfg.GetNodeInfo(self.target_node_uuid)

    self.LogInfo("Shutting down instance %s on source node %s",
                 self.instance.name, source_node.name)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node.uuid, self.instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    if self.op.ignore_consistency:
      result.Warn("Could not shutdown instance %s on node %s. Proceeding"
                  " anyway. Please make sure node %s is down. Error details" %
                  (self.instance.name, source_node.name, source_node.name),
                  self.LogWarning)
    else:
      result.Raise("Could not shutdown instance %s on node %s" %
                   (self.instance.name, source_node.name))

    # create the target disks
    try:
      CreateDisks(self, self.instance, target_node_uuid=target_node.uuid)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(self.instance.uuid)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, then copy the data over
    for idx, disk in enumerate(self.instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(
        target_node.uuid, (disk, self.instance), self.instance.name,
        True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node.uuid, (disk,
                                                                self.instance),
                                             target_node.secondary_ip,
                                             dev_path, cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, self.instance, target_node_uuid=target_node.uuid)
      finally:
        self.cfg.ReleaseDRBDMinors(self.instance.uuid)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    self.instance.primary_node = target_node.uuid
    self.cfg.Update(self.instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, self.instance, target_node_uuid=source_node.uuid)

    # Only start the instance if it's marked as up
    if self.instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   self.instance.name, target_node.name)

      disks_ok, _ = AssembleInstanceDisks(self, self.instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node.uuid,
                                            (self.instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, self.instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (self.instance.name, target_node.name, msg))
1897 """Allocates multiple instances at the same time.
1898
1899 """
1900 REQ_BGL = False
1901
1903 """Check arguments.
1904
1905 """
1906 nodes = []
1907 for inst in self.op.instances:
1908 if inst.iallocator is not None:
1909 raise errors.OpPrereqError("iallocator are not allowed to be set on"
1910 " instance objects", errors.ECODE_INVAL)
1911 nodes.append(bool(inst.pnode))
1912 if inst.disk_template in constants.DTS_INT_MIRROR:
1913 nodes.append(bool(inst.snode))
1914
1915 has_nodes = compat.any(nodes)
1916 if compat.all(nodes) ^ has_nodes:
1917 raise errors.OpPrereqError("There are instance objects providing"
1918 " pnode/snode while others do not",
1919 errors.ECODE_INVAL)
1920
1921 if not has_nodes and self.op.iallocator is None:
1922 default_iallocator = self.cfg.GetDefaultIAllocator()
1923 if default_iallocator:
1924 self.op.iallocator = default_iallocator
1925 else:
1926 raise errors.OpPrereqError("No iallocator or nodes on the instances"
1927 " given and no cluster-wide default"
1928 " iallocator found; please specify either"
1929 " an iallocator or nodes on the instances"
1930 " or set a cluster-wide default iallocator",
1931 errors.ECODE_INVAL)
1932
1933 _CheckOpportunisticLocking(self.op)
1934
1935 dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
1936 if dups:
1937 raise errors.OpPrereqError("There are duplicate instance names: %s" %
1938 utils.CommaJoin(dups), errors.ECODE_INVAL)
1939
1941 """Calculate the locks.
1942
1943 """
1944 self.share_locks = ShareAll()
1945 self.needed_locks = {
1946
1947
1948 locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
1949 }
1950
1951 if self.op.iallocator:
1952 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
1953 self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET
1954
1955 if self.op.opportunistic_locking:
1956 self.opportunistic_locks[locking.LEVEL_NODE] = True
1957 else:
1958 nodeslist = []
1959 for inst in self.op.instances:
1960 (inst.pnode_uuid, inst.pnode) = \
1961 ExpandNodeUuidAndName(self.cfg, inst.pnode_uuid, inst.pnode)
1962 nodeslist.append(inst.pnode_uuid)
1963 if inst.snode is not None:
1964 (inst.snode_uuid, inst.snode) = \
1965 ExpandNodeUuidAndName(self.cfg, inst.snode_uuid, inst.snode)
1966 nodeslist.append(inst.snode_uuid)
1967
1968 self.needed_locks[locking.LEVEL_NODE] = nodeslist
1969
1970
1971 self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)
1972
1974 if level == locking.LEVEL_NODE_RES and \
1975 self.opportunistic_locks[locking.LEVEL_NODE]:
1976
1977
1978 self.needed_locks[locking.LEVEL_NODE_RES] = \
1979 self.owned_locks(locking.LEVEL_NODE)
1980
1982 """Check prerequisite.
1983
1984 """
1985 if self.op.iallocator:
1986 cluster = self.cfg.GetClusterInfo()
1987 default_vg = self.cfg.GetVGName()
1988 ec_id = self.proc.GetECId()
1989
1990 if self.op.opportunistic_locking:
1991
1992 node_whitelist = self.cfg.GetNodeNames(
1993 list(self.owned_locks(locking.LEVEL_NODE)))
1994 else:
1995 node_whitelist = None
1996
1997 insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
1998 _ComputeNics(op, cluster, None,
1999 self.cfg, ec_id),
2000 _ComputeFullBeParams(op, cluster),
2001 node_whitelist)
2002 for op in self.op.instances]
2003
2004 req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
2005 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
2006
2007 ial.Run(self.op.iallocator)
2008
2009 if not ial.success:
2010 raise errors.OpPrereqError("Can't compute nodes using"
2011 " iallocator '%s': %s" %
2012 (self.op.iallocator, ial.info),
2013 errors.ECODE_NORES)
2014
2015 self.ia_result = ial.result
2016
2017 if self.op.dry_run:
2018 self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
2019 constants.JOB_IDS_KEY: [],
2020 })
2021

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    if self.op.iallocator:
      (allocatable, failed_insts) = self.ia_result
      allocatable_insts = [name for (name, _) in allocatable]
    else:
      allocatable_insts = [op.instance_name for op in self.op.instances]
      failed_insts = []

    return {
      constants.ALLOCATABLE_KEY: allocatable_insts,
      constants.FAILED_KEY: failed_insts,
      }

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result

      for (name, node_names) in allocatable:
        op = op2inst.pop(name)

        (op.pnode_uuid, op.pnode) = \
          ExpandNodeUuidAndName(self.cfg, None, node_names[0])
        if len(node_names) > 1:
          (op.snode_uuid, op.snode) = \
            ExpandNodeUuidAndName(self.cfg, None, node_names[1])

        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator returned an incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())

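# Illustrative sketch (not part of the upstream code): shape of the
# iallocator result consumed by Exec above. self.ia_result is a pair
# (allocatable, failed); names and nodes are hypothetical:
#
#   allocatable = [("inst1.example.com", ["node1"]),
#                  ("inst2.example.com", ["node2", "node3"])]
#   failed = ["inst3.example.com"]
#
# Each allocatable entry becomes a one-opcode job with pnode (and, for
# mirrored disk templates, snode) filled in; failed instances are only
# reported in the partial result.
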
2069 """Data structure for network interface modifications.
2070
2071 Used by L{LUInstanceSetParams}.
2072
2073 """
2075 self.params = None
2076 self.filled = None
2077
2080 """Prepares a list of container modifications by adding a private data field.
2081
2082 @type mods: list of tuples; (operation, index, parameters)
2083 @param mods: List of modifications
2084 @type private_fn: callable or None
2085 @param private_fn: Callable for constructing a private data field for a
2086 modification
2087 @rtype: list
2088
2089 """
2090 if private_fn is None:
2091 fn = lambda: None
2092 else:
2093 fn = private_fn
2094
2095 return [(op, idx, params, fn()) for (op, idx, params) in mods]
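
# Illustrative sketch (not part of the upstream code): the helper merely
# widens each (op, index, params) tuple with a private data slot:
#
#   mods = [(constants.DDM_ADD, -1, {constants.INIC_LINK: "br0"})]
#   _PrepareContainerMods(mods, InstNicModPrivate)
#   # -> [(constants.DDM_ADD, -1, {...}, <InstNicModPrivate instance>)]
#
# With private_fn=None, the fourth element is simply None.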

def _CheckNodesPhysicalCPUs(lu, node_uuids, requested, hypervisor_specs):
  """Checks if nodes have enough physical CPUs.

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has fewer CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type node_uuids: C{list}
  @param node_uuids: the list of node UUIDs to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_specs: list of pairs (string, dict of strings)
  @param hypervisor_specs: list of hypervisor specifications in
    pairs (hypervisor_name, hvparams)
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
    or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(node_uuids, None, hypervisor_specs)
  for node_uuid in node_uuids:
    info = nodeinfo[node_uuid]
    node_name = lu.cfg.GetNodeName(node_uuid)
    info.Raise("Cannot get current information from node %s" % node_name,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node_name, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are"
                                 " required" % (node_name, num_cpus,
                                                requested),
                                 errors.ECODE_NORES)
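
# Illustrative sketch (not part of the upstream code): a typical call, with
# a hypothetical hvparams dict for the Xen PVM hypervisor:
#
#   hypervisor_specs = [(constants.HT_XEN_PVM, hvparams)]
#   _CheckNodesPhysicalCPUs(self, [pnode_uuid], 4, hypervisor_specs)
#
# This raises OpPrereqError unless every listed node reports a "cpu_total"
# of at least 4.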


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred to by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Try a numeric index first
  try:
    idx = int(identifier)
    if idx == -1:
      # -1 denotes the last item
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx >= len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  # Fall back to a lookup by name or UUID
  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)

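# Illustrative sketch (not part of the upstream code): the accepted
# identifier forms, assuming `nics` is a list of objects with `uuid` and
# `name` attributes:
#
#   GetItemFromContainer("0", "nic", nics)     # by index
#   GetItemFromContainer("-1", "nic", nics)    # the last item
#   GetItemFromContainer("eth0", "nic", nics)  # by name (or by UUID)
#
# Each call returns (absolute_index, item); unknown identifiers raise
# OpPrereqError with ECODE_NOENT.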

def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Determine the position at which to add the item
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only a positive integer or -1 is accepted"
                                   " as identifier for %s" % constants.DDM_ADD,
                                   errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert does so before the specified index
        container.insert(idx, item)
    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        if remove_fn is not None:
          remove_fn(absidx, item, private)

        changes = [("%s/%s" % (kind, absidx), "remove")]

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)

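# Illustrative sketch (not part of the upstream code): dropping the last NIC
# while collecting change descriptions; the create/modify/remove callbacks
# are left out (None), so items are used verbatim:
#
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_REMOVE, "-1", {})], None)
#   _ApplyContainerMods("nic", nics, chgdesc, mods, None, None, None)
#   # nics has lost its last element and chgdesc now holds
#   # [("nic/<last index>", "remove")]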
2261 """Updates the C{iv_name} attribute of disks.
2262
2263 @type disks: list of L{objects.Disk}
2264
2265 """
2266 for (idx, disk) in enumerate(disks):
2267 disk.iv_name = "disk/%s" % (base_index + idx, )
2268
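# Illustrative sketch (not part of the upstream code): after removing the
# disk at index 2, the disks that followed it are renumbered from there:
#
#   _UpdateIvNames(2, instance.disks[2:])
#   # instance.disks[2].iv_name == "disk/2", the next one "disk/3", ...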
2271 """Modifies an instances's parameters.
2272
2273 """
2274 HPATH = "instance-modify"
2275 HTYPE = constants.HTYPE_INSTANCE
2276 REQ_BGL = False
2277
  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation"
                                       " is supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result

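  # Illustrative sketch (not part of the upstream code): how the legacy
  # 2-tuple modification format is upgraded to the 3-tuple format:
  #
  #   [(constants.DDM_ADD, {"size": 1024})]
  #   # -> [(constants.DDM_ADD, -1, {"size": 1024})]
  #   [("2", {"mode": "ro"})]
  #   # -> [(constants.DDM_MODIFY, "2", {"mode": "ro"})]
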
  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an 'ext' template
      # and thus do not force the parameter types
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

2329 """Verifies a disk modification.
2330
2331 """
2332 if op == constants.DDM_ADD:
2333 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
2334 if mode not in constants.DISK_ACCESS_SET:
2335 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
2336 errors.ECODE_INVAL)
2337
2338 size = params.get(constants.IDISK_SIZE, None)
2339 if size is None:
2340 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
2341 constants.IDISK_SIZE, errors.ECODE_INVAL)
2342
2343 try:
2344 size = int(size)
2345 except (TypeError, ValueError), err:
2346 raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
2347 errors.ECODE_INVAL)
2348
2349 params[constants.IDISK_SIZE] = size
2350 name = params.get(constants.IDISK_NAME, None)
2351 if name is not None and name.lower() == constants.VALUE_NONE:
2352 params[constants.IDISK_NAME] = None
2353
2354 CheckSpindlesExclusiveStorage(params, excl_stor, True)
2355
2356 elif op == constants.DDM_MODIFY:
2357 if constants.IDISK_SIZE in params:
2358 raise errors.OpPrereqError("Disk size change not possible, use"
2359 " grow-disk", errors.ECODE_INVAL)
2360
2361
2362
2363 if self.instance.disk_template != constants.DT_EXT:
2364 utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
2365
2366 name = params.get(constants.IDISK_NAME, None)
2367 if name is not None and name.lower() == constants.VALUE_NONE:
2368 params[constants.IDISK_NAME] = None
2369
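  # Illustrative sketch (not part of the upstream code): parameter dicts as
  # normalized by the method above; the values are hypothetical:
  #
  #   add:    {constants.IDISK_SIZE: "1024"}
  #   # -> size coerced to int 1024, mode defaulted to constants.DISK_RDWR
  #   modify: {constants.IDISK_NAME: "none"}
  #   # -> name reset to None
  #
  # A modify request carrying IDISK_SIZE is rejected; disk resizing goes
  # through the separate grow-disk operation instead.
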
  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If a network is given, link or mode"
                                     " must not be given",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)

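  # Illustrative sketch (not part of the upstream code): normalizations
  # performed by the method above, with hypothetical values:
  #
  #   {constants.INIC_NETWORK: "none"}  -> {constants.INIC_NETWORK: None}
  #   {constants.INIC_IP: "pool", constants.INIC_NETWORK: "net1"}
  #   # valid when adding: the IP is picked from net1's address pool
  #   {constants.INIC_MAC: "auto"}
  #   # valid when adding, rejected when modifying an existing NIC
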
  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      (self.op.pnode_uuid, self.op.pnode) = \
        ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Node locks cannot be acquired in shared mode, as some operations
    # (e.g. disk conversion) modify the node objects
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.needed_locks[locking.LEVEL_NETWORK] = []
    # Node group locks are needed to recalculate the instance policy
    self.recalculate_locks[locking.LEVEL_NODEGROUP] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's node groups optimistically; this
      # needs to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        (self.op.remote_node_uuid, self.op.remote_node) = \
          ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
                                self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
