22 """Logical units dealing with instances."""
23
24 import OpenSSL
25 import copy
26 import logging
27 import os
28
29 from ganeti import compat
30 from ganeti import constants
31 from ganeti import errors
32 from ganeti import ht
33 from ganeti import hypervisor
34 from ganeti import locking
35 from ganeti.masterd import iallocator
36 from ganeti import masterd
37 from ganeti import netutils
38 from ganeti import objects
39 from ganeti import opcodes
40 from ganeti import pathutils
41 from ganeti import rpc
42 from ganeti import utils
43
44 from ganeti.cmdlib.base import NoHooksLU, LogicalUnit, ResultWithJobs
45
46 from ganeti.cmdlib.common import INSTANCE_DOWN, \
47 INSTANCE_NOT_RUNNING, CAN_CHANGE_INSTANCE_OFFLINE, CheckNodeOnline, \
48 ShareAll, GetDefaultIAllocator, CheckInstanceNodeGroups, \
49 LoadNodeEvacResult, CheckIAllocatorOrNode, CheckParamsNotGlobal, \
50 IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
51 AnnotateDiskParams, GetUpdatedParams, ExpandInstanceName, \
52 ComputeIPolicySpecViolation, CheckInstanceState, ExpandNodeName
53 from ganeti.cmdlib.instance_storage import CreateDisks, \
54 CheckNodesFreeDiskPerVG, WipeDisks, WipeOrCleanupDisks, WaitForSync, \
55 IsExclusiveStorageEnabledNodeName, CreateSingleBlockDev, ComputeDisks, \
56 CheckRADOSFreeSpace, ComputeDiskSizePerVG, GenerateDiskTemplate, \
57 StartInstanceDisks, ShutdownInstanceDisks, AssembleInstanceDisks
58 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
59 GetClusterDomainSecret, BuildInstanceHookEnv, NICListToTuple, \
60 NICToTuple, CheckNodeNotDrained, RemoveInstance, CopyLockList, \
61 ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
62 GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
63 CheckInstanceBridgesExist, CheckNicsBridgesExist, CheckNodeHasOS
64
65 import ganeti.masterd.instance
66
67
68
69
70 _TApplyContModsCbChanges = \
71 ht.TMaybeListOf(ht.TAnd(ht.TIsLength(2), ht.TItems([
72 ht.TNonEmptyString,
73 ht.TAny,
74 ])))
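
# Illustrative only: the type check above accepts None or a list of
# two-element (name, value) tuples, for example (hypothetical values):
#   _TApplyContModsCbChanges(None)                    # => True
#   _TApplyContModsCbChanges([("mode", "bridged")])   # => True
#   _TApplyContModsCbChanges([("bad", "tuple", 3)])   # => False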
78 """Ensures that a given hostname resolves to a 'sane' name.
79
80 The given name is required to be a prefix of the resolved hostname,
81 to prevent accidental mismatches.
82
83 @param lu: the logical unit on behalf of which we're checking
84 @param name: the name we should resolve and check
85 @return: the resolved hostname object
86
87 """
88 hostname = netutils.GetHostname(name=name)
89 if hostname.name != name:
90 lu.LogInfo("Resolved given name '%s' to '%s'", name, hostname.name)
91 if not utils.MatchNameComponent(name, [hostname.name]):
92 raise errors.OpPrereqError(("Resolved hostname '%s' does not look the"
93 " same as given hostname '%s'") %
94 (hostname.name, name), errors.ECODE_INVAL)
95 return hostname
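
# For example (illustrative hostnames): a given name "web1" resolving to
# "web1.example.com" passes the prefix check, while "web" resolving to
# "db.example.com" raises OpPrereqError.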


def _CheckOpportunisticLocking(op):
  """Generate error if opportunistic locking is not possible.

  """
  if op.opportunistic_locking and not op.iallocator:
    raise errors.OpPrereqError("Opportunistic locking is only available in"
                               " combination with an instance allocator",
                               errors.ECODE_INVAL)
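
# Illustration (hypothetical opcode values):
#   op.opportunistic_locking=True, op.iallocator=None    => OpPrereqError
#   op.opportunistic_locking=True, op.iallocator="hail"  => accepted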


def _CreateInstanceAllocRequest(op, disks, nics, beparams, node_whitelist):
  """Wrapper around IAReqInstanceAlloc.

  @param op: The instance opcode
  @param disks: The computed disks
  @param nics: The computed nics
  @param beparams: The fully filled beparams
  @param node_whitelist: List of nodes which should appear as online to the
    allocator (unless the node is already marked offline)

  @returns: A filled L{iallocator.IAReqInstanceAlloc}

  """
  spindle_use = beparams[constants.BE_SPINDLE_USE]
  return iallocator.IAReqInstanceAlloc(name=op.instance_name,
                                       disk_template=op.disk_template,
                                       tags=op.tags,
                                       os=op.os_type,
                                       vcpus=beparams[constants.BE_VCPUS],
                                       memory=beparams[constants.BE_MAXMEM],
                                       spindle_use=spindle_use,
                                       disks=disks,
                                       nics=[n.ToDict() for n in nics],
                                       hypervisor=op.hypervisor,
                                       node_whitelist=node_whitelist)


def _ComputeFullBeParams(op, cluster):
  """Computes the full beparams.

  @param op: The instance opcode
  @param cluster: The cluster config object

  @return: The fully filled beparams

  """
  default_beparams = cluster.beparams[constants.PP_DEFAULT]
  for param, value in op.beparams.iteritems():
    if value == constants.VALUE_AUTO:
      op.beparams[param] = default_beparams[param]
  objects.UpgradeBeParams(op.beparams)
  utils.ForceDictType(op.beparams, constants.BES_PARAMETER_COMPAT)
  return cluster.SimpleFillBE(op.beparams)
154 """Computes the nics.
155
156 @param op: The instance opcode
157 @param cluster: Cluster configuration object
158 @param default_ip: The default ip to assign
159 @param cfg: An instance of the configuration object
160 @param ec_id: Execution context ID
161
162 @returns: The build up nics
163
164 """
165 nics = []
166 for nic in op.nics:
167 nic_mode_req = nic.get(constants.INIC_MODE, None)
168 nic_mode = nic_mode_req
169 if nic_mode is None or nic_mode == constants.VALUE_AUTO:
170 nic_mode = cluster.nicparams[constants.PP_DEFAULT][constants.NIC_MODE]
171
172 net = nic.get(constants.INIC_NETWORK, None)
173 link = nic.get(constants.NIC_LINK, None)
174 ip = nic.get(constants.INIC_IP, None)
175
176 if net is None or net.lower() == constants.VALUE_NONE:
177 net = None
178 else:
179 if nic_mode_req is not None or link is not None:
180 raise errors.OpPrereqError("If network is given, no mode or link"
181 " is allowed to be passed",
182 errors.ECODE_INVAL)
183
184
185 if ip is None or ip.lower() == constants.VALUE_NONE:
186 nic_ip = None
187 elif ip.lower() == constants.VALUE_AUTO:
188 if not op.name_check:
189 raise errors.OpPrereqError("IP address set to auto but name checks"
190 " have been skipped",
191 errors.ECODE_INVAL)
192 nic_ip = default_ip
193 else:
194
195
196 if ip.lower() == constants.NIC_IP_POOL:
197 if net is None:
198 raise errors.OpPrereqError("if ip=pool, parameter network"
199 " must be passed too",
200 errors.ECODE_INVAL)
201
202 elif not netutils.IPAddress.IsValid(ip):
203 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
204 errors.ECODE_INVAL)
205
206 nic_ip = ip
207
208
209 if nic_mode == constants.NIC_MODE_ROUTED and not nic_ip:
210 raise errors.OpPrereqError("Routed nic mode requires an ip address",
211 errors.ECODE_INVAL)
212
213
214 mac = nic.get(constants.INIC_MAC, constants.VALUE_AUTO)
215 if mac not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
216 mac = utils.NormalizeAndValidateMac(mac)
217
218 try:
219
220 cfg.ReserveMAC(mac, ec_id)
221 except errors.ReservationError:
222 raise errors.OpPrereqError("MAC address %s already in use"
223 " in cluster" % mac,
224 errors.ECODE_NOTUNIQUE)
225
226
227 nicparams = {}
228 if nic_mode_req:
229 nicparams[constants.NIC_MODE] = nic_mode
230 if link:
231 nicparams[constants.NIC_LINK] = link
232
233 check_params = cluster.SimpleFillNIC(nicparams)
234 objects.NIC.CheckParameterSyntax(check_params)
235 net_uuid = cfg.LookupNetwork(net)
236 name = nic.get(constants.INIC_NAME, None)
237 if name is not None and name.lower() == constants.VALUE_NONE:
238 name = None
239 nic_obj = objects.NIC(mac=mac, ip=nic_ip, name=name,
240 network=net_uuid, nicparams=nicparams)
241 nic_obj.uuid = cfg.GenerateUniqueID(ec_id)
242 nics.append(nic_obj)
243
244 return nics
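
# Illustration (hypothetical opcode values): an op.nics list such as
#   [{constants.INIC_MODE: "bridged", constants.NIC_LINK: "br0"},
#    {constants.INIC_IP: "pool", constants.INIC_NETWORK: "net1"}]
# yields one bridged NIC and one NIC whose address is drawn later from
# the "net1" pool.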


def _CheckForConflictingIp(lu, ip, node):
  """Raise an error in case of a conflicting IP address.

  @type ip: string
  @param ip: IP address
  @type node: string
  @param node: node name

  """
  (conf_net, _) = lu.cfg.CheckIPInNodeGroup(ip, node)
  if conf_net is not None:
    raise errors.OpPrereqError(("The requested IP address (%s) belongs to"
                                " network %s, but the target NIC does not." %
                                (ip, conf_net)),
                               errors.ECODE_STATE)

  return (None, None)


def _ComputeIPolicyInstanceSpecViolation(
  ipolicy, instance_spec, disk_template,
  _compute_fn=ComputeIPolicySpecViolation):
  """Compute whether the instance spec meets the specs of the ipolicy.

  @type ipolicy: dict
  @param ipolicy: The ipolicy to verify against
  @type instance_spec: dict
  @param instance_spec: The instance spec to verify
  @type disk_template: string
  @param disk_template: the disk template of the instance
  @param _compute_fn: The function to verify ipolicy (unittest only)
  @see: L{ComputeIPolicySpecViolation}

  """
  mem_size = instance_spec.get(constants.ISPEC_MEM_SIZE, None)
  cpu_count = instance_spec.get(constants.ISPEC_CPU_COUNT, None)
  disk_count = instance_spec.get(constants.ISPEC_DISK_COUNT, 0)
  disk_sizes = instance_spec.get(constants.ISPEC_DISK_SIZE, [])
  nic_count = instance_spec.get(constants.ISPEC_NIC_COUNT, 0)
  spindle_use = instance_spec.get(constants.ISPEC_SPINDLE_USE, None)

  return _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
                     disk_sizes, spindle_use, disk_template)
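
# Illustration (hypothetical spec): an instance_spec such as
#   {constants.ISPEC_MEM_SIZE: 1024, constants.ISPEC_CPU_COUNT: 2,
#    constants.ISPEC_DISK_COUNT: 1, constants.ISPEC_DISK_SIZE: [10240],
#    constants.ISPEC_NIC_COUNT: 1, constants.ISPEC_SPINDLE_USE: 1}
# is checked field by field against the ipolicy; the return value is the
# list of violations, empty when the spec fits the policy.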


def _CheckOSVariant(os_obj, name):
  """Check whether an OS name conforms to the os variants specification.

  @type os_obj: L{objects.OS}
  @param os_obj: OS object to check
  @type name: string
  @param name: OS name passed by the user, to check for validity

  """
  variant = objects.OS.GetVariant(name)
  if not os_obj.supported_variants:
    if variant:
      raise errors.OpPrereqError("OS '%s' doesn't support variants ('%s'"
                                 " passed)" % (os_obj.name, variant),
                                 errors.ECODE_INVAL)
    return
  if not variant:
    raise errors.OpPrereqError("OS name must include a variant",
                               errors.ECODE_INVAL)

  if variant not in os_obj.supported_variants:
    raise errors.OpPrereqError("Unsupported OS variant", errors.ECODE_INVAL)
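
# For example (illustrative OS names): for an OS object "debootstrap"
# with supported_variants=["default"], the name "debootstrap+default"
# is accepted, while plain "debootstrap" or "debootstrap+foo" raises
# OpPrereqError.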


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    # do not require name_check to ease forward/backward compatibility
    # for tools
    if self.op.no_install and self.op.start:
      self.LogInfo("No-installation mode selected, disabling startup")
      self.op.start = False
    # validate/normalize the instance name
    self.op.instance_name = \
      netutils.Hostname.GetNormalizedName(self.op.instance_name)

    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("Cannot do IP address check without a name"
                                 " check", errors.ECODE_INVAL)

    # check nics' parameter names
    for nic in self.op.nics:
      utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
    # check that NIC names are unique and valid
    utils.ValidateDeviceNames("NIC", self.op.nics)

    # check that disk names are unique and valid
    utils.ValidateDeviceNames("disk", self.op.disks)

    cluster = self.cfg.GetClusterInfo()
    if self.op.disk_template not in cluster.enabled_disk_templates:
      raise errors.OpPrereqError("Cannot create an instance with disk template"
                                 " '%s', because it is not enabled in the"
                                 " cluster. Enabled disk templates are: %s." %
                                 (self.op.disk_template,
                                  ",".join(cluster.enabled_disk_templates)))

    # check disks: parameter names and consistent adopt/no-adopt strategy
    has_adopt = has_no_adopt = False
    for disk in self.op.disks:
      if self.op.disk_template != constants.DT_EXT:
        utils.ForceDictType(disk, constants.IDISK_PARAMS_TYPES)
      if constants.IDISK_ADOPT in disk:
        has_adopt = True
      else:
        has_no_adopt = True
    if has_adopt and has_no_adopt:
      raise errors.OpPrereqError("Either all disks are adopted or none is",
                                 errors.ECODE_INVAL)
    if has_adopt:
      if self.op.disk_template not in constants.DTS_MAY_ADOPT:
        raise errors.OpPrereqError("Disk adoption is not supported for the"
                                   " '%s' disk template" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)
      if self.op.iallocator is not None:
        raise errors.OpPrereqError("Disk adoption not allowed with an"
                                   " iallocator script", errors.ECODE_INVAL)
      if self.op.mode == constants.INSTANCE_IMPORT:
        raise errors.OpPrereqError("Disk adoption not allowed for"
                                   " instance import", errors.ECODE_INVAL)
    else:
      if self.op.disk_template in constants.DTS_MUST_ADOPT:
        raise errors.OpPrereqError("Disk template %s requires disk adoption,"
                                   " but no 'adopt' parameter given" %
                                   self.op.disk_template,
                                   errors.ECODE_INVAL)

    self.adopt_disks = has_adopt

    # instance name verification
    if self.op.name_check:
      self.hostname1 = _CheckHostnameSane(self, self.op.instance_name)
      self.op.instance_name = self.hostname1.name
      # used in CheckPrereq for ip ping check
      self.check_ip = self.hostname1.ip
    else:
      self.check_ip = None

    # file storage checks
    if (self.op.file_driver and
        not self.op.file_driver in constants.FILE_DRIVER):
      raise errors.OpPrereqError("Invalid file driver name '%s'" %
                                 self.op.file_driver, errors.ECODE_INVAL)

    # set default file_driver if unset and required
    if (not self.op.file_driver and
        self.op.disk_template in [constants.DT_FILE,
                                  constants.DT_SHARED_FILE]):
      self.op.file_driver = constants.FD_DEFAULT

    if self.op.disk_template == constants.DT_FILE:
      opcodes.RequireFileStorage()
    elif self.op.disk_template == constants.DT_SHARED_FILE:
      opcodes.RequireSharedFileStorage()

    ### Node/iallocator related checks
    CheckIAllocatorOrNode(self, "iallocator", "pnode")

    if self.op.pnode is not None:
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        if self.op.snode is None:
          raise errors.OpPrereqError("The networked disk templates need"
                                     " a mirror node", errors.ECODE_INVAL)
      elif self.op.snode:
        self.LogWarning("Secondary node will be ignored on non-mirrored disk"
                        " template")
        self.op.snode = None

    _CheckOpportunisticLocking(self.op)

    self._cds = GetClusterDomainSecret()

    if self.op.mode == constants.INSTANCE_IMPORT:
      # On import force_variant must be True, because if we forced it at
      # initial install, our only chance when importing it back is that it
      # works again!
      self.op.force_variant = True

      if self.op.no_install:
        self.LogInfo("No-installation mode has no effect during import")

    elif self.op.mode == constants.INSTANCE_CREATE:
      if self.op.os_type is None:
        raise errors.OpPrereqError("No guest OS specified",
                                   errors.ECODE_INVAL)
      if self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
        raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
                                   " installation" % self.op.os_type,
                                   errors.ECODE_STATE)
      if self.op.disk_template is None:
        raise errors.OpPrereqError("No disk template specified",
                                   errors.ECODE_INVAL)

    elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
      # Check handshake to ensure both clusters have the same domain secret
      src_handshake = self.op.source_handshake
      if not src_handshake:
        raise errors.OpPrereqError("Missing source handshake",
                                   errors.ECODE_INVAL)

      errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
                                                           src_handshake)
      if errmsg:
        raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
                                   errors.ECODE_INVAL)

      # Load and check source CA
      self.source_x509_ca_pem = self.op.source_x509_ca
      if not self.source_x509_ca_pem:
        raise errors.OpPrereqError("Missing source X509 CA",
                                   errors.ECODE_INVAL)

      try:
        (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
                                                    self._cds)
      except OpenSSL.crypto.Error, err:
        raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
                                   (err, ), errors.ECODE_INVAL)

      (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
      if errcode is not None:
        raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
                                   errors.ECODE_INVAL)

      self.source_x509_ca = cert

      src_instance_name = self.op.source_instance_name
      if not src_instance_name:
        raise errors.OpPrereqError("Missing source instance name",
                                   errors.ECODE_INVAL)

      self.source_instance_name = \
        netutils.GetHostname(name=src_instance_name).name

    else:
      raise errors.OpPrereqError("Invalid instance creation mode %r" %
                                 self.op.mode, errors.ECODE_INVAL)

  def ExpandNames(self):
    """ExpandNames for CreateInstance.

    Figure out the right locks for instance creation.

    """
    self.needed_locks = {}

    instance_name = self.op.instance_name
    # this is just a preventive check, but someone might still add this
    # instance in the meantime, and creation will fail at lock-add time
    if instance_name in self.cfg.GetInstanceList():
      raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
                                 instance_name, errors.ECODE_EXISTS)

    self.add_locks[locking.LEVEL_INSTANCE] = instance_name

    if self.op.iallocator:
      # TODO: Find a solution to not lock all nodes in the cluster, e.g. by
      # specifying a group on instance creation and then selecting nodes from
      # that group
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)
      nodelist = [self.op.pnode]
      if self.op.snode is not None:
        self.op.snode = ExpandNodeName(self.cfg, self.op.snode)
        nodelist.append(self.op.snode)
      self.needed_locks[locking.LEVEL_NODE] = nodelist

    # in case of import lock the source node too
    if self.op.mode == constants.INSTANCE_IMPORT:
      src_node = self.op.src_node
      src_path = self.op.src_path

      if src_path is None:
        self.op.src_path = src_path = self.op.instance_name

      if src_node is None:
        self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
        self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.ALL_SET
        self.op.src_node = None
        if os.path.isabs(src_path):
          raise errors.OpPrereqError("Importing an instance from a path"
                                     " requires a source node option",
                                     errors.ECODE_INVAL)
      else:
        self.op.src_node = src_node = ExpandNodeName(self.cfg, src_node)
        if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
          self.needed_locks[locking.LEVEL_NODE].append(src_node)
        if not os.path.isabs(src_path):
          self.op.src_path = src_path = \
            utils.PathJoin(pathutils.EXPORT_DIR, src_path)

    self.needed_locks[locking.LEVEL_NODE_RES] = \
      CopyLockList(self.needed_locks[locking.LEVEL_NODE])

    # Optimistically acquire shared group locks (we're reading the
    # configuration). We can't just call GetInstanceNodeGroups, because the
    # instance doesn't exist yet. Therefore we lock all node groups of all
    # nodes we have.
    if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
      # In the case we lock all nodes for opportunistic allocation, we have no
      # choice than to lock all groups, because they're allocated before
      # nodes. At least we release all those we don't need in CheckPrereq
      # later.
      self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
    else:
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        list(self.cfg.GetNodeGroupsFromNodes(
          self.needed_locks[locking.LEVEL_NODE]))
    self.share_locks[locking.LEVEL_NODEGROUP] = 1

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)
587 """Run the allocator based on input opcode.
588
589 """
590 if self.op.opportunistic_locking:
591
592 node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
593 else:
594 node_whitelist = None
595
596
597
598 req = _CreateInstanceAllocRequest(self.op, self.disks,
599 self.nics, self.be_full,
600 node_whitelist)
601 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
602
603 ial.Run(self.op.iallocator)
604
605 if not ial.success:
606
607 if self.op.opportunistic_locking:
608 ecode = errors.ECODE_TEMP_NORES
609 else:
610 ecode = errors.ECODE_NORES
611
612 raise errors.OpPrereqError("Can't compute nodes using"
613 " iallocator '%s': %s" %
614 (self.op.iallocator, ial.info),
615 ecode)
616
617 self.op.pnode = ial.result[0]
618 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
619 self.op.instance_name, self.op.iallocator,
620 utils.CommaJoin(ial.result))
621
622 assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
623
624 if req.RequiredNodes() == 2:
625 self.op.snode = ial.result[1]
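
  # Illustration (hypothetical allocator output): for a two-node (e.g.
  # DRBD) request, ial.result could be ["node1.example.com",
  # "node2.example.com"], from which positions 0 and 1 become the
  # primary and secondary node respectively.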

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = {
      "ADD_MODE": self.op.mode,
      }
    if self.op.mode == constants.INSTANCE_IMPORT:
      env["SRC_NODE"] = self.op.src_node
      env["SRC_PATH"] = self.op.src_path
      env["SRC_IMAGES"] = self.src_images

    env.update(BuildInstanceHookEnv(
      name=self.op.instance_name,
      primary_node=self.op.pnode,
      secondary_nodes=self.secondaries,
      status=self.op.start,
      os_type=self.op.os_type,
      minmem=self.be_full[constants.BE_MINMEM],
      maxmem=self.be_full[constants.BE_MAXMEM],
      vcpus=self.be_full[constants.BE_VCPUS],
      nics=NICListToTuple(self, self.nics),
      disk_template=self.op.disk_template,
      disks=[(d[constants.IDISK_NAME], d.get("uuid", ""),
              d[constants.IDISK_SIZE], d[constants.IDISK_MODE])
             for d in self.disks],
      bep=self.be_full,
      hvp=self.hv_full,
      hypervisor_name=self.op.hypervisor,
      tags=self.op.tags,
      ))

    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode(), self.op.pnode] + self.secondaries
    return nl, nl
718 """Use export parameters as defaults.
719
720 In case the opcode doesn't specify (as in override) some instance
721 parameters, then try to use them from the export information, if
722 that declares them.
723
724 """
725 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
726
727 if self.op.disk_template is None:
728 if einfo.has_option(constants.INISECT_INS, "disk_template"):
729 self.op.disk_template = einfo.get(constants.INISECT_INS,
730 "disk_template")
731 if self.op.disk_template not in constants.DISK_TEMPLATES:
732 raise errors.OpPrereqError("Disk template specified in configuration"
733 " file is not one of the allowed values:"
734 " %s" %
735 " ".join(constants.DISK_TEMPLATES),
736 errors.ECODE_INVAL)
737 else:
738 raise errors.OpPrereqError("No disk template specified and the export"
739 " is missing the disk_template information",
740 errors.ECODE_INVAL)
741
742 if not self.op.disks:
743 disks = []
744
745 for idx in range(constants.MAX_DISKS):
746 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
747 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
748 disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
749 disk = {
750 constants.IDISK_SIZE: disk_sz,
751 constants.IDISK_NAME: disk_name
752 }
753 disks.append(disk)
754 self.op.disks = disks
755 if not disks and self.op.disk_template != constants.DT_DISKLESS:
756 raise errors.OpPrereqError("No disk info specified and the export"
757 " is missing the disk information",
758 errors.ECODE_INVAL)
759
760 if not self.op.nics:
761 nics = []
762 for idx in range(constants.MAX_NICS):
763 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
764 ndict = {}
765 for name in [constants.INIC_IP,
766 constants.INIC_MAC, constants.INIC_NAME]:
767 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
768 ndict[name] = v
769 network = einfo.get(constants.INISECT_INS,
770 "nic%d_%s" % (idx, constants.INIC_NETWORK))
771
772
773 if network:
774 ndict[constants.INIC_NETWORK] = network
775 else:
776 for name in list(constants.NICS_PARAMETERS):
777 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
778 ndict[name] = v
779 nics.append(ndict)
780 else:
781 break
782 self.op.nics = nics
783
784 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
785 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
786
787 if (self.op.hypervisor is None and
788 einfo.has_option(constants.INISECT_INS, "hypervisor")):
789 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
790
791 if einfo.has_section(constants.INISECT_HYP):
792
793
794 for name, value in einfo.items(constants.INISECT_HYP):
795 if name not in self.op.hvparams:
796 self.op.hvparams[name] = value
797
798 if einfo.has_section(constants.INISECT_BEP):
799
800 for name, value in einfo.items(constants.INISECT_BEP):
801 if name not in self.op.beparams:
802 self.op.beparams[name] = value
803
804 if name == constants.BE_MEMORY:
805 if constants.BE_MAXMEM not in self.op.beparams:
806 self.op.beparams[constants.BE_MAXMEM] = value
807 if constants.BE_MINMEM not in self.op.beparams:
808 self.op.beparams[constants.BE_MINMEM] = value
809 else:
810
811 for name in constants.BES_PARAMETERS:
812 if (name not in self.op.beparams and
813 einfo.has_option(constants.INISECT_INS, name)):
814 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
815
816 if einfo.has_section(constants.INISECT_OSP):
817
818 for name, value in einfo.items(constants.INISECT_OSP):
819 if name not in self.op.osparams:
820 self.op.osparams[name] = value
821
849 """Calculate final instance file storage dir.
850
851 """
852
853 self.instance_file_storage_dir = None
854 if self.op.disk_template in constants.DTS_FILEBASED:
855
856 joinargs = []
857
858 if self.op.disk_template == constants.DT_SHARED_FILE:
859 get_fsd_fn = self.cfg.GetSharedFileStorageDir
860 else:
861 get_fsd_fn = self.cfg.GetFileStorageDir
862
863 cfg_storagedir = get_fsd_fn()
864 if not cfg_storagedir:
865 raise errors.OpPrereqError("Cluster file storage dir not defined",
866 errors.ECODE_STATE)
867 joinargs.append(cfg_storagedir)
868
869 if self.op.file_storage_dir is not None:
870 joinargs.append(self.op.file_storage_dir)
871
872 joinargs.append(self.op.instance_name)
873
874
875 self.instance_file_storage_dir = utils.PathJoin(*joinargs)
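
  # Illustration (hypothetical paths): with a cluster file storage dir
  # of "/srv/ganeti/file-storage", op.file_storage_dir="mysubdir" and
  # instance name "inst1.example.com", the resulting directory is
  # "/srv/ganeti/file-storage/mysubdir/inst1.example.com".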

  def CheckPrereq(self): # pylint: disable=R0914
    """Check prerequisites.

    """
    # Check that the optimistically acquired groups are correct wrt the
    # acquired nodes
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
    cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
    if not owned_groups.issuperset(cur_groups):
      raise errors.OpPrereqError("New instance %s's node groups changed since"
                                 " locks were acquired, current groups are"
                                 " '%s', owning groups '%s'; retry the"
                                 " operation" %
                                 (self.op.instance_name,
                                  utils.CommaJoin(cur_groups),
                                  utils.CommaJoin(owned_groups)),
                                 errors.ECODE_STATE)

    self._CalculateFileStorageDir()

    if self.op.mode == constants.INSTANCE_IMPORT:
      export_info = self._ReadExportInfo()
      self._ReadExportParams(export_info)
      self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
    else:
      self._old_instance_name = None

    if (not self.cfg.GetVGName() and
        self.op.disk_template not in constants.DTS_NOT_LVM):
      raise errors.OpPrereqError("Cluster does not support lvm-based"
                                 " instances", errors.ECODE_STATE)

    if (self.op.hypervisor is None or
        self.op.hypervisor == constants.VALUE_AUTO):
      self.op.hypervisor = self.cfg.GetHypervisorType()

    cluster = self.cfg.GetClusterInfo()
    enabled_hvs = cluster.enabled_hypervisors
    if self.op.hypervisor not in enabled_hvs:
      raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
                                 " cluster (%s)" %
                                 (self.op.hypervisor, ",".join(enabled_hvs)),
                                 errors.ECODE_STATE)

    # Check tag validity
    for tag in self.op.tags:
      objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = _ComputeFullBeParams(self.op, cluster)

    # build os parameters
    self.os_full = cluster.SimpleFillOS(self.op.os_type, self.op.osparams)

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = _ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                             self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ip ping checks (we use the same ip that was resolved in ExpandNames)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating here the mac address both the allocator and the hooks get
    # the real final mac address rather than the 'auto' or 'generate' value.
    # There is a race condition between the generation and the instance object
    # creation, which means that we know the mac is valid now, but we're not
    # sure it will be when we actually add the instance. If things go bad
    # adding the instance will abort because of a duplicate mac, and the
    # creation job will fail.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode, self.op.snode, self.op.src_node])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_ALLOC)
    # Release all unneeded group locks
    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      "Node locks differ from node resource locks"

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need
    # to know the NIC's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.name)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        _CheckForConflictingIp(self, nic.ip, self.pnode.name)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode == pnode.name:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode)
      CheckNodeNotDrained(self, self.op.snode)
      CheckNodeVmCapable(self, self.op.snode)
      self.secondaries.append(self.op.snode)

      snode = self.cfg.GetNodeInfo(self.op.snode)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    if self.op.disk_template not in constants.DTS_EXCL_STORAGE:
      nodes = [pnode]
      if self.op.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(snode)
      has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
      if compat.any(map(has_es, nodes)):
        raise errors.OpPrereqError("Disk template %s not supported with"
                                   " exclusive storage" %
                                   self.op.disk_template,
                                   errors.ECODE_STATE)

    nodenames = [pnode.name] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      else:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, nodenames, req_sizes)

    elif self.op.disk_template == constants.DT_PLAIN: # Adopt LV disks
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv"; we need to ensure that other
          # calls to ReserveLV use the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.name])[pnode.name]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name)

      node_lvs = self.rpc.call_lv_list([pnode.name],
                                       vg_names.payload.keys())[pnode.name]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.name],
                                            list(all_disks))[pnode.name]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                            group_info)
    res = _ComputeIPolicyInstanceSpecViolation(ipolicy, ispec,
                                               self.op.disk_template)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, nodenames, self.op.hypervisor, self.op.hvparams)

    CheckNodeHasOS(self, pnode.name, self.op.os_type, self.op.force_variant)
    # check OS parameters (remotely)
    CheckOSParams(self, True, nodenames, self.op.os_type, self.os_full)

    CheckNicsBridgesExist(self, self.nics, self.pnode.name)

    # memory check on primary node
    # TODO(dynmem): use MINMEM for checking
    if self.op.start:
      CheckNodeFreeMemory(self, self.pnode.name,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor)

    self.dry_run_result = list(nodenames)

  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    instance = self.op.instance_name
    pnode_name = self.pnode.name

    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"
    assert not self.glm.is_owned(locking.LEVEL_NODE_ALLOC)

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    node = self.cfg.GetNodeInfo(pnode_name)
    nodegroup = self.cfg.GetNodeGroup(node.group)
    disks = GenerateDiskTemplate(self,
                                 self.op.disk_template,
                                 instance, pnode_name,
                                 self.secondaries,
                                 self.disks,
                                 self.instance_file_storage_dir,
                                 self.op.file_driver,
                                 0,
                                 feedback_fn,
                                 self.cfg.GetGroupDiskParams(nodegroup))

    iobj = objects.Instance(name=instance, os=self.op.os_type,
                            primary_node=pnode_name,
                            nics=self.nics, disks=disks,
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
          self.cfg.SetDiskID(t_dsk, pnode_name)
        result = self.rpc.call_blockdev_rename(pnode_name,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        self.cfg.ReleaseDRBDMinors(instance)
        raise

    feedback_fn("adding instance %s to cluster config" % instance)

    self.cfg.AddInstance(iobj, self.proc.GetECId())

    # Declare that we don't want to remove the instance lock anymore, as
    # we've added the instance to the config
    del self.remove_locks[locking.LEVEL_INSTANCE]

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    if disk_abort:
      # Something is already wrong with the disks, don't do anything else
      pass
    elif self.op.wait_for_sync:
      disk_abort = not WaitForSync(self, iobj)
    elif iobj.disk_template in constants.DTS_INT_MIRROR:
      # make sure the disks are not degraded (still sync-ing is ok)
      feedback_fn("* checking mirrors status")
      disk_abort = not WaitForSync(self, iobj, oneshot=True)
    else:
      disk_abort = False

    if disk_abort:
      RemoveDisks(self, iobj)
      self.cfg.RemoveInstance(iobj.name)
      # Make sure the instance lock gets removed
      self.remove_locks[locking.LEVEL_INSTANCE] = iobj.name
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance")

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.disk_template != constants.DT_DISKLESS and not self.adopt_disks:
      # we need to set the disks ID to the primary node, since the
      # preceding code might or might have not done it, depending on
      # disk template and other options
      for disk in iobj.disks:
        self.cfg.SetDiskID(disk, pnode_name)
      if self.op.mode == constants.INSTANCE_CREATE:
        if not self.op.no_install:
          pause_sync = (iobj.disk_template in constants.DTS_INT_MIRROR and
                        not self.op.wait_for_sync)
          if pause_sync:
            feedback_fn("* pausing disk sync to install instance OS")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), True)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("pause-sync of instance %s for disk %d failed",
                             instance, idx)

          feedback_fn("* running the instance OS create scripts...")
          # FIXME: pass debug option from opcode to backend
          os_add_result = \
            self.rpc.call_instance_os_add(pnode_name, (iobj, None), False,
                                          self.op.debug_level)
          if pause_sync:
            feedback_fn("* resuming disk sync")
            result = self.rpc.call_blockdev_pause_resume_sync(pnode_name,
                                                              (iobj.disks,
                                                               iobj), False)
            for idx, success in enumerate(result.payload):
              if not success:
                logging.warn("resume-sync of instance %s for disk %d failed",
                             instance, idx)

          os_add_result.Raise("Could not add os for instance %s"
                              " on node %s" % (instance, pnode_name))

      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               constants.IEIO_SCRIPT,
                                               (iobj.disks[idx], idx),
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node, pnode_name,
                                                  self.pnode.secondary_ip,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to
          # make a connection. In some cases stopping an instance can take a
          # long time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.name
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (instance, pnode_name))

          rename_from = self.source_instance_name

        else:
          # also checked in the prereq part
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        # Run rename script on newly imported instance
        assert iobj.name == instance
        feedback_fn("Running rename script for %s" % instance)
        result = self.rpc.call_instance_run_rename(pnode_name, iobj,
                                                   rename_from,
                                                   self.op.debug_level)
        if result.fail_msg:
          self.LogWarning("Failed to run rename script for %s on node"
                          " %s: %s" % (instance, pnode_name, result.fail_msg))

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", instance, pnode_name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(pnode_name, (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return list(iobj.all_nodes)


class LUInstanceRename(LogicalUnit):
  """Rename an instance.

  """
  HPATH = "instance-rename"
  HTYPE = constants.HTYPE_INSTANCE

  def CheckArguments(self):
    """Check arguments.

    """
    if self.op.ip_check and not self.op.name_check:
      # TODO: make the ip check more flexible and not depend on the name check
      raise errors.OpPrereqError("IP address check requires a name check",
                                 errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    This runs on master, primary and secondary nodes of the instance.

    """
    env = BuildInstanceHookEnvByObject(self, self.instance)
    env["INSTANCE_NEW_NAME"] = self.op.new_name
    return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()] + list(self.instance.all_nodes)
    return (nl, nl)
1503 """Check prerequisites.
1504
1505 This checks that the instance is in the cluster and is not running.
1506
1507 """
1508 self.op.instance_name = ExpandInstanceName(self.cfg,
1509 self.op.instance_name)
1510 instance = self.cfg.GetInstanceInfo(self.op.instance_name)
1511 assert instance is not None
1512 CheckNodeOnline(self, instance.primary_node)
1513 CheckInstanceState(self, instance, INSTANCE_NOT_RUNNING,
1514 msg="cannot rename")
1515 self.instance = instance
1516
1517 new_name = self.op.new_name
1518 if self.op.name_check:
1519 hostname = _CheckHostnameSane(self, new_name)
1520 new_name = self.op.new_name = hostname.name
1521 if (self.op.ip_check and
1522 netutils.TcpPing(hostname.ip, constants.DEFAULT_NODED_PORT)):
1523 raise errors.OpPrereqError("IP %s of instance %s already in use" %
1524 (hostname.ip, new_name),
1525 errors.ECODE_NOTUNIQUE)
1526
1527 instance_list = self.cfg.GetInstanceList()
1528 if new_name in instance_list and new_name != instance.name:
1529 raise errors.OpPrereqError("Instance '%s' is already in the cluster" %
1530 new_name, errors.ECODE_EXISTS)
1531

  def Exec(self, feedback_fn):
    """Rename the instance.

    """
    inst = self.instance
    old_name = inst.name

    rename_file_storage = False
    if (inst.disk_template in constants.DTS_FILEBASED and
        self.op.new_name != inst.name):
      old_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      rename_file_storage = True

    self.cfg.RenameInstance(inst.name, self.op.new_name)
    # Change the instance lock. This is definitely safe while we hold the
    # BGL. Otherwise the new lock would have to be added in acquired mode.
    assert self.REQ_BGL
    assert locking.BGL in self.owned_locks(locking.LEVEL_CLUSTER)
    self.glm.remove(locking.LEVEL_INSTANCE, old_name)
    self.glm.add(locking.LEVEL_INSTANCE, self.op.new_name)

    # re-read the instance from the configuration after rename
    inst = self.cfg.GetInstanceInfo(self.op.new_name)

    if rename_file_storage:
      new_file_storage_dir = os.path.dirname(inst.disks[0].logical_id[1])
      result = self.rpc.call_file_storage_dir_rename(inst.primary_node,
                                                     old_file_storage_dir,
                                                     new_file_storage_dir)
      result.Raise("Could not rename on node %s directory '%s' to '%s'"
                   " (but the instance has been renamed in Ganeti)" %
                   (inst.primary_node, old_file_storage_dir,
                    new_file_storage_dir))

    StartInstanceDisks(self, inst, None)
    # update info on disks
    info = GetInstanceInfoText(inst)
    for (idx, disk) in enumerate(inst.disks):
      for node in inst.all_nodes:
        self.cfg.SetDiskID(disk, node)
        result = self.rpc.call_blockdev_setinfo(node, disk, info)
        if result.fail_msg:
          self.LogWarning("Error setting info on node %s for disk %s: %s",
                          node, idx, result.fail_msg)
    try:
      result = self.rpc.call_instance_run_rename(inst.primary_node, inst,
                                                 old_name, self.op.debug_level)
      msg = result.fail_msg
      if msg:
        msg = ("Could not run OS rename script for instance %s on node %s"
               " (but the instance has been renamed in Ganeti): %s" %
               (inst.name, inst.primary_node, msg))
        self.LogWarning(msg)
    finally:
      ShutdownInstanceDisks(self, inst)

    return inst.name


class LUInstanceRemove(LogicalUnit):
  """Remove an instance.

  """
  HPATH = "instance-remove"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_REPLACE

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1614 """Build hooks env.
1615
1616 This runs on master, primary and secondary nodes of the instance.
1617
1618 """
1619 env = BuildInstanceHookEnvByObject(self, self.instance)
1620 env["SHUTDOWN_TIMEOUT"] = self.op.shutdown_timeout
1621 return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [self.cfg.GetMasterNode()]
    nl_post = list(self.instance.all_nodes) + nl
    return (nl, nl_post)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

  def Exec(self, feedback_fn):
    """Remove the instance.

    """
    instance = self.instance
    logging.info("Shutting down instance %s on node %s",
                 instance.name, instance.primary_node)

    result = self.rpc.call_instance_shutdown(instance.primary_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_failures:
        feedback_fn("Warning: can't shutdown instance: %s" % msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, instance.primary_node, msg))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))
    assert not (set(instance.all_nodes) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Not owning correct locks"

    RemoveInstance(self, feedback_fn, instance, self.op.ignore_failures)


class LUInstanceMove(LogicalUnit):
  """Move an instance by data-copying.

  """
  HPATH = "instance-move"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    target_node = ExpandNodeName(self.cfg, self.op.target_node)
    self.op.target_node = target_node
    self.needed_locks[locking.LEVEL_NODE] = [target_node]
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE:
      self._LockInstancesNodes(primary_only=True)
    elif level == locking.LEVEL_NODE_RES:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])
1695 """Build hooks env.
1696
1697 This runs on master, primary and secondary nodes of the instance.
1698
1699 """
1700 env = {
1701 "TARGET_NODE": self.op.target_node,
1702 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
1703 }
1704 env.update(BuildInstanceHookEnvByObject(self, self.instance))
1705 return env

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    nl = [
      self.cfg.GetMasterNode(),
      self.instance.primary_node,
      self.op.target_node,
      ]
    return (nl, nl)

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the instance is in the cluster.

    """
    self.instance = instance = self.cfg.GetInstanceInfo(self.op.instance_name)
    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    if instance.disk_template not in constants.DTS_COPYABLE:
      raise errors.OpPrereqError("Disk template %s not suitable for copying" %
                                 instance.disk_template, errors.ECODE_STATE)

    node = self.cfg.GetNodeInfo(self.op.target_node)
    assert node is not None, \
      "Cannot retrieve locked node %s" % self.op.target_node

    self.target_node = target_node = node.name

    if target_node == instance.primary_node:
      raise errors.OpPrereqError("Instance %s is already on the node %s" %
                                 (instance.name, target_node),
                                 errors.ECODE_STATE)

    bep = self.cfg.GetClusterInfo().FillBE(instance)

    for idx, dsk in enumerate(instance.disks):
      if dsk.dev_type not in (constants.LD_LV, constants.LD_FILE):
        raise errors.OpPrereqError("Instance disk %d has a complex layout,"
                                   " cannot copy" % idx, errors.ECODE_STATE)

    CheckNodeOnline(self, target_node)
    CheckNodeNotDrained(self, target_node)
    CheckNodeVmCapable(self, target_node)
    cluster = self.cfg.GetClusterInfo()
    group_info = self.cfg.GetNodeGroup(node.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster,
                                                            group_info)
    CheckTargetNodeIPolicy(self, ipolicy, instance, node, self.cfg,
                           ignore=self.op.ignore_ipolicy)

    if instance.admin_state == constants.ADMINST_UP:
      # check memory requirements on the target node
      CheckNodeFreeMemory(self, target_node,
                          "failing over instance %s" %
                          instance.name, bep[constants.BE_MAXMEM],
                          instance.hypervisor)
    else:
      self.LogInfo("Not checking memory on the secondary node as"
                   " instance will not be started")

    # check bridge existence
    CheckInstanceBridgesExist(self, instance, node=target_node)

  def Exec(self, feedback_fn):
    """Move an instance.

    The move is done by shutting it down on its present node, copying
    the data over (slow) and starting it on the new node.

    """
    instance = self.instance

    source_node = instance.primary_node
    target_node = self.target_node

    self.LogInfo("Shutting down instance %s on source node %s",
                 instance.name, source_node)

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES))

    result = self.rpc.call_instance_shutdown(source_node, instance,
                                             self.op.shutdown_timeout,
                                             self.op.reason)
    msg = result.fail_msg
    if msg:
      if self.op.ignore_consistency:
        self.LogWarning("Could not shutdown instance %s on node %s."
                        " Proceeding anyway. Please make sure node"
                        " %s is down. Error details: %s",
                        instance.name, source_node, source_node, msg)
      else:
        raise errors.OpExecError("Could not shutdown instance %s on"
                                 " node %s: %s" %
                                 (instance.name, source_node, msg))

    # create the target disks
    try:
      CreateDisks(self, instance, target_node=target_node)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      self.cfg.ReleaseDRBDMinors(instance.name)
      raise

    cluster_name = self.cfg.GetClusterInfo().cluster_name

    errs = []
    # activate, get path, copy the data over
    for idx, disk in enumerate(instance.disks):
      self.LogInfo("Copying data for disk %d", idx)
      result = self.rpc.call_blockdev_assemble(target_node, (disk, instance),
                                               instance.name, True, idx)
      if result.fail_msg:
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break
      dev_path = result.payload
      result = self.rpc.call_blockdev_export(source_node, (disk, instance),
                                             target_node, dev_path,
                                             cluster_name)
      if result.fail_msg:
        self.LogWarning("Can't copy data over for disk %d: %s",
                        idx, result.fail_msg)
        errs.append(result.fail_msg)
        break

    if errs:
      self.LogWarning("Some disks failed to copy, aborting")
      try:
        RemoveDisks(self, instance, target_node=target_node)
      finally:
        self.cfg.ReleaseDRBDMinors(instance.name)
        raise errors.OpExecError("Errors during disk copy: %s" %
                                 (",".join(errs),))

    instance.primary_node = target_node
    self.cfg.Update(instance, feedback_fn)

    self.LogInfo("Removing the disks on the original node")
    RemoveDisks(self, instance, target_node=source_node)

    # Only start the instance if it's marked as up
    if instance.admin_state == constants.ADMINST_UP:
      self.LogInfo("Starting instance %s on node %s",
                   instance.name, target_node)

      disks_ok, _ = AssembleInstanceDisks(self, instance,
                                          ignore_secondaries=True)
      if not disks_ok:
        ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Can't activate the instance's disks")

      result = self.rpc.call_instance_start(target_node,
                                            (instance, None, None), False,
                                            self.op.reason)
      msg = result.fail_msg
      if msg:
        ShutdownInstanceDisks(self, instance)
        raise errors.OpExecError("Could not start instance %s on node %s: %s" %
                                 (instance.name, target_node, msg))


class LUInstanceMultiAlloc(NoHooksLU):
  """Allocates multiple instances at the same time.

  """
  REQ_BGL = False

  def CheckArguments(self):
    """Check arguments.

    """
    nodes = []
    for inst in self.op.instances:
      if inst.iallocator is not None:
        raise errors.OpPrereqError("iallocator is not allowed to be set on"
                                   " instance objects", errors.ECODE_INVAL)
      nodes.append(bool(inst.pnode))
      if inst.disk_template in constants.DTS_INT_MIRROR:
        nodes.append(bool(inst.snode))

    has_nodes = compat.any(nodes)
    if compat.all(nodes) ^ has_nodes:
      raise errors.OpPrereqError("There are instance objects providing"
                                 " pnode/snode while others do not",
                                 errors.ECODE_INVAL)

    if not has_nodes and self.op.iallocator is None:
      default_iallocator = self.cfg.GetDefaultIAllocator()
      if default_iallocator:
        self.op.iallocator = default_iallocator
      else:
        raise errors.OpPrereqError("No iallocator or nodes on the instances"
                                   " given and no cluster-wide default"
                                   " iallocator found; please specify either"
                                   " an iallocator or nodes on the instances"
                                   " or set a cluster-wide default iallocator",
                                   errors.ECODE_INVAL)

    _CheckOpportunisticLocking(self.op)

    dups = utils.FindDuplicates([op.instance_name for op in self.op.instances])
    if dups:
      raise errors.OpPrereqError("There are duplicate instance names: %s" %
                                 utils.CommaJoin(dups), errors.ECODE_INVAL)

  def ExpandNames(self):
    """Calculate the locks.

    """
    self.share_locks = ShareAll()
    self.needed_locks = {
      # iallocator will select nodes and even if no iallocator is used,
      # collisions with LUInstanceCreate should be avoided
      locking.LEVEL_NODE_ALLOC: locking.ALL_SET,
      }

    if self.op.iallocator:
      self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
      self.needed_locks[locking.LEVEL_NODE_RES] = locking.ALL_SET

      if self.op.opportunistic_locking:
        self.opportunistic_locks[locking.LEVEL_NODE] = True
    else:
      nodeslist = []
      for inst in self.op.instances:
        inst.pnode = ExpandNodeName(self.cfg, inst.pnode)
        nodeslist.append(inst.pnode)
        if inst.snode is not None:
          inst.snode = ExpandNodeName(self.cfg, inst.snode)
          nodeslist.append(inst.snode)

      self.needed_locks[locking.LEVEL_NODE] = nodeslist
      # Lock resources of instance's primary and secondary nodes (copy to
      # prevent accidental modification)
      self.needed_locks[locking.LEVEL_NODE_RES] = list(nodeslist)

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODE_RES and \
      self.opportunistic_locks[locking.LEVEL_NODE]:
      # Even when using opportunistic locking, we require the same set of
      # NODE_RES locks as we got NODE locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        self.owned_locks(locking.LEVEL_NODE)
1954
  def CheckPrereq(self):
    """Check prerequisite.

    """
    if self.op.iallocator:
      cluster = self.cfg.GetClusterInfo()
      default_vg = self.cfg.GetVGName()
      ec_id = self.proc.GetECId()

      if self.op.opportunistic_locking:
        # Only consider nodes for which a lock is held
        node_whitelist = list(self.owned_locks(locking.LEVEL_NODE))
      else:
        node_whitelist = None

      insts = [_CreateInstanceAllocRequest(op, ComputeDisks(op, default_vg),
                                           _ComputeNics(op, cluster, None,
                                                        self.cfg, ec_id),
                                           _ComputeFullBeParams(op, cluster),
                                           node_whitelist)
               for op in self.op.instances]

      req = iallocator.IAReqMultiInstanceAlloc(instances=insts)
      ial = iallocator.IAllocator(self.cfg, self.rpc, req)

      ial.Run(self.op.iallocator)

      if not ial.success:
        raise errors.OpPrereqError("Can't compute nodes using"
                                   " iallocator '%s': %s" %
                                   (self.op.iallocator, ial.info),
                                   errors.ECODE_NORES)

      self.ia_result = ial.result

    if self.op.dry_run:
      self.dry_run_result = objects.FillDict(self._ConstructPartialResult(), {
        constants.JOB_IDS_KEY: [],
        })

  def _ConstructPartialResult(self):
    """Constructs the partial result.

    """
    (allocatable, failed) = self.ia_result
    return {
      opcodes.OpInstanceMultiAlloc.ALLOCATABLE_KEY:
        map(compat.fst, allocatable),
      opcodes.OpInstanceMultiAlloc.FAILED_KEY: failed,
      }

  def Exec(self, feedback_fn):
    """Executes the opcode.

    """
    jobs = []
    if self.op.iallocator:
      op2inst = dict((op.instance_name, op) for op in self.op.instances)
      (allocatable, failed) = self.ia_result

      for (name, nodes) in allocatable:
        op = op2inst.pop(name)

        if len(nodes) > 1:
          (op.pnode, op.snode) = nodes
        else:
          (op.pnode,) = nodes

        jobs.append([op])

      missing = set(op2inst.keys()) - set(failed)
      assert not missing, \
        "Iallocator returned an incomplete result: %s" % \
        utils.CommaJoin(missing)
    else:
      jobs.extend([op] for op in self.op.instances)

    return ResultWithJobs(jobs, **self._ConstructPartialResult())
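
  # Illustrative note (made-up data): an allocator result such as
  #   ia_result = ([("inst1.example.com", ["node2", "node3"])],
  #                ["inst2.example.com"])
  # makes Exec() queue one job for inst1 with pnode/snode filled in, while
  # inst2 only shows up in the partial result as failed.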

class InstNicModPrivate:
  """Data structure for network interface modifications.

  Used by L{LUInstanceSetParams}.

  """
  def __init__(self):
    self.params = None
    self.filled = None


def _PrepareContainerMods(mods, private_fn):
  """Prepares a list of container modifications by adding a private data field.

  @type mods: list of tuples; (operation, index, parameters)
  @param mods: List of modifications
  @type private_fn: callable or None
  @param private_fn: Callable for constructing a private data field for a
    modification
  @rtype: list

  """
  if private_fn is None:
    fn = lambda: None
  else:
    fn = private_fn

  return [(op, idx, params, fn()) for (op, idx, params) in mods]
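
# Illustrative example (made-up values): with
#   mods = [(constants.DDM_ADD, -1, {constants.INIC_MAC: constants.VALUE_AUTO})]
# and private_fn=InstNicModPrivate, each tuple is extended with a fresh
# private object, e.g.
#   [(constants.DDM_ADD, -1, {...}, <InstNicModPrivate>)]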


def _CheckNodesPhysicalCPUs(lu, nodenames, requested, hypervisor_name):
  """Checks if nodes have enough physical CPUs

  This function checks if all given nodes have the needed number of
  physical CPUs. In case any node has less CPUs or we cannot get the
  information from the node, this function raises an OpPrereqError
  exception.

  @type lu: C{LogicalUnit}
  @param lu: a logical unit from which we get configuration data
  @type nodenames: C{list}
  @param nodenames: the list of node names to check
  @type requested: C{int}
  @param requested: the minimum acceptable number of physical CPUs
  @type hypervisor_name: C{string}
  @param hypervisor_name: the hypervisor to query on the nodes
  @raise errors.OpPrereqError: if the node doesn't have enough CPUs,
      or we cannot check the node

  """
  nodeinfo = lu.rpc.call_node_info(nodenames, None, [hypervisor_name], None)
  for node in nodenames:
    info = nodeinfo[node]
    info.Raise("Cannot get current information from node %s" % node,
               prereq=True, ecode=errors.ECODE_ENVIRON)
    (_, _, (hv_info, )) = info.payload
    num_cpus = hv_info.get("cpu_total", None)
    if not isinstance(num_cpus, int):
      raise errors.OpPrereqError("Can't compute the number of physical CPUs"
                                 " on node %s, result was '%s'" %
                                 (node, num_cpus), errors.ECODE_ENVIRON)
    if requested > num_cpus:
      raise errors.OpPrereqError("Node %s has %s physical CPUs, but %s are "
                                 "required" % (node, num_cpus, requested),
                                 errors.ECODE_NORES)
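
# Illustrative usage (hypothetical arguments): a prerequisite check such as
#   _CheckNodesPhysicalCPUs(self, ["node1.example.com"], 4,
#                           constants.HT_XEN_PVM)
# raises errors.OpPrereqError unless every listed node reports at least
# four physical CPUs for the given hypervisor.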


def GetItemFromContainer(identifier, kind, container):
  """Return the item referred by the identifier.

  @type identifier: string
  @param identifier: Item index or name or UUID
  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to get the item from

  """
  # Index
  try:
    idx = int(identifier)
    if idx == -1:
      # -1 addresses the last item
      absidx = len(container) - 1
    elif idx < 0:
      raise IndexError("Not accepting negative indices other than -1")
    elif idx >= len(container):
      raise IndexError("Got %s index %s, but there are only %s" %
                       (kind, idx, len(container)))
    else:
      absidx = idx
    return (absidx, container[idx])
  except ValueError:
    pass

  for idx, item in enumerate(container):
    if item.uuid == identifier or item.name == identifier:
      return (idx, item)

  raise errors.OpPrereqError("Cannot find %s with identifier %s" %
                             (kind, identifier), errors.ECODE_NOENT)
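
# Illustrative example (hypothetical container): for a list "nics" with two
# entries, GetItemFromContainer("1", "nic", nics) and
# GetItemFromContainer(nics[1].uuid, "nic", nics) both return (1, nics[1]);
# "-1" resolves to the last item.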


def _ApplyContainerMods(kind, container, chgdesc, mods,
                        create_fn, modify_fn, remove_fn):
  """Applies descriptions in C{mods} to C{container}.

  @type kind: string
  @param kind: One-word item description
  @type container: list
  @param container: Container to modify
  @type chgdesc: None or list
  @param chgdesc: List of applied changes
  @type mods: list
  @param mods: Modifications as returned by L{_PrepareContainerMods}
  @type create_fn: callable or None
  @param create_fn: Callback for creating a new item (L{constants.DDM_ADD});
    receives absolute item index, parameters and private data object as added
    by L{_PrepareContainerMods}, returns tuple containing new item and changes
    as list
  @type modify_fn: callable or None
  @param modify_fn: Callback for modifying an existing item
    (L{constants.DDM_MODIFY}); receives absolute item index, item, parameters
    and private data object as added by L{_PrepareContainerMods}, returns
    changes as list
  @type remove_fn: callable or None
  @param remove_fn: Callback on removing item; receives absolute item index,
    item and private data object as added by L{_PrepareContainerMods}

  """
  for (op, identifier, params, private) in mods:
    changes = None

    if op == constants.DDM_ADD:
      # Additions address the container only by index; "-1" appends
      try:
        idx = int(identifier)
      except ValueError:
        raise errors.OpPrereqError("Only a non-negative integer or -1 is"
                                   " accepted as identifier for %s" %
                                   constants.DDM_ADD, errors.ECODE_INVAL)
      if idx == -1:
        addidx = len(container)
      else:
        if idx < 0:
          raise IndexError("Not accepting negative indices other than -1")
        elif idx > len(container):
          raise IndexError("Got %s index %s, but there are only %s" %
                           (kind, idx, len(container)))
        addidx = idx

      if create_fn is None:
        item = params
      else:
        (item, changes) = create_fn(addidx, params, private)

      if idx == -1:
        container.append(item)
      else:
        assert idx >= 0
        assert idx <= len(container)
        # list.insert inserts before the given index
        container.insert(idx, item)
    else:
      # Retrieve existing item
      (absidx, item) = GetItemFromContainer(identifier, kind, container)

      if op == constants.DDM_REMOVE:
        assert not params

        if remove_fn is not None:
          remove_fn(absidx, item, private)

        changes = [("%s/%s" % (kind, absidx), "remove")]

        assert container[absidx] == item
        del container[absidx]
      elif op == constants.DDM_MODIFY:
        if modify_fn is not None:
          changes = modify_fn(absidx, item, params, private)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

    assert _TApplyContModsCbChanges(changes)

    if not (chgdesc is None or changes is None):
      chgdesc.extend(changes)
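
# Illustrative sketch (hypothetical callbacks): appending an item and
# collecting change descriptions could look like
#   chgdesc = []
#   mods = _PrepareContainerMods([(constants.DDM_ADD, -1, params)], None)
#   _ApplyContainerMods("nic", instance.nics, chgdesc, mods,
#                       create_fn, modify_fn, remove_fn)
# where create_fn/modify_fn/remove_fn follow the contracts documented
# above; with create_fn=None the raw parameters are inserted as-is.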


def _UpdateIvNames(base_index, disks):
  """Updates the C{iv_name} attribute of disks.

  @type base_index: C{int}
  @param base_index: index of the first disk in C{disks}
  @type disks: list of L{objects.Disk}
  @param disks: the disks whose C{iv_name} should be renumbered

  """
  for (idx, disk) in enumerate(disks):
    disk.iv_name = "disk/%s" % (base_index + idx, )
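
# Illustrative example: after removing disk 0, calling
# _UpdateIvNames(0, instance.disks) renames the remaining disks to
# "disk/0", "disk/1", ... so the iv_name sequence stays contiguous.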


class LUInstanceSetParams(LogicalUnit):
  """Modifies an instance's parameters.

  """
  HPATH = "instance-modify"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False

  @staticmethod
  def _UpgradeDiskNicMods(kind, mods, verify_fn):
    assert ht.TList(mods)
    assert not mods or len(mods[0]) in (2, 3)

    if mods and len(mods[0]) == 2:
      result = []

      addremove = 0
      for op, params in mods:
        if op in (constants.DDM_ADD, constants.DDM_REMOVE):
          result.append((op, -1, params))
          addremove += 1

          if addremove > 1:
            raise errors.OpPrereqError("Only one %s add or remove operation is"
                                       " supported at a time" % kind,
                                       errors.ECODE_INVAL)
        else:
          result.append((constants.DDM_MODIFY, op, params))

      assert verify_fn(result)
    else:
      result = mods

    return result
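
  # Illustrative example (made-up values): the legacy two-element format
  #   [("add", {"size": 1024})]
  # is upgraded to [(constants.DDM_ADD, -1, {"size": 1024})], while
  #   [("2", {"mode": "ro"})]
  # becomes [(constants.DDM_MODIFY, "2", {"mode": "ro"})].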

  @staticmethod
  def _CheckMods(kind, mods, key_types, item_fn):
    """Ensures requested disk/NIC modifications are valid.

    """
    for (op, _, params) in mods:
      assert ht.TDict(params)

      # If 'key_types' is an empty dict, we assume we have an
      # 'ext' template and thus do not ForceDictType
      if key_types:
        utils.ForceDictType(params, key_types)

      if op == constants.DDM_REMOVE:
        if params:
          raise errors.OpPrereqError("No settings should be passed when"
                                     " removing a %s" % kind,
                                     errors.ECODE_INVAL)
      elif op in (constants.DDM_ADD, constants.DDM_MODIFY):
        item_fn(op, params)
      else:
        raise errors.ProgrammerError("Unhandled operation '%s'" % op)

  def _VerifyDiskModification(self, op, params):
    """Verifies a disk modification.

    """
    if op == constants.DDM_ADD:
      mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
      if mode not in constants.DISK_ACCESS_SET:
        raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
                                   errors.ECODE_INVAL)

      size = params.get(constants.IDISK_SIZE, None)
      if size is None:
        raise errors.OpPrereqError("Required disk parameter '%s' missing" %
                                   constants.IDISK_SIZE, errors.ECODE_INVAL)

      try:
        size = int(size)
      except (TypeError, ValueError), err:
        raise errors.OpPrereqError("Invalid disk size parameter: %s" % err,
                                   errors.ECODE_INVAL)

      params[constants.IDISK_SIZE] = size
      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None

    elif op == constants.DDM_MODIFY:
      if constants.IDISK_SIZE in params:
        raise errors.OpPrereqError("Disk size change not possible, use"
                                   " grow-disk", errors.ECODE_INVAL)

      # Disk modification supports changing only the disk name and mode;
      # changing arbitrary parameters is allowed only for ext disk templates
      if self.instance.disk_template != constants.DT_EXT:
        utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)

      name = params.get(constants.IDISK_NAME, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.IDISK_NAME] = None
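
  # Illustrative example (made-up values): for an add operation,
  #   params = {"size": "1024", "mode": "rw"}
  # is normalized in place to {"size": 1024, "mode": "rw"}; a missing size
  # raises errors.OpPrereqError.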

  @staticmethod
  def _VerifyNicModification(op, params):
    """Verifies a network interface modification.

    """
    if op in (constants.DDM_ADD, constants.DDM_MODIFY):
      ip = params.get(constants.INIC_IP, None)
      name = params.get(constants.INIC_NAME, None)
      req_net = params.get(constants.INIC_NETWORK, None)
      link = params.get(constants.NIC_LINK, None)
      mode = params.get(constants.NIC_MODE, None)
      if name is not None and name.lower() == constants.VALUE_NONE:
        params[constants.INIC_NAME] = None
      if req_net is not None:
        if req_net.lower() == constants.VALUE_NONE:
          params[constants.INIC_NETWORK] = None
          req_net = None
        elif link is not None or mode is not None:
          raise errors.OpPrereqError("If network is given, no mode or link"
                                     " is allowed to be passed",
                                     errors.ECODE_INVAL)

      if op == constants.DDM_ADD:
        macaddr = params.get(constants.INIC_MAC, None)
        if macaddr is None:
          params[constants.INIC_MAC] = constants.VALUE_AUTO

      if ip is not None:
        if ip.lower() == constants.VALUE_NONE:
          params[constants.INIC_IP] = None
        else:
          if ip.lower() == constants.NIC_IP_POOL:
            if op == constants.DDM_ADD and req_net is None:
              raise errors.OpPrereqError("If ip=pool, parameter network"
                                         " cannot be none",
                                         errors.ECODE_INVAL)
          else:
            if not netutils.IPAddress.IsValid(ip):
              raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
                                         errors.ECODE_INVAL)

      if constants.INIC_MAC in params:
        macaddr = params[constants.INIC_MAC]
        if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
          macaddr = utils.NormalizeAndValidateMac(macaddr)

        if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
          raise errors.OpPrereqError("'auto' is not a valid MAC address when"
                                     " modifying an existing NIC",
                                     errors.ECODE_INVAL)
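
  # Illustrative example (made-up values): adding a NIC with
  #   params = {"ip": "pool", "network": "net1"}
  # passes validation, while {"ip": "pool"} alone is rejected because
  # ip=pool requires a network.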

  def CheckArguments(self):
    if not (self.op.nics or self.op.disks or self.op.disk_template or
            self.op.hvparams or self.op.beparams or self.op.os_name or
            self.op.osparams or self.op.offline is not None or
            self.op.runtime_mem or self.op.pnode):
      raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)

    if self.op.hvparams:
      CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
                           "hypervisor", "instance", "cluster")

    self.op.disks = self._UpgradeDiskNicMods(
      "disk", self.op.disks, opcodes.OpInstanceSetParams.TestDiskModifications)
    self.op.nics = self._UpgradeDiskNicMods(
      "NIC", self.op.nics, opcodes.OpInstanceSetParams.TestNicModifications)

    if self.op.disks and self.op.disk_template is not None:
      raise errors.OpPrereqError("Disk template conversion and other disk"
                                 " changes not supported at the same time",
                                 errors.ECODE_INVAL)

    if (self.op.disk_template and
        self.op.disk_template in constants.DTS_INT_MIRROR and
        self.op.remote_node is None):
      raise errors.OpPrereqError("Changing the disk template to a mirrored"
                                 " one requires specifying a secondary node",
                                 errors.ECODE_INVAL)

    # Check NIC modifications
    self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
                    self._VerifyNicModification)

    if self.op.pnode:
      self.op.pnode = ExpandNodeName(self.cfg, self.op.pnode)

  def ExpandNames(self):
    self._ExpandAndLockInstance()
    self.needed_locks[locking.LEVEL_NODEGROUP] = []
    # Can't even acquire node locks in shared mode as upcoming changes in
    # Ganeti 2.6 will start to modify the node object on disk conversion
    self.needed_locks[locking.LEVEL_NODE] = []
    self.needed_locks[locking.LEVEL_NODE_RES] = []
    self.needed_locks[locking.LEVEL_NODE_ALLOC] = locking.NAL

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]
      # Acquire locks for the instance's nodegroups optimistically; this
      # needs to be verified in CheckPrereq
      self.needed_locks[locking.LEVEL_NODEGROUP] = \
        self.cfg.GetInstanceNodeGroups(self.op.instance_name)
    elif level == locking.LEVEL_NODE:
      self._LockInstancesNodes()
      if self.op.disk_template and self.op.remote_node:
        self.op.remote_node = ExpandNodeName(self.cfg, self.op.remote_node)
        self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node)
    elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
      # Copy node locks
      self.needed_locks[locking.LEVEL_NODE_RES] = \
        CopyLockList(self.needed_locks[locking.LEVEL_NODE])