1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30 """Logical unit setting parameters of a single instance."""
31
32 import copy
33 import logging
34 import os
35
36 from ganeti import compat
37 from ganeti import constants
38 from ganeti import errors
39 from ganeti import ht
40 from ganeti import hypervisor
41 from ganeti import locking
42 from ganeti.masterd import iallocator
43 from ganeti import netutils
44 from ganeti import objects
45 from ganeti import utils
46 import ganeti.rpc.node as rpc
47
48 from ganeti.cmdlib.base import LogicalUnit
49
50 from ganeti.cmdlib.common import INSTANCE_DOWN, \
51 INSTANCE_NOT_RUNNING, CheckNodeOnline, \
52 CheckParamsNotGlobal, \
53 IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
54 GetUpdatedParams, CheckInstanceState, ExpandNodeUuidAndName, \
55 IsValidDiskAccessModeCombination, AnnotateDiskParams, \
56 CheckIAllocatorOrNode
57 from ganeti.cmdlib.instance_storage import CalculateFileStorageDir, \
58 CheckDiskExtProvider, CheckNodesFreeDiskPerVG, CheckRADOSFreeSpace, \
59 CheckSpindlesExclusiveStorage, ComputeDiskSizePerVG, ComputeDisksInfo, \
60 CreateDisks, CreateSingleBlockDev, GenerateDiskTemplate, \
61 IsExclusiveStorageEnabledNodeUuid, ShutdownInstanceDisks, \
62 WaitForSync, WipeOrCleanupDisks, AssembleInstanceDisks
63 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
64 NICToTuple, CheckNodeNotDrained, CopyLockList, \
65 ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
66 GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
67 UpdateMetadata, CheckForConflictingIp, \
68 PrepareContainerMods, ComputeInstanceCommunicationNIC, \
69 ApplyContainerMods, ComputeIPolicyInstanceSpecViolation, \
70 CheckNodesPhysicalCPUs
71 import ganeti.masterd.instance
75 """Data structure for network interface modifications.
76
77 Used by L{LUInstanceSetParams}.
78
79 """
81 self.params = None
82 self.filled = None
83
86 """Modifies an instances's parameters.
87
88 """
89 HPATH = "instance-modify"
90 HTYPE = constants.HTYPE_INSTANCE
91 REQ_BGL = False
92
94 """Find a disk object using the provided params.
95
96 Accept arguments as keywords and use the GetDiskInfo/GetDiskInfoByName
97 config functions to retrieve the disk info based on these arguments.
98
99 In case of an error, raise the appropriate exceptions.
100 """
101 if uuid:
102 disk = self.cfg.GetDiskInfo(uuid)
103 if disk is None:
104 raise errors.OpPrereqError("No disk was found with this UUID: %s" %
105 uuid, errors.ECODE_INVAL)
106 elif name:
107 disk = self.cfg.GetDiskInfoByName(name)
108 if disk is None:
109 raise errors.OpPrereqError("No disk was found with this name: %s" %
110 name, errors.ECODE_INVAL)
111 else:
112 raise errors.ProgrammerError("No disk UUID or name was given")
113
114 return disk
115
116 @staticmethod
118 assert ht.TList(mods)
119 assert not mods or len(mods[0]) in (2, 3)
120
121 if mods and len(mods[0]) == 2:
122 result = []
123
124 addremove = 0
125 for op, params in mods:
126 if op in (constants.DDM_ADD, constants.DDM_ATTACH,
127 constants.DDM_REMOVE, constants.DDM_DETACH):
128 result.append((op, -1, params))
129 addremove += 1
130
131 if addremove > 1:
132 raise errors.OpPrereqError("Only one %s add/attach/remove/detach "
133 "operation is supported at a time" %
134 kind, errors.ECODE_INVAL)
135 else:
136 result.append((constants.DDM_MODIFY, op, params))
137
138 assert verify_fn(result)
139 else:
140 result = mods
141 return result
142
143 @staticmethod
145 """Ensures requested disk/NIC modifications are valid.
146
147 Note that the 'attach' action needs a way to refer to the UUID of the disk,
148 since the disk name is not unique cluster-wide. However, the UUID of the
149 disk is not settable but rather generated by Ganeti automatically,
150 therefore it cannot be passed as an IDISK parameter. For this reason, this
151 function will override the checks to accept uuid parameters solely for the
152 attach action.
153 """
154
155 key_types_attach = key_types.copy()
156 key_types_attach['uuid'] = 'string'
157
158 for (op, _, params) in mods:
159 assert ht.TDict(params)
160
161
162
163 if key_types:
164 utils.ForceDictType(params, (key_types if op != constants.DDM_ATTACH
165 else key_types_attach))
166
167 if op in (constants.DDM_REMOVE, constants.DDM_DETACH):
168 if params:
169 raise errors.OpPrereqError("No settings should be passed when"
170 " removing or detaching a %s" % kind,
171 errors.ECODE_INVAL)
172 elif op in (constants.DDM_ADD, constants.DDM_ATTACH,
173 constants.DDM_MODIFY):
174 item_fn(op, params)
175 else:
176 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
177
179 """Verifies a disk modification.
180
181 """
182 disk_type = params.get(
183 constants.IDISK_TYPE,
184 self.cfg.GetInstanceDiskTemplate(self.instance.uuid))
185
186 if op == constants.DDM_ADD:
187 params[constants.IDISK_TYPE] = disk_type
188
189 if disk_type == constants.DT_DISKLESS:
190 raise errors.OpPrereqError(
191 "Must specify disk type on diskless instance", errors.ECODE_INVAL)
192
193 if disk_type != constants.DT_EXT:
194 utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
195
196 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
197 if mode not in constants.DISK_ACCESS_SET:
198 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
199 errors.ECODE_INVAL)
200
201 size = params.get(constants.IDISK_SIZE, None)
202 if size is None:
203 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
204 constants.IDISK_SIZE, errors.ECODE_INVAL)
205 size = int(size)
206
207 params[constants.IDISK_SIZE] = size
208 name = params.get(constants.IDISK_NAME, None)
209 if name is not None and name.lower() == constants.VALUE_NONE:
210 params[constants.IDISK_NAME] = None
211
212
213 if op in (constants.DDM_ADD, constants.DDM_ATTACH):
214 CheckSpindlesExclusiveStorage(params, excl_stor, True)
215 CheckDiskExtProvider(params, disk_type)
216
217
218 if not self.op.wait_for_sync and not self.instance.disks_active:
219 raise errors.OpPrereqError("Can't %s a disk to an instance with"
220 " deactivated disks and --no-wait-for-sync"
221 " given" % op, errors.ECODE_INVAL)
222
223
224 if disk_type in constants.DTS_HAVE_ACCESS:
225 access_type = params.get(constants.IDISK_ACCESS,
226 group_access_types[disk_type])
227 if not IsValidDiskAccessModeCombination(self.instance.hypervisor,
228 disk_type, access_type):
229 raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
230 " used with %s disk access param" %
231 (self.instance.hypervisor, access_type),
232 errors.ECODE_STATE)
233
234 if op == constants.DDM_ATTACH:
235 if len(params) != 1 or ('uuid' not in params and
236 constants.IDISK_NAME not in params):
237 raise errors.OpPrereqError("Only one argument is permitted in %s op,"
238 " either %s or uuid" % (constants.DDM_ATTACH,
239 constants.IDISK_NAME,
240 ),
241 errors.ECODE_INVAL)
242 self._CheckAttachDisk(params)
243
244 elif op == constants.DDM_MODIFY:
245 if constants.IDISK_SIZE in params:
246 raise errors.OpPrereqError("Disk size change not possible, use"
247 " grow-disk", errors.ECODE_INVAL)
248
249 disk_info = self.cfg.GetInstanceDisks(self.instance.uuid)
250
251
252
253 if not utils.AllDiskOfType(disk_info, [constants.DT_EXT]):
254 utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
255 else:
256
257
258 for param in [constants.IDISK_ACCESS, constants.IDISK_PROVIDER]:
259 if param in params:
260 raise errors.OpPrereqError("Disk '%s' parameter change is"
261 " not possible" % param,
262 errors.ECODE_INVAL)
263
264 name = params.get(constants.IDISK_NAME, None)
265 if name is not None and name.lower() == constants.VALUE_NONE:
266 params[constants.IDISK_NAME] = None
267
268 @staticmethod
270 """Verifies a network interface modification.
271
272 """
273 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
274 ip = params.get(constants.INIC_IP, None)
275 name = params.get(constants.INIC_NAME, None)
276 req_net = params.get(constants.INIC_NETWORK, None)
277 link = params.get(constants.NIC_LINK, None)
278 mode = params.get(constants.NIC_MODE, None)
279 if name is not None and name.lower() == constants.VALUE_NONE:
280 params[constants.INIC_NAME] = None
281 if req_net is not None:
282 if req_net.lower() == constants.VALUE_NONE:
283 params[constants.INIC_NETWORK] = None
284 req_net = None
285 elif link is not None or mode is not None:
286 raise errors.OpPrereqError("If network is given"
287 " mode or link should not",
288 errors.ECODE_INVAL)
289
290 if op == constants.DDM_ADD:
291 macaddr = params.get(constants.INIC_MAC, None)
292 if macaddr is None:
293 params[constants.INIC_MAC] = constants.VALUE_AUTO
294
295 if ip is not None:
296 if ip.lower() == constants.VALUE_NONE:
297 params[constants.INIC_IP] = None
298 else:
299 if ip.lower() == constants.NIC_IP_POOL:
300 if op == constants.DDM_ADD and req_net is None:
301 raise errors.OpPrereqError("If ip=pool, parameter network"
302 " cannot be none",
303 errors.ECODE_INVAL)
304 else:
305 if not netutils.IPAddress.IsValid(ip):
306 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
307 errors.ECODE_INVAL)
308
309 if constants.INIC_MAC in params:
310 macaddr = params[constants.INIC_MAC]
311 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
312 macaddr = utils.NormalizeAndValidateMac(macaddr)
313
314 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
315 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
316 " modifying an existing NIC",
317 errors.ECODE_INVAL)
318
320 """Looks up uuid or name of disk if necessary."""
321 try:
322 return int(idx)
323 except ValueError:
324 pass
325 for i, d in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)):
326 if d.name == idx or d.uuid == idx:
327 return i
328 raise errors.OpPrereqError("Lookup of disk %r failed" % idx)
329
331 """Looks up uuid or name of disk if necessary."""
332 return [(op, self._LookupDiskIndex(idx), params)
333 for op, idx, params in self.op.disks]
334
336 if not (self.op.nics or self.op.disks or self.op.disk_template or
337 self.op.hvparams or self.op.beparams or self.op.os_name or
338 self.op.osparams or self.op.offline is not None or
339 self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
340 self.op.instance_communication is not None):
341 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
342
343 if self.op.hvparams:
344 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
345 "hypervisor", "instance", "cluster")
346
347 self.op.disks = self._UpgradeDiskNicMods(
348 "disk", self.op.disks,
349 ht.TSetParamsMods(ht.TIDiskParams))
350 self.op.nics = self._UpgradeDiskNicMods(
351 "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
352
353
354 if self.op.disk_template:
355 if self.op.disks:
356 raise errors.OpPrereqError("Disk template conversion and other disk"
357 " changes not supported at the same time",
358 errors.ECODE_INVAL)
359
360
361 if self.op.disk_template in constants.DTS_INT_MIRROR:
362 CheckIAllocatorOrNode(self, "iallocator", "remote_node")
363 elif self.op.remote_node:
364 self.LogWarning("Changing the disk template to a non-mirrored one,"
365 " the secondary node will be ignored")
366
367
368 self.op.remote_node = None
369
370
371 if self.op.disk_template in constants.DTS_FILEBASED:
372 self._FillFileDriver()
373
374
375 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
376 self._VerifyNicModification)
377
378 if self.op.pnode:
379 (self.op.pnode_uuid, self.op.pnode) = \
380 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
381
417
431
456
491
499
502
503 update_params_dict = dict([(key, params[key])
504 for key in constants.NICS_PARAMETERS
505 if key in params])
506
507 req_link = update_params_dict.get(constants.NIC_LINK, None)
508 req_mode = update_params_dict.get(constants.NIC_MODE, None)
509
510 new_net_uuid = None
511 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
512 if new_net_uuid_or_name:
513 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
514 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
515
516 if old_net_uuid:
517 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
518
519 if new_net_uuid:
520 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
521 if not netparams:
522 raise errors.OpPrereqError("No netparams found for the network"
523 " %s, probably not connected" %
524 new_net_obj.name, errors.ECODE_INVAL)
525 new_params = dict(netparams)
526 else:
527 new_params = GetUpdatedParams(old_params, update_params_dict)
528
529 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
530
531 new_filled_params = cluster.SimpleFillNIC(new_params)
532 objects.NIC.CheckParameterSyntax(new_filled_params)
533
534 new_mode = new_filled_params[constants.NIC_MODE]
535 if new_mode == constants.NIC_MODE_BRIDGED:
536 bridge = new_filled_params[constants.NIC_LINK]
537 msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
538 if msg:
539 msg = "Error checking bridges on node '%s': %s" % \
540 (self.cfg.GetNodeName(pnode_uuid), msg)
541 if self.op.force:
542 self.warn.append(msg)
543 else:
544 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
545
546 elif new_mode == constants.NIC_MODE_ROUTED:
547 ip = params.get(constants.INIC_IP, old_ip)
548 if ip is None and not new_net_uuid:
549 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
550 " on a routed NIC if not attached to a"
551 " network", errors.ECODE_INVAL)
552
553 elif new_mode == constants.NIC_MODE_OVS:
554
555 self.LogInfo("OVS links are currently not checked for correctness")
556
557 if constants.INIC_MAC in params:
558 mac = params[constants.INIC_MAC]
559 if mac is None:
560 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
561 errors.ECODE_INVAL)
562 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
563
564 params[constants.INIC_MAC] = \
565 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
566 else:
567
568 try:
569 self.cfg.ReserveMAC(mac, self.proc.GetECId())
570 except errors.ReservationError:
571 raise errors.OpPrereqError("MAC address '%s' already in use"
572 " in cluster" % mac,
573 errors.ECODE_NOTUNIQUE)
574 elif new_net_uuid != old_net_uuid:
575
576 def get_net_prefix(net_uuid):
577 mac_prefix = None
578 if net_uuid:
579 nobj = self.cfg.GetNetwork(net_uuid)
580 mac_prefix = nobj.mac_prefix
581
582 return mac_prefix
583
584 new_prefix = get_net_prefix(new_net_uuid)
585 old_prefix = get_net_prefix(old_net_uuid)
586 if old_prefix != new_prefix:
587 params[constants.INIC_MAC] = \
588 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
589
590
591 new_ip = params.get(constants.INIC_IP, old_ip)
592 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
593 if new_ip:
594
595 if new_ip.lower() == constants.NIC_IP_POOL:
596 if new_net_uuid:
597 try:
598 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
599 except errors.ReservationError:
600 raise errors.OpPrereqError("Unable to get a free IP"
601 " from the address pool",
602 errors.ECODE_STATE)
603 self.LogInfo("Chose IP %s from network %s",
604 new_ip,
605 new_net_obj.name)
606 params[constants.INIC_IP] = new_ip
607 else:
608 raise errors.OpPrereqError("ip=pool, but no network found",
609 errors.ECODE_INVAL)
610
611 elif new_net_uuid:
612 try:
613 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
614 check=self.op.conflicts_check)
615 self.LogInfo("Reserving IP %s in network %s",
616 new_ip, new_net_obj.name)
617 except errors.ReservationError:
618 raise errors.OpPrereqError("IP %s not available in network %s" %
619 (new_ip, new_net_obj.name),
620 errors.ECODE_NOTUNIQUE)
621
622 elif self.op.conflicts_check:
623 CheckForConflictingIp(self, new_ip, pnode_uuid)
624
625
626 if old_ip and old_net_uuid:
627 try:
628 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
629 except errors.AddressPoolError:
630 logging.warning("Release IP %s not contained in network %s",
631 old_ip, old_net_obj.name)
632
633
634 elif (old_net_uuid is not None and
635 (req_link is not None or req_mode is not None)):
636 raise errors.OpPrereqError("Not allowed to change link or mode of"
637 " a NIC that is connected to a network",
638 errors.ECODE_INVAL)
639
640 private.params = new_params
641 private.filled = new_filled_params
642
644 """CheckPrereq checks related to a new disk template."""
645
646 pnode_uuid = self.instance.primary_node
647
648
649 disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
650 if disk_template == constants.DT_MIXED:
651 raise errors.OpPrereqError(
652 "Conversion from mixed is not yet supported.")
653
654 inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
655 if utils.AnyDiskOfType(inst_disks, constants.DTS_NOT_CONVERTIBLE_FROM):
656 raise errors.OpPrereqError(
657 "Conversion from the '%s' disk template is not supported"
658 % self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
659 errors.ECODE_INVAL)
660
661 elif self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO:
662 raise errors.OpPrereqError("Conversion to the '%s' disk template is"
663 " not supported" % self.op.disk_template,
664 errors.ECODE_INVAL)
665
666 if (self.op.disk_template != constants.DT_EXT and
667 utils.AllDiskOfType(inst_disks, [self.op.disk_template])):
668 raise errors.OpPrereqError("Instance already has disk template %s" %
669 self.op.disk_template, errors.ECODE_INVAL)
670
671 if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
672 enabled_dts = utils.CommaJoin(self.cluster.enabled_disk_templates)
673 raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
674 " cluster (enabled templates: %s)" %
675 (self.op.disk_template, enabled_dts),
676 errors.ECODE_STATE)
677
678 default_vg = self.cfg.GetVGName()
679 if (not default_vg and
680 self.op.disk_template not in constants.DTS_NOT_LVM):
681 raise errors.OpPrereqError("Disk template conversions to lvm-based"
682 " instances are not supported by the cluster",
683 errors.ECODE_STATE)
684
685 CheckInstanceState(self, self.instance, INSTANCE_DOWN,
686 msg="cannot change disk template")
687
688
689 self.disks_info = ComputeDisksInfo(inst_disks, self.op.disk_template,
690 default_vg, self.op.ext_params)
691
692
693 if self.op.disk_template in constants.DTS_INT_MIRROR \
694 and self.op.remote_node_uuid:
695 if self.op.remote_node_uuid == pnode_uuid:
696 raise errors.OpPrereqError("Given new secondary node %s is the same"
697 " as the primary node of the instance" %
698 self.op.remote_node, errors.ECODE_STATE)
699 CheckNodeOnline(self, self.op.remote_node_uuid)
700 CheckNodeNotDrained(self, self.op.remote_node_uuid)
701 CheckNodeVmCapable(self, self.op.remote_node_uuid)
702
703 snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
704 snode_group = self.cfg.GetNodeGroup(snode_info.group)
705 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
706 snode_group)
707 CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
708 ignore=self.op.ignore_ipolicy)
709 if pnode_info.group != snode_info.group:
710 self.LogWarning("The primary and secondary nodes are in two"
711 " different node groups; the disk parameters"
712 " from the first disk's node group will be"
713 " used")
714
715
716 pnode_group = self.cfg.GetNodeGroup(pnode_info.group)
717 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
718 pnode_group)
719 allowed_dts = ipolicy[constants.IPOLICY_DTS]
720 if self.op.disk_template not in allowed_dts:
721 raise errors.OpPrereqError("Disk template '%s' in not allowed (allowed"
722 " templates: %s)" % (self.op.disk_template,
723 utils.CommaJoin(allowed_dts)),
724 errors.ECODE_STATE)
725
726 if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
727
728 nodes = [pnode_info]
729 if self.op.disk_template in constants.DTS_INT_MIRROR \
730 and self.op.remote_node_uuid:
731 assert snode_info
732 nodes.append(snode_info)
733 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
734 if compat.any(map(has_es, nodes)):
735 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
736 " storage is enabled" % (
737 self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
738 self.op.disk_template))
739 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
740
741
742
743 if (self.op.disk_template == constants.DT_PLAIN and
744 utils.AllDiskOfType(inst_disks, [constants.DT_DRBD8])):
745
746
747 pass
748 elif (self.op.disk_template == constants.DT_DRBD8 and
749 utils.AllDiskOfType(inst_disks, [constants.DT_PLAIN])):
750
751
752 if self.op.remote_node_uuid:
753 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info)
754 CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], req_sizes)
755 elif self.op.disk_template in constants.DTS_LVM:
756
757 node_uuids = [pnode_uuid]
758 if self.op.remote_node_uuid:
759 node_uuids.append(self.op.remote_node_uuid)
760 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info)
761 CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
762 elif self.op.disk_template == constants.DT_RBD:
763
764 CheckRADOSFreeSpace()
765 elif self.op.disk_template == constants.DT_EXT:
766
767 pass
768 else:
769
770 pass
771
773 """CheckPrereq checks related to disk changes.
774
775 @type ispec: dict
776 @param ispec: instance specs to be updated with the new disks
777
778 """
779 self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
780
781 inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
782 excl_stor = compat.any(
783 rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes).values()
784 )
785
786
787 node_info = self.cfg.GetNodeInfo(self.instance.primary_node)
788 node_group = self.cfg.GetNodeGroup(node_info.group)
789 group_disk_params = self.cfg.GetGroupDiskParams(node_group)
790
791 group_access_types = dict(
792 (dt, group_disk_params[dt].get(
793 constants.RBD_ACCESS, constants.DISK_KERNELSPACE))
794 for dt in constants.DISK_TEMPLATES)
795
796
797
798 ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor,
799 group_access_types)
800
801
802 self._CheckMods("disk", self.op.disks, {}, ver_fn)
803
804 self.diskmod = PrepareContainerMods(self.op.disks, None)
805
806 def _PrepareDiskMod(_, disk, params, __):
807 disk.name = params.get(constants.IDISK_NAME, None)
808
809
810 inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
811 disks = copy.deepcopy(inst_disks)
812 ApplyContainerMods("disk", disks, None, self.diskmod, None, None,
813 _PrepareDiskMod, None, None)
814 utils.ValidateDeviceNames("disk", disks)
815 if len(disks) > constants.MAX_DISKS:
816 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
817 " more" % constants.MAX_DISKS,
818 errors.ECODE_STATE)
819 disk_sizes = [disk.size for disk in inst_disks]
820 disk_sizes.extend(params["size"] for (op, idx, params, private) in
821 self.diskmod if op == constants.DDM_ADD)
822 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
823 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
824
825
826 if self.op.offline is not None:
827 if self.op.offline:
828 msg = "can't change to offline without being down first"
829 else:
830 msg = "can't change to online (down) without being offline first"
831 CheckInstanceState(self, self.instance, INSTANCE_NOT_RUNNING,
832 msg=msg)
833
834 @staticmethod
836 """Create a NIC mod that adds or removes the instance
837 communication NIC to a running instance.
838
839 The NICS are dynamically created using the Dynamic Device
840 Modification (DDM). This function produces a NIC modification
841 (mod) that inserts an additional NIC meant for instance
842 communication in or removes an existing instance communication NIC
843 from a running instance, using DDM.
844
845 @type cfg: L{config.ConfigWriter}
846 @param cfg: cluster configuration
847
848 @type instance_communication: boolean
849 @param instance_communication: whether instance communication is
850 enabled or disabled
851
852 @type instance: L{objects.Instance}
853 @param instance: instance to which the NIC mod will be applied to
854
855 @rtype: (L{constants.DDM_ADD}, -1, parameters) or
856 (L{constants.DDM_REMOVE}, -1, parameters) or
857 L{None}
858 @return: DDM mod containing an action to add or remove the NIC, or
859 None if nothing needs to be done
860
861 """
862 nic_name = ComputeInstanceCommunicationNIC(instance.name)
863
864 instance_communication_nic = None
865
866 for nic in instance.nics:
867 if nic.name == nic_name:
868 instance_communication_nic = nic
869 break
870
871 if instance_communication and not instance_communication_nic:
872 action = constants.DDM_ADD
873 params = {constants.INIC_NAME: nic_name,
874 constants.INIC_MAC: constants.VALUE_GENERATE,
875 constants.INIC_IP: constants.NIC_IP_POOL,
876 constants.INIC_NETWORK:
877 cfg.GetInstanceCommunicationNetwork()}
878 elif not instance_communication and instance_communication_nic:
879 action = constants.DDM_REMOVE
880 params = None
881 else:
882 action = None
883 params = None
884
885 if action is not None:
886 return (action, -1, params)
887 else:
888 return None
889
896
898 if self.op.hotplug or self.op.hotplug_if_possible:
899 result = self.rpc.call_hotplug_supported(self.instance.primary_node,
900 self.instance)
901 if result.fail_msg:
902 if self.op.hotplug:
903 result.Raise("Hotplug is not possible: %s" % result.fail_msg,
904 prereq=True, ecode=errors.ECODE_STATE)
905 else:
906 self.LogWarning(result.fail_msg)
907 self.op.hotplug = False
908 self.LogInfo("Modification will take place without hotplugging.")
909 else:
910 self.op.hotplug = True
911
922
940
954
987
989
990 instance_os = (self.op.os_name
991 if self.op.os_name and not self.op.force
992 else self.instance.os)
993
994 if self.op.osparams or self.op.osparams_private:
995 public_parms = self.op.osparams or {}
996 private_parms = self.op.osparams_private or {}
997 dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)
998
999 if dupe_keys:
1000 raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
1001 utils.CommaJoin(dupe_keys))
1002
1003 self.os_inst = GetUpdatedParams(self.instance.osparams,
1004 public_parms)
1005 self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
1006 private_parms)
1007
1008 CheckOSParams(self, True, node_uuids, instance_os,
1009 objects.FillDict(self.os_inst,
1010 self.os_inst_private),
1011 self.op.force_variant)
1012
1013 else:
1014 self.os_inst = {}
1015 self.os_inst_private = {}
1016
  def _ProcessMem(self, cluster_hvparams, be_old, pnode_uuid):
    """Checks memory-related prerequisites of the parameter change.

    Verifies that an increased maxmem still fits in the free memory of
    the primary node (and, when auto_balance is set, of the secondary
    nodes too), and that a requested runtime memory change lies within
    the proposed minmem/maxmem bounds and is available on the primary.

    Best-effort failures (node or instance info unavailable) are
    recorded in C{self.warn}; definite violations raise
    L{errors.OpPrereqError}.

    @type cluster_hvparams: dict
    @param cluster_hvparams: cluster-level hypervisor parameters for
        the instance's hypervisor
    @type be_old: dict
    @param be_old: the instance's current (pre-change) backend parameters
    @type pnode_uuid: string
    @param pnode_uuid: UUID of the instance's primary node

    """
    # The free-memory check only applies when maxmem grows and --force
    # was not given; shrinking or forced changes are accepted as-is.
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        self.be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if self.be_new[constants.BE_AUTO_BALANCE]:
        # with auto_balance the secondaries must be able to host the
        # instance as well, so include them in the query
        mem_check_list.extend(
            self.cfg.GetInstanceSecondaryNodes(self.instance.uuid))
      instance_info = self._GetInstanceInfo(cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # best-effort: an unreachable primary node produces a warning,
        # not a failure of the whole operation
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # presumably the instance is not running, so its whole new
            # maxmem must fit into the node's free memory
            current_mem = 0

          miss_mem = (self.be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if self.be_new[constants.BE_AUTO_BALANCE]:
        secondary_nodes = \
          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in secondary_nodes:
            continue
          # unlike the primary-node check above, failures on the
          # secondaries are fatal
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

          elif self.be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      remote_info = self.rpc.call_instance_info(
          self.instance.primary_node, self.instance.name,
          self.instance.hypervisor,
          cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node),
                        prereq=True)
      if not remote_info.payload:
        # ballooning memory only makes sense on a running instance
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      if (not self.op.force and
          (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
           self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        # growing at runtime: the extra amount must be free on the
        # primary node right now
        CheckNodeFreeMemory(
            self, self.instance.primary_node,
            "ballooning memory for instance %s" % self.instance.name, delta,
            self.instance.hypervisor,
            self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1118
1120 """Check prerequisites.
1121
1122 This only checks the instance list against the existing names.
1123
1124 """
1125 assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
1126 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
1127 self.cluster = self.cfg.GetClusterInfo()
1128 cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]
1129
1130 self.op.disks = self._LookupDiskMods()
1131
1132 assert self.instance is not None, \
1133 "Cannot retrieve locked instance %s" % self.op.instance_name
1134
1135 self.warn = []
1136
1137 if (self.op.pnode_uuid is not None and
1138 self.op.pnode_uuid != self.instance.primary_node and
1139 not self.op.force):
1140 instance_info = self._GetInstanceInfo(cluster_hvparams)
1141
1142 if instance_info.fail_msg:
1143 self.warn.append("Can't get instance runtime information: %s" %
1144 instance_info.fail_msg)
1145 elif instance_info.payload:
1146 raise errors.OpPrereqError(
1147 "Instance is still running on %s" %
1148 self.cfg.GetNodeName(self.instance.primary_node),
1149 errors.ECODE_STATE)
1150 pnode_uuid = self.instance.primary_node
1151 assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)
1152
1153 node_uuids = list(self.cfg.GetInstanceNodes(self.instance.uuid))
1154 pnode_info = self.cfg.GetNodeInfo(pnode_uuid)
1155
1156 assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
1157 group_info = self.cfg.GetNodeGroup(pnode_info.group)
1158
1159
1160 ispec = {}
1161
1162 self._CheckHotplug()
1163
1164 self._PrepareNicCommunication()
1165
1166
1167 assert not (self.op.disk_template and self.op.disks), \
1168 "Can't modify disk template and apply disk changes at the same time"
1169
1170 if self.op.disk_template:
1171 self._PreCheckDiskTemplate(pnode_info)
1172
1173 self._PreCheckDisks(ispec)
1174
1175 self._ProcessHVParams(node_uuids)
1176 be_old = self._ProcessBeParams()
1177
1178 self._ValidateCpuParams()
1179 self._ProcessOsParams(node_uuids)
1180 self._ProcessMem(cluster_hvparams, be_old, pnode_uuid)
1181
1182
1183 cluster = self.cluster
1184
1185 def _PrepareNicCreate(_, params, private):
1186 self._PrepareNicModification(params, private, None, None,
1187 {}, cluster, pnode_uuid)
1188 return (None, None)
1189
1190 def _PrepareNicAttach(_, __, ___):
1191 raise errors.OpPrereqError("Attach operation is not supported for NICs",
1192 errors.ECODE_INVAL)
1193
1194 def _PrepareNicMod(_, nic, params, private):
1195 self._PrepareNicModification(params, private, nic.ip, nic.network,
1196 nic.nicparams, cluster, pnode_uuid)
1197 return None
1198
1199 def _PrepareNicRemove(_, params, __):
1200 ip = params.ip
1201 net = params.network
1202 if net is not None and ip is not None:
1203 self.cfg.ReleaseIp(net, ip, self.proc.GetECId())
1204
1205 def _PrepareNicDetach(_, __, ___):
1206 raise errors.OpPrereqError("Detach operation is not supported for NICs",
1207 errors.ECODE_INVAL)
1208
1209
1210 nics = [nic.Copy() for nic in self.instance.nics]
1211 ApplyContainerMods("NIC", nics, None, self.nicmod, _PrepareNicCreate,
1212 _PrepareNicAttach, _PrepareNicMod, _PrepareNicRemove,
1213 _PrepareNicDetach)
1214 if len(nics) > constants.MAX_NICS:
1215 raise errors.OpPrereqError("Instance has too many network interfaces"
1216 " (%d), cannot add more" % constants.MAX_NICS,
1217 errors.ECODE_STATE)
1218
1219
1220 self._nic_chgdesc = []
1221 if self.nicmod:
1222
1223 nics = [nic.Copy() for nic in self.instance.nics]
1224 ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
1225 self._CreateNewNic, None, self._ApplyNicMods,
1226 self._RemoveNic, None)
1227
1228 utils.ValidateDeviceNames("NIC", nics)
1229 self._new_nics = nics
1230 ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
1231 else:
1232 self._new_nics = None
1233 ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)
1234
1235 if not self.op.ignore_ipolicy:
1236 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
1237 group_info)
1238
1239
1240 ispec[constants.ISPEC_SPINDLE_USE] = \
1241 self.be_new.get(constants.BE_SPINDLE_USE, None)
1242 ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
1243 None)
1244
1245
1246 if self.op.disk_template:
1247 count = ispec[constants.ISPEC_DISK_COUNT]
1248 new_disk_types = [self.op.disk_template] * count
1249 else:
1250 old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
1251 add_disk_count = ispec[constants.ISPEC_DISK_COUNT] - len(old_disks)
1252 dev_type = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
1253 if dev_type == constants.DT_DISKLESS and add_disk_count != 0:
1254 raise errors.ProgrammerError(
1255 "Conversion from diskless instance not possible and should have"
1256 " been caught")
1257
1258 new_disk_types = ([d.dev_type for d in old_disks] +
1259 [dev_type] * add_disk_count)
1260 ispec_max = ispec.copy()
1261 ispec_max[constants.ISPEC_MEM_SIZE] = \
1262 self.be_new.get(constants.BE_MAXMEM, None)
1263 res_max = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
1264 new_disk_types)
1265 ispec_min = ispec.copy()
1266 ispec_min[constants.ISPEC_MEM_SIZE] = \
1267 self.be_new.get(constants.BE_MINMEM, None)
1268 res_min = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
1269 new_disk_types)
1270
1271 if (res_max or res_min):
1272
1273
1274 msg = ("Instance allocation to group %s (%s) violates policy: %s" %
1275 (group_info, group_info.name,
1276 utils.CommaJoin(set(res_max + res_min))))
1277 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1278
1280 """Converts the disks of an instance to another type.
1281
1282 This function converts the disks of an instance. It supports
1283 conversions among all the available disk types except conversions
1284 between the LVM-based disk types, that use their separate code path.
1285 Also, this method does not support conversions that include the 'diskless'
1286 template and those targeting the 'blockdev' template.
1287
1288 @type feedback_fn: callable
1289 @param feedback_fn: function used to send feedback back to the caller
1290
1291 @rtype: NoneType
1292 @return: None
1293 @raise errors.OpPrereqError: in case of failure
1294
1295 """
1296 template_info = self.op.disk_template
1297 if self.op.disk_template == constants.DT_EXT:
1298 template_info = ":".join([self.op.disk_template,
1299 self.op.ext_params["provider"]])
1300
1301 old_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
1302 feedback_fn("Converting disk template from '%s' to '%s'" %
1303 (old_template, template_info))
1304
1305 assert not (old_template in constants.DTS_NOT_CONVERTIBLE_FROM or
1306 self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO), \
1307 ("Unsupported disk template conversion from '%s' to '%s'" %
1308 (old_template, self.op.disk_template))
1309
1310 pnode_uuid = self.instance.primary_node
1311 snode_uuid = []
1312 if self.op.remote_node_uuid:
1313 snode_uuid = [self.op.remote_node_uuid]
1314
1315 old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
1316
1317 feedback_fn("Generating new '%s' disk template..." % template_info)
1318 file_storage_dir = CalculateFileStorageDir(
1319 self.op.disk_template, self.cfg, self.instance.name,
1320 file_storage_dir=self.op.file_storage_dir)
1321 new_disks = GenerateDiskTemplate(self,
1322 self.op.disk_template,
1323 self.instance.uuid,
1324 pnode_uuid,
1325 snode_uuid,
1326 self.disks_info,
1327 file_storage_dir,
1328 self.op.file_driver,
1329 0,
1330 feedback_fn,
1331 self.diskparams)
1332
1333
1334 feedback_fn("Creating new empty disks of type '%s'..." % template_info)
1335 try:
1336 CreateDisks(self, self.instance, disk_template=self.op.disk_template,
1337 disks=new_disks)
1338 except errors.OpExecError:
1339 self.LogWarning("Device creation failed")
1340 for disk in new_disks:
1341 self.cfg.ReleaseDRBDMinors(disk.uuid)
1342 raise
1343
1344
1345 feedback_fn("Populating the new empty disks of type '%s'..." %
1346 template_info)
1347 for idx, (old, new) in enumerate(zip(old_disks, new_disks)):
1348 feedback_fn(" - copying data from disk %s (%s), size %s" %
1349 (idx, old.dev_type,
1350 utils.FormatUnit(new.size, "h")))
1351 if old.dev_type == constants.DT_DRBD8:
1352 old = old.children[0]
1353 result = self.rpc.call_blockdev_convert(pnode_uuid, (old, self.instance),
1354 (new, self.instance))
1355 msg = result.fail_msg
1356 if msg:
1357
1358
1359
1360 if self.op.disk_template == constants.DT_DRBD8:
1361 new = new.children[0]
1362 self.Log(" - ERROR: Could not copy disk '%s' to '%s'" %
1363 (old.logical_id[1], new.logical_id[1]))
1364 try:
1365 self.LogInfo("Some disks failed to copy")
1366 self.LogInfo("The instance will not be affected, aborting operation")
1367 self.LogInfo("Removing newly created disks of type '%s'..." %
1368 template_info)
1369 RemoveDisks(self, self.instance, disks=new_disks)
1370 self.LogInfo("Newly created disks removed successfully")
1371 finally:
1372 for disk in new_disks:
1373 self.cfg.ReleaseDRBDMinors(disk.uuid)
1374 result.Raise("Error while converting the instance's template")
1375
1376
1377 for disk in old_disks:
1378 if disk.dev_type == constants.DT_DRBD8:
1379 tcp_port = disk.logical_id[2]
1380 self.cfg.AddTcpUdpPort(tcp_port)
1381
1382
1383 feedback_fn("Detaching old disks (%s) from the instance and removing"
1384 " them from cluster config" % old_template)
1385 for old_disk in old_disks:
1386 self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
1387
1388
1389 feedback_fn("Adding new disks (%s) to cluster config and attaching"
1390 " them to the instance" % template_info)
1391 for (idx, new_disk) in enumerate(new_disks):
1392 self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
1393
1394
1395 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1396
1397
1398 ReleaseLocks(self, locking.LEVEL_NODE)
1399
1400 disk_abort = not WaitForSync(self, self.instance,
1401 oneshot=not self.op.wait_for_sync)
1402 if disk_abort:
1403 raise errors.OpExecError("There are some degraded disks for"
1404 " this instance, please cleanup manually")
1405
1406 feedback_fn("Removing old block devices of type '%s'..." % old_template)
1407 RemoveDisks(self, self.instance, disks=old_disks)
1408
1409
1410
1412 """Converts an instance from plain to drbd.
1413
1414 """
1415 feedback_fn("Converting disk template from 'plain' to 'drbd'")
1416
1417 if not self.op.remote_node_uuid:
1418 feedback_fn("Using %s to choose new secondary" % self.op.iallocator)
1419
1420 req = iallocator.IAReqInstanceAllocateSecondary(
1421 name=self.op.instance_name)
1422 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
1423 ial.Run(self.op.iallocator)
1424
1425 if not ial.success:
1426 raise errors.OpPrereqError("Can's find secondary node using"
1427 " iallocator %s: %s" %
1428 (self.op.iallocator, ial.info),
1429 errors.ECODE_NORES)
1430 feedback_fn("%s choose %s as new secondary"
1431 % (self.op.iallocator, ial.result))
1432 self.op.remote_node = ial.result
1433 self.op.remote_node_uuid = self.cfg.GetNodeInfoByName(ial.result).uuid
1434
1435 pnode_uuid = self.instance.primary_node
1436 snode_uuid = self.op.remote_node_uuid
1437 old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
1438
1439 assert utils.AnyDiskOfType(old_disks, [constants.DT_PLAIN])
1440
1441 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
1442 self.instance.uuid, pnode_uuid,
1443 [snode_uuid], self.disks_info,
1444 None, None, 0,
1445 feedback_fn, self.diskparams)
1446 anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
1447 p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
1448 s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
1449 info = GetInstanceInfoText(self.instance)
1450 feedback_fn("Creating additional volumes...")
1451
1452 for disk in anno_disks:
1453
1454 CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
1455 info, True, p_excl_stor)
1456 for child in disk.children:
1457 CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
1458 s_excl_stor)
1459
1460
1461 feedback_fn("Renaming original volumes...")
1462 rename_list = [(o, n.children[0].logical_id)
1463 for (o, n) in zip(old_disks, new_disks)]
1464 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
1465 result.Raise("Failed to rename original LVs")
1466
1467 feedback_fn("Initializing DRBD devices...")
1468
1469 try:
1470 for disk in anno_disks:
1471 for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
1472 (snode_uuid, s_excl_stor)]:
1473 f_create = node_uuid == pnode_uuid
1474 CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
1475 f_create, excl_stor)
1476 except errors.GenericError, e:
1477 feedback_fn("Initializing of DRBD devices failed;"
1478 " renaming back original volumes...")
1479 rename_back_list = [(n.children[0], o.logical_id)
1480 for (n, o) in zip(new_disks, old_disks)]
1481 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
1482 result.Raise("Failed to rename LVs back after error %s" % str(e))
1483 raise
1484
1485
1486 for old_disk in old_disks:
1487 self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
1488
1489
1490 for (idx, new_disk) in enumerate(new_disks):
1491 self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
1492
1493
1494 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1495
1496
1497 ReleaseLocks(self, locking.LEVEL_NODE)
1498
1499
1500 disk_abort = not WaitForSync(self, self.instance,
1501 oneshot=not self.op.wait_for_sync)
1502 if disk_abort:
1503 raise errors.OpExecError("There are some degraded disks for"
1504 " this instance, please cleanup manually")
1505
1506
1507
1509 """Converts an instance from drbd to plain.
1510
1511 """
1512 secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
1513 disks = self.cfg.GetInstanceDisks(self.instance.uuid)
1514 assert len(secondary_nodes) == 1
1515 assert utils.AnyDiskOfType(disks, [constants.DT_DRBD8])
1516
1517 feedback_fn("Converting disk template from 'drbd' to 'plain'")
1518
1519 old_disks = AnnotateDiskParams(self.instance, disks, self.cfg)
1520 new_disks = [d.children[0] for d in disks]
1521
1522
1523 for parent, child in zip(old_disks, new_disks):
1524 child.size = parent.size
1525 child.mode = parent.mode
1526 child.name = parent.name
1527 child.nodes = [self.instance.primary_node]
1528
1529
1530 for disk in old_disks:
1531 tcp_port = disk.logical_id[2]
1532 self.cfg.AddTcpUdpPort(tcp_port)
1533
1534
1535 for old_disk in old_disks:
1536 self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
1537
1538
1539 for (idx, new_disk) in enumerate(new_disks):
1540 self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
1541
1542
1543 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1544
1545
1546 ReleaseLocks(self, locking.LEVEL_NODE)
1547
1548 feedback_fn("Removing volumes on the secondary node...")
1549 RemoveDisks(self, self.instance, disks=old_disks,
1550 target_node_uuid=secondary_nodes[0])
1551
1552 feedback_fn("Removing unneeded volumes on the primary node...")
1553 meta_disks = []
1554 for idx, disk in enumerate(old_disks):
1555 meta_disks.append(disk.children[1])
1556 RemoveDisks(self, self.instance, disks=meta_disks)
1557
1559 self.LogInfo("Trying to hotplug device...")
1560 msg = "hotplug:"
1561 result = self.rpc.call_hotplug_device(self.instance.primary_node,
1562 self.instance, action, dev_type,
1563 (device, self.instance),
1564 extra, seq)
1565 if result.fail_msg:
1566 self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
1567 self.LogInfo("Continuing execution..")
1568 msg += "failed"
1569 else:
1570 self.LogInfo("Hotplug done.")
1571 msg += "done"
1572 return msg
1573
1580
1582 file_path = CalculateFileStorageDir(
1583 disk_type, self.cfg, self.instance.name,
1584 file_storage_dir=self.op.file_storage_dir)
1585
1586 self._FillFileDriver()
1587
1588 secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
1589 return \
1590 GenerateDiskTemplate(self, disk_type, self.instance.uuid,
1591 self.instance.primary_node, secondary_nodes,
1592 [params], file_path, self.op.file_driver, idx,
1593 self.Log, self.diskparams)[0]
1594
1596 """Creates a new disk.
1597
1598 """
1599
1600 disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
1601 disk = self._GenerateDiskTemplateWrapper(idx, disk_template,
1602 params)
1603 new_disks = CreateDisks(self, self.instance, disks=[disk])
1604 self.cfg.AddInstanceDisk(self.instance.uuid, disk, idx)
1605
1606
1607 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1608
1609 if self.cluster.prealloc_wipe_disks:
1610
1611 WipeOrCleanupDisks(self, self.instance,
1612 disks=[(idx, disk, 0)],
1613 cleanup=new_disks)
1614
1615 changes = [
1616 ("disk/%d" % idx,
1617 "add:size=%s,mode=%s" % (disk.size, disk.mode)),
1618 ]
1619 if self.op.hotplug:
1620 result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
1621 (disk, self.instance),
1622 self.instance, True, idx)
1623 if result.fail_msg:
1624 changes.append(("disk/%d" % idx, "assemble:failed"))
1625 self.LogWarning("Can't assemble newly created disk %d: %s",
1626 idx, result.fail_msg)
1627 else:
1628 _, link_name, uri = result.payload
1629 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
1630 constants.HOTPLUG_TARGET_DISK,
1631 disk, (link_name, uri), idx)
1632 changes.append(("disk/%d" % idx, msg))
1633
1634 return (disk, changes)
1635
1636 - def _PostAddDisk(self, _, disk):
1637 if not WaitForSync(self, self.instance, disks=[disk],
1638 oneshot=not self.op.wait_for_sync):
1639 raise errors.OpExecError("Failed to sync disks of %s" %
1640 self.instance.name)
1641
1642
1643
1644 if not self.instance.disks_active:
1645 ShutdownInstanceDisks(self, self.instance, disks=[disk])
1646
1648 """Attaches an existing disk to an instance.
1649
1650 """
1651 uuid = params.get("uuid", None)
1652 name = params.get(constants.IDISK_NAME, None)
1653
1654 disk = self.GenericGetDiskInfo(uuid, name)
1655
1656
1657 if disk.dev_type in (constants.DTS_INSTANCE_DEPENDENT_PATH):
1658
1659 params[constants.IDISK_SIZE] = disk.size
1660 params[constants.IDISK_MODE] = str(disk.mode)
1661 dummy_disk = self._GenerateDiskTemplateWrapper(idx, disk.dev_type, params)
1662 new_logical_id = dummy_disk.logical_id
1663 result = self.rpc.call_blockdev_rename(self.instance.primary_node,
1664 [(disk, new_logical_id)])
1665 result.Raise("Failed before attach")
1666 self.cfg.SetDiskLogicalID(disk.uuid, new_logical_id)
1667 disk.logical_id = new_logical_id
1668
1669
1670 self.cfg.AttachInstanceDisk(self.instance.uuid, disk.uuid, idx)
1671
1672
1673 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1674
1675 changes = [
1676 ("disk/%d" % idx,
1677 "attach:size=%s,mode=%s" % (disk.size, disk.mode)),
1678 ]
1679
1680 disks_ok, _, payloads = AssembleInstanceDisks(self, self.instance,
1681 disks=[disk])
1682 if not disks_ok:
1683 changes.append(("disk/%d" % idx, "assemble:failed"))
1684 return disk, changes
1685
1686 if self.op.hotplug:
1687 _, link_name, uri = payloads[0]
1688 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
1689 constants.HOTPLUG_TARGET_DISK,
1690 disk, (link_name, uri), idx)
1691 changes.append(("disk/%d" % idx, msg))
1692
1693 return (disk, changes)
1694
1727
1752
1754 """Detaches a disk from an instance.
1755
1756 """
1757 hotmsg = ""
1758 if self.op.hotplug:
1759 hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
1760 constants.HOTPLUG_TARGET_DISK,
1761 root, None, idx)
1762
1763
1764 ShutdownInstanceDisks(self, self.instance, [root])
1765
1766
1767
1768
1769
1770
1771
1772 if root.dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE):
1773 file_driver = root.logical_id[0]
1774 instance_path, disk_name = os.path.split(root.logical_id[1])
1775 new_path = os.path.join(os.path.dirname(instance_path), disk_name)
1776 new_logical_id = (file_driver, new_path)
1777 result = self.rpc.call_blockdev_rename(self.instance.primary_node,
1778 [(root, new_logical_id)])
1779 result.Raise("Failed before detach")
1780
1781 self.cfg.SetDiskLogicalID(root.uuid, new_logical_id)
1782
1783
1784 self.cfg.DetachInstanceDisk(self.instance.uuid, root.uuid)
1785
1786
1787 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1788
1789 return hotmsg
1790
1792 """Creates data structure for a new network interface.
1793
1794 """
1795 mac = params[constants.INIC_MAC]
1796 ip = params.get(constants.INIC_IP, None)
1797 net = params.get(constants.INIC_NETWORK, None)
1798 name = params.get(constants.INIC_NAME, None)
1799 net_uuid = self.cfg.LookupNetwork(net)
1800
1801 nicparams = private.filled
1802 nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
1803 nicparams=nicparams)
1804 nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
1805
1806 changes = [
1807 ("nic.%d" % idx,
1808 "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
1809 (mac, ip, private.filled[constants.NIC_MODE],
1810 private.filled[constants.NIC_LINK], net)),
1811 ]
1812
1813 if self.op.hotplug:
1814 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
1815 constants.HOTPLUG_TARGET_NIC,
1816 nobj, None, idx)
1817 changes.append(("nic.%d" % idx, msg))
1818
1819 return (nobj, changes)
1820
1822 """Modifies a network interface.
1823
1824 """
1825 changes = []
1826
1827 for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
1828 if key in params:
1829 changes.append(("nic.%s/%d" % (key, idx), params[key]))
1830 setattr(nic, key, params[key])
1831
1832 new_net = params.get(constants.INIC_NETWORK, nic.network)
1833 new_net_uuid = self.cfg.LookupNetwork(new_net)
1834 if new_net_uuid != nic.network:
1835 changes.append(("nic.network/%d" % idx, new_net))
1836 nic.network = new_net_uuid
1837
1838 if private.filled:
1839 nic.nicparams = private.filled
1840
1841 for (key, val) in nic.nicparams.items():
1842 changes.append(("nic.%s/%d" % (key, idx), val))
1843
1844 if self.op.hotplug:
1845 msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
1846 constants.HOTPLUG_TARGET_NIC,
1847 nic, None, idx)
1848 changes.append(("nic/%d" % idx, msg))
1849
1850 return changes
1851
1857
1858 - def Exec(self, feedback_fn):
1859 """Modifies an instance.
1860
1861 All parameters take effect only at the next restart of the instance.
1862
1863 """
1864 self.feedback_fn = feedback_fn
1865
1866
1867
1868 for warn in self.warn:
1869 feedback_fn("WARNING: %s" % warn)
1870
1871 assert ((self.op.disk_template is None) ^
1872 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
1873 "Not owning any node resource locks"
1874
1875 result = []
1876
1877
1878 if self.op.pnode_uuid:
1879 self.instance.primary_node = self.op.pnode_uuid
1880
1881
1882 if self.op.runtime_mem:
1883 rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
1884 self.instance,
1885 self.op.runtime_mem)
1886 rpcres.Raise("Cannot modify instance runtime memory")
1887 result.append(("runtime_memory", self.op.runtime_mem))
1888
1889
1890 inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
1891 ApplyContainerMods("disk", inst_disks, result, self.diskmod,
1892 self._CreateNewDisk, self._AttachDisk, self._ModifyDisk,
1893 self._RemoveDisk, self._DetachDisk,
1894 post_add_fn=self._PostAddDisk)
1895
1896 if self.op.disk_template:
1897 if __debug__:
1898 check_nodes = set(self.cfg.GetInstanceNodes(self.instance.uuid))
1899 if self.op.remote_node_uuid:
1900 check_nodes.add(self.op.remote_node_uuid)
1901 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
1902 owned = self.owned_locks(level)
1903 assert not (check_nodes - owned), \
1904 ("Not owning the correct locks, owning %r, expected at least %r" %
1905 (owned, check_nodes))
1906
1907 r_shut = ShutdownInstanceDisks(self, self.instance)
1908 if not r_shut:
1909 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
1910 " proceed with disk template conversion")
1911
1912 mode = (self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
1913 self.op.disk_template)
1914 try:
1915 if mode in self._DISK_CONVERSIONS:
1916 self._DISK_CONVERSIONS[mode](self, feedback_fn)
1917 else:
1918 self._ConvertInstanceDisks(feedback_fn)
1919 except:
1920 for disk in inst_disks:
1921 self.cfg.ReleaseDRBDMinors(disk.uuid)
1922 raise
1923 result.append(("disk_template", self.op.disk_template))
1924
1925 disk_info = self.cfg.GetInstanceDisks(self.instance.uuid)
1926 assert utils.AllDiskOfType(disk_info, [self.op.disk_template]), \
1927 ("Expected disk template '%s', found '%s'" %
1928 (self.op.disk_template,
1929 self.cfg.GetInstanceDiskTemplate(self.instance.uuid)))
1930
1931
1932
1933 ReleaseLocks(self, locking.LEVEL_NODE)
1934 ReleaseLocks(self, locking.LEVEL_NODE_RES)
1935
1936
1937 if self._new_nics is not None:
1938 self.instance.nics = self._new_nics
1939 result.extend(self._nic_chgdesc)
1940
1941
1942 if self.op.hvparams:
1943 self.instance.hvparams = self.hv_inst
1944 for key, val in self.op.hvparams.iteritems():
1945 result.append(("hv/%s" % key, val))
1946
1947
1948 if self.op.beparams:
1949 self.instance.beparams = self.be_inst
1950 for key, val in self.op.beparams.iteritems():
1951 result.append(("be/%s" % key, val))
1952
1953
1954 if self.op.os_name:
1955 self.instance.os = self.op.os_name
1956
1957
1958 if self.op.osparams:
1959 self.instance.osparams = self.os_inst
1960 for key, val in self.op.osparams.iteritems():
1961 result.append(("os/%s" % key, val))
1962
1963 if self.op.osparams_private:
1964 self.instance.osparams_private = self.os_inst_private
1965 for key, val in self.op.osparams_private.iteritems():
1966
1967 result.append(("os_private/%s" % key, repr(val)))
1968
1969 self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
1970
1971 if self.op.offline is None:
1972
1973 pass
1974 elif self.op.offline:
1975
1976 self.instance = self.cfg.MarkInstanceOffline(self.instance.uuid)
1977 result.append(("admin_state", constants.ADMINST_OFFLINE))
1978 else:
1979
1980 self.instance = self.cfg.MarkInstanceDown(self.instance.uuid)
1981 result.append(("admin_state", constants.ADMINST_DOWN))
1982
1983 UpdateMetadata(feedback_fn, self.rpc, self.instance)
1984
1985 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
1986 self.owned_locks(locking.LEVEL_NODE)), \
1987 "All node locks should have been released by now"
1988
1989 return result
1990
1991 _DISK_CONVERSIONS = {
1992 (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
1993 (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
1994 }
1995