1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30 """Logical unit setting parameters of a single instance."""
31
32 import copy
33 import logging
34 import os
35
36 from ganeti import compat
37 from ganeti import constants
38 from ganeti import errors
39 from ganeti import ht
40 from ganeti import hypervisor
41 from ganeti import locking
42 from ganeti import netutils
43 from ganeti import objects
44 from ganeti import utils
45 import ganeti.rpc.node as rpc
46
47 from ganeti.cmdlib.base import LogicalUnit
48
49 from ganeti.cmdlib.common import INSTANCE_DOWN, \
50 INSTANCE_NOT_RUNNING, CheckNodeOnline, \
51 CheckParamsNotGlobal, \
52 IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
53 GetUpdatedParams, CheckInstanceState, ExpandNodeUuidAndName, \
54 IsValidDiskAccessModeCombination, AnnotateDiskParams
55 from ganeti.cmdlib.instance_storage import CalculateFileStorageDir, \
56 CheckDiskExtProvider, CheckNodesFreeDiskPerVG, CheckRADOSFreeSpace, \
57 CheckSpindlesExclusiveStorage, ComputeDiskSizePerVG, ComputeDisksInfo, \
58 CreateDisks, CreateSingleBlockDev, GenerateDiskTemplate, \
59 IsExclusiveStorageEnabledNodeUuid, ShutdownInstanceDisks, \
60 WaitForSync, WipeOrCleanupDisks, AssembleInstanceDisks
61 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
62 NICToTuple, CheckNodeNotDrained, CopyLockList, \
63 ReleaseLocks, CheckNodeVmCapable, CheckTargetNodeIPolicy, \
64 GetInstanceInfoText, RemoveDisks, CheckNodeFreeMemory, \
65 UpdateMetadata, CheckForConflictingIp, \
66 PrepareContainerMods, ComputeInstanceCommunicationNIC, \
67 ApplyContainerMods, ComputeIPolicyInstanceSpecViolation, \
68 CheckNodesPhysicalCPUs
69 import ganeti.masterd.instance
73 """Data structure for network interface modifications.
74
75 Used by L{LUInstanceSetParams}.
76
77 """
79 self.params = None
80 self.filled = None
81
84 """Modifies an instances's parameters.
85
86 """
87 HPATH = "instance-modify"
88 HTYPE = constants.HTYPE_INSTANCE
89 REQ_BGL = False
90
92 """Find a disk object using the provided params.
93
94 Accept arguments as keywords and use the GetDiskInfo/GetDiskInfoByName
95 config functions to retrieve the disk info based on these arguments.
96
97 In case of an error, raise the appropriate exceptions.
98 """
99 if uuid:
100 disk = self.cfg.GetDiskInfo(uuid)
101 if disk is None:
102 raise errors.OpPrereqError("No disk was found with this UUID: %s" %
103 uuid, errors.ECODE_INVAL)
104 elif name:
105 disk = self.cfg.GetDiskInfoByName(name)
106 if disk is None:
107 raise errors.OpPrereqError("No disk was found with this name: %s" %
108 name, errors.ECODE_INVAL)
109 else:
110 raise errors.ProgrammerError("No disk UUID or name was given")
111
112 return disk
113
114 @staticmethod
116 assert ht.TList(mods)
117 assert not mods or len(mods[0]) in (2, 3)
118
119 if mods and len(mods[0]) == 2:
120 result = []
121
122 addremove = 0
123 for op, params in mods:
124 if op in (constants.DDM_ADD, constants.DDM_ATTACH,
125 constants.DDM_REMOVE, constants.DDM_DETACH):
126 result.append((op, -1, params))
127 addremove += 1
128
129 if addremove > 1:
130 raise errors.OpPrereqError("Only one %s add/attach/remove/detach "
131 "operation is supported at a time" %
132 kind, errors.ECODE_INVAL)
133 else:
134 result.append((constants.DDM_MODIFY, op, params))
135
136 assert verify_fn(result)
137 else:
138 result = mods
139 return result
140
141 @staticmethod
143 """Ensures requested disk/NIC modifications are valid.
144
145 Note that the 'attach' action needs a way to refer to the UUID of the disk,
146 since the disk name is not unique cluster-wide. However, the UUID of the
147 disk is not settable but rather generated by Ganeti automatically,
148 therefore it cannot be passed as an IDISK parameter. For this reason, this
149 function will override the checks to accept uuid parameters solely for the
150 attach action.
151 """
152
153 key_types_attach = key_types.copy()
154 key_types_attach['uuid'] = 'string'
155
156 for (op, _, params) in mods:
157 assert ht.TDict(params)
158
159
160
161 if key_types:
162 utils.ForceDictType(params, (key_types if op != constants.DDM_ATTACH
163 else key_types_attach))
164
165 if op in (constants.DDM_REMOVE, constants.DDM_DETACH):
166 if params:
167 raise errors.OpPrereqError("No settings should be passed when"
168 " removing or detaching a %s" % kind,
169 errors.ECODE_INVAL)
170 elif op in (constants.DDM_ADD, constants.DDM_ATTACH,
171 constants.DDM_MODIFY):
172 item_fn(op, params)
173 else:
174 raise errors.ProgrammerError("Unhandled operation '%s'" % op)
175
177 """Verifies a disk modification.
178
179 """
180 disk_type = params.get(
181 constants.IDISK_TYPE,
182 self.cfg.GetInstanceDiskTemplate(self.instance.uuid))
183
184 if op == constants.DDM_ADD:
185 params[constants.IDISK_TYPE] = disk_type
186
187 if disk_type == constants.DT_DISKLESS:
188 raise errors.OpPrereqError(
189 "Must specify disk type on diskless instance", errors.ECODE_INVAL)
190
191 if disk_type != constants.DT_EXT:
192 utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)
193
194 mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)
195 if mode not in constants.DISK_ACCESS_SET:
196 raise errors.OpPrereqError("Invalid disk access mode '%s'" % mode,
197 errors.ECODE_INVAL)
198
199 size = params.get(constants.IDISK_SIZE, None)
200 if size is None:
201 raise errors.OpPrereqError("Required disk parameter '%s' missing" %
202 constants.IDISK_SIZE, errors.ECODE_INVAL)
203 size = int(size)
204
205 params[constants.IDISK_SIZE] = size
206 name = params.get(constants.IDISK_NAME, None)
207 if name is not None and name.lower() == constants.VALUE_NONE:
208 params[constants.IDISK_NAME] = None
209
210
211 if op in (constants.DDM_ADD, constants.DDM_ATTACH):
212 CheckSpindlesExclusiveStorage(params, excl_stor, True)
213 CheckDiskExtProvider(params, disk_type)
214
215
216 if not self.op.wait_for_sync and not self.instance.disks_active:
217 raise errors.OpPrereqError("Can't %s a disk to an instance with"
218 " deactivated disks and --no-wait-for-sync"
219 " given" % op, errors.ECODE_INVAL)
220
221
222 if disk_type in constants.DTS_HAVE_ACCESS:
223 access_type = params.get(constants.IDISK_ACCESS,
224 group_access_types[disk_type])
225 if not IsValidDiskAccessModeCombination(self.instance.hypervisor,
226 disk_type, access_type):
227 raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
228 " used with %s disk access param" %
229 (self.instance.hypervisor, access_type),
230 errors.ECODE_STATE)
231
232 if op == constants.DDM_ATTACH:
233 if len(params) != 1 or ('uuid' not in params and
234 constants.IDISK_NAME not in params):
235 raise errors.OpPrereqError("Only one argument is permitted in %s op,"
236 " either %s or uuid" % (constants.DDM_ATTACH,
237 constants.IDISK_NAME,
238 ),
239 errors.ECODE_INVAL)
240 self._CheckAttachDisk(params)
241
242 elif op == constants.DDM_MODIFY:
243 if constants.IDISK_SIZE in params:
244 raise errors.OpPrereqError("Disk size change not possible, use"
245 " grow-disk", errors.ECODE_INVAL)
246
247 disk_info = self.cfg.GetInstanceDisks(self.instance.uuid)
248
249
250
251 if not utils.AllDiskOfType(disk_info, [constants.DT_EXT]):
252 utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)
253 else:
254
255
256 for param in [constants.IDISK_ACCESS, constants.IDISK_PROVIDER]:
257 if param in params:
258 raise errors.OpPrereqError("Disk '%s' parameter change is"
259 " not possible" % param,
260 errors.ECODE_INVAL)
261
262 name = params.get(constants.IDISK_NAME, None)
263 if name is not None and name.lower() == constants.VALUE_NONE:
264 params[constants.IDISK_NAME] = None
265
266 @staticmethod
268 """Verifies a network interface modification.
269
270 """
271 if op in (constants.DDM_ADD, constants.DDM_MODIFY):
272 ip = params.get(constants.INIC_IP, None)
273 name = params.get(constants.INIC_NAME, None)
274 req_net = params.get(constants.INIC_NETWORK, None)
275 link = params.get(constants.NIC_LINK, None)
276 mode = params.get(constants.NIC_MODE, None)
277 if name is not None and name.lower() == constants.VALUE_NONE:
278 params[constants.INIC_NAME] = None
279 if req_net is not None:
280 if req_net.lower() == constants.VALUE_NONE:
281 params[constants.INIC_NETWORK] = None
282 req_net = None
283 elif link is not None or mode is not None:
284 raise errors.OpPrereqError("If network is given"
285 " mode or link should not",
286 errors.ECODE_INVAL)
287
288 if op == constants.DDM_ADD:
289 macaddr = params.get(constants.INIC_MAC, None)
290 if macaddr is None:
291 params[constants.INIC_MAC] = constants.VALUE_AUTO
292
293 if ip is not None:
294 if ip.lower() == constants.VALUE_NONE:
295 params[constants.INIC_IP] = None
296 else:
297 if ip.lower() == constants.NIC_IP_POOL:
298 if op == constants.DDM_ADD and req_net is None:
299 raise errors.OpPrereqError("If ip=pool, parameter network"
300 " cannot be none",
301 errors.ECODE_INVAL)
302 else:
303 if not netutils.IPAddress.IsValid(ip):
304 raise errors.OpPrereqError("Invalid IP address '%s'" % ip,
305 errors.ECODE_INVAL)
306
307 if constants.INIC_MAC in params:
308 macaddr = params[constants.INIC_MAC]
309 if macaddr not in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
310 macaddr = utils.NormalizeAndValidateMac(macaddr)
311
312 if op == constants.DDM_MODIFY and macaddr == constants.VALUE_AUTO:
313 raise errors.OpPrereqError("'auto' is not a valid MAC address when"
314 " modifying an existing NIC",
315 errors.ECODE_INVAL)
316
318 """Looks up uuid or name of disk if necessary."""
319 try:
320 return int(idx)
321 except ValueError:
322 pass
323 for i, d in enumerate(self.cfg.GetInstanceDisks(self.instance.uuid)):
324 if d.name == idx or d.uuid == idx:
325 return i
326 raise errors.OpPrereqError("Lookup of disk %r failed" % idx)
  def _LookupDiskMods(self):
    """Looks up uuid or name of disk if necessary.

    Maps every entry of the opcode's disk modification list through
    L{_LookupDiskIndex}, so that each disk is referred to by its integer
    index from here on.

    @rtype: list of tuples
    @return: the (op, index, params) disk modifications

    """
    return [(op, self._LookupDiskIndex(idx), params)
            for op, idx, params in self.op.disks]
332
334 if not (self.op.nics or self.op.disks or self.op.disk_template or
335 self.op.hvparams or self.op.beparams or self.op.os_name or
336 self.op.osparams or self.op.offline is not None or
337 self.op.runtime_mem or self.op.pnode or self.op.osparams_private or
338 self.op.instance_communication is not None):
339 raise errors.OpPrereqError("No changes submitted", errors.ECODE_INVAL)
340
341 if self.op.hvparams:
342 CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS,
343 "hypervisor", "instance", "cluster")
344
345 self.op.disks = self._UpgradeDiskNicMods(
346 "disk", self.op.disks,
347 ht.TSetParamsMods(ht.TIDiskParams))
348 self.op.nics = self._UpgradeDiskNicMods(
349 "NIC", self.op.nics, ht.TSetParamsMods(ht.TINicParams))
350
351
352 if self.op.disk_template:
353 if self.op.disks:
354 raise errors.OpPrereqError("Disk template conversion and other disk"
355 " changes not supported at the same time",
356 errors.ECODE_INVAL)
357
358
359 if self.op.disk_template in constants.DTS_INT_MIRROR:
360 if not self.op.remote_node:
361 raise errors.OpPrereqError("Changing the disk template to a mirrored"
362 " one requires specifying a secondary"
363 " node", errors.ECODE_INVAL)
364 elif self.op.remote_node:
365 self.LogWarning("Changing the disk template to a non-mirrored one,"
366 " the secondary node will be ignored")
367
368
369 self.op.remote_node = None
370
371
372 if self.op.disk_template in constants.DTS_FILEBASED:
373 self._FillFileDriver()
374
375
376 self._CheckMods("NIC", self.op.nics, constants.INIC_PARAMS_TYPES,
377 self._VerifyNicModification)
378
379 if self.op.pnode:
380 (self.op.pnode_uuid, self.op.pnode) = \
381 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
382
418
432
434 if level == locking.LEVEL_NODEGROUP:
435 assert not self.needed_locks[locking.LEVEL_NODEGROUP]
436
437
438 self.needed_locks[locking.LEVEL_NODEGROUP] = \
439 self.cfg.GetInstanceNodeGroups(self.op.instance_uuid)
440 elif level == locking.LEVEL_NODE:
441 self._LockInstancesNodes()
442 if self.op.disk_template and self.op.remote_node:
443 (self.op.remote_node_uuid, self.op.remote_node) = \
444 ExpandNodeUuidAndName(self.cfg, self.op.remote_node_uuid,
445 self.op.remote_node)
446 self.needed_locks[locking.LEVEL_NODE].append(self.op.remote_node_uuid)
447 elif level == locking.LEVEL_NODE_RES and self.op.disk_template:
448
449 self.needed_locks[locking.LEVEL_NODE_RES] = \
450 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
451
486
494
497
498 update_params_dict = dict([(key, params[key])
499 for key in constants.NICS_PARAMETERS
500 if key in params])
501
502 req_link = update_params_dict.get(constants.NIC_LINK, None)
503 req_mode = update_params_dict.get(constants.NIC_MODE, None)
504
505 new_net_uuid = None
506 new_net_uuid_or_name = params.get(constants.INIC_NETWORK, old_net_uuid)
507 if new_net_uuid_or_name:
508 new_net_uuid = self.cfg.LookupNetwork(new_net_uuid_or_name)
509 new_net_obj = self.cfg.GetNetwork(new_net_uuid)
510
511 if old_net_uuid:
512 old_net_obj = self.cfg.GetNetwork(old_net_uuid)
513
514 if new_net_uuid:
515 netparams = self.cfg.GetGroupNetParams(new_net_uuid, pnode_uuid)
516 if not netparams:
517 raise errors.OpPrereqError("No netparams found for the network"
518 " %s, probably not connected" %
519 new_net_obj.name, errors.ECODE_INVAL)
520 new_params = dict(netparams)
521 else:
522 new_params = GetUpdatedParams(old_params, update_params_dict)
523
524 utils.ForceDictType(new_params, constants.NICS_PARAMETER_TYPES)
525
526 new_filled_params = cluster.SimpleFillNIC(new_params)
527 objects.NIC.CheckParameterSyntax(new_filled_params)
528
529 new_mode = new_filled_params[constants.NIC_MODE]
530 if new_mode == constants.NIC_MODE_BRIDGED:
531 bridge = new_filled_params[constants.NIC_LINK]
532 msg = self.rpc.call_bridges_exist(pnode_uuid, [bridge]).fail_msg
533 if msg:
534 msg = "Error checking bridges on node '%s': %s" % \
535 (self.cfg.GetNodeName(pnode_uuid), msg)
536 if self.op.force:
537 self.warn.append(msg)
538 else:
539 raise errors.OpPrereqError(msg, errors.ECODE_ENVIRON)
540
541 elif new_mode == constants.NIC_MODE_ROUTED:
542 ip = params.get(constants.INIC_IP, old_ip)
543 if ip is None and not new_net_uuid:
544 raise errors.OpPrereqError("Cannot set the NIC IP address to None"
545 " on a routed NIC if not attached to a"
546 " network", errors.ECODE_INVAL)
547
548 elif new_mode == constants.NIC_MODE_OVS:
549
550 self.LogInfo("OVS links are currently not checked for correctness")
551
552 if constants.INIC_MAC in params:
553 mac = params[constants.INIC_MAC]
554 if mac is None:
555 raise errors.OpPrereqError("Cannot unset the NIC MAC address",
556 errors.ECODE_INVAL)
557 elif mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
558
559 params[constants.INIC_MAC] = \
560 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
561 else:
562
563 try:
564 self.cfg.ReserveMAC(mac, self.proc.GetECId())
565 except errors.ReservationError:
566 raise errors.OpPrereqError("MAC address '%s' already in use"
567 " in cluster" % mac,
568 errors.ECODE_NOTUNIQUE)
569 elif new_net_uuid != old_net_uuid:
570
571 def get_net_prefix(net_uuid):
572 mac_prefix = None
573 if net_uuid:
574 nobj = self.cfg.GetNetwork(net_uuid)
575 mac_prefix = nobj.mac_prefix
576
577 return mac_prefix
578
579 new_prefix = get_net_prefix(new_net_uuid)
580 old_prefix = get_net_prefix(old_net_uuid)
581 if old_prefix != new_prefix:
582 params[constants.INIC_MAC] = \
583 self.cfg.GenerateMAC(new_net_uuid, self.proc.GetECId())
584
585
586 new_ip = params.get(constants.INIC_IP, old_ip)
587 if (new_ip, new_net_uuid) != (old_ip, old_net_uuid):
588 if new_ip:
589
590 if new_ip.lower() == constants.NIC_IP_POOL:
591 if new_net_uuid:
592 try:
593 new_ip = self.cfg.GenerateIp(new_net_uuid, self.proc.GetECId())
594 except errors.ReservationError:
595 raise errors.OpPrereqError("Unable to get a free IP"
596 " from the address pool",
597 errors.ECODE_STATE)
598 self.LogInfo("Chose IP %s from network %s",
599 new_ip,
600 new_net_obj.name)
601 params[constants.INIC_IP] = new_ip
602 else:
603 raise errors.OpPrereqError("ip=pool, but no network found",
604 errors.ECODE_INVAL)
605
606 elif new_net_uuid:
607 try:
608 self.cfg.ReserveIp(new_net_uuid, new_ip, self.proc.GetECId(),
609 check=self.op.conflicts_check)
610 self.LogInfo("Reserving IP %s in network %s",
611 new_ip, new_net_obj.name)
612 except errors.ReservationError:
613 raise errors.OpPrereqError("IP %s not available in network %s" %
614 (new_ip, new_net_obj.name),
615 errors.ECODE_NOTUNIQUE)
616
617 elif self.op.conflicts_check:
618 CheckForConflictingIp(self, new_ip, pnode_uuid)
619
620
621 if old_ip and old_net_uuid:
622 try:
623 self.cfg.ReleaseIp(old_net_uuid, old_ip, self.proc.GetECId())
624 except errors.AddressPoolError:
625 logging.warning("Release IP %s not contained in network %s",
626 old_ip, old_net_obj.name)
627
628
629 elif (old_net_uuid is not None and
630 (req_link is not None or req_mode is not None)):
631 raise errors.OpPrereqError("Not allowed to change link or mode of"
632 " a NIC that is connected to a network",
633 errors.ECODE_INVAL)
634
635 private.params = new_params
636 private.filled = new_filled_params
637
639 """CheckPrereq checks related to a new disk template."""
640
641 pnode_uuid = self.instance.primary_node
642
643
644 disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
645 if disk_template == constants.DT_MIXED:
646 raise errors.OpPrereqError(
647 "Conversion from mixed is not yet supported.")
648
649 inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
650 if utils.AnyDiskOfType(inst_disks, constants.DTS_NOT_CONVERTIBLE_FROM):
651 raise errors.OpPrereqError(
652 "Conversion from the '%s' disk template is not supported"
653 % self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
654 errors.ECODE_INVAL)
655
656 elif self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO:
657 raise errors.OpPrereqError("Conversion to the '%s' disk template is"
658 " not supported" % self.op.disk_template,
659 errors.ECODE_INVAL)
660
661 if (self.op.disk_template != constants.DT_EXT and
662 utils.AllDiskOfType(inst_disks, [self.op.disk_template])):
663 raise errors.OpPrereqError("Instance already has disk template %s" %
664 self.op.disk_template, errors.ECODE_INVAL)
665
666 if not self.cluster.IsDiskTemplateEnabled(self.op.disk_template):
667 enabled_dts = utils.CommaJoin(self.cluster.enabled_disk_templates)
668 raise errors.OpPrereqError("Disk template '%s' is not enabled for this"
669 " cluster (enabled templates: %s)" %
670 (self.op.disk_template, enabled_dts),
671 errors.ECODE_STATE)
672
673 default_vg = self.cfg.GetVGName()
674 if (not default_vg and
675 self.op.disk_template not in constants.DTS_NOT_LVM):
676 raise errors.OpPrereqError("Disk template conversions to lvm-based"
677 " instances are not supported by the cluster",
678 errors.ECODE_STATE)
679
680 CheckInstanceState(self, self.instance, INSTANCE_DOWN,
681 msg="cannot change disk template")
682
683
684 self.disks_info = ComputeDisksInfo(inst_disks, self.op.disk_template,
685 default_vg, self.op.ext_params)
686
687
688 if self.op.disk_template in constants.DTS_INT_MIRROR:
689 if self.op.remote_node_uuid == pnode_uuid:
690 raise errors.OpPrereqError("Given new secondary node %s is the same"
691 " as the primary node of the instance" %
692 self.op.remote_node, errors.ECODE_STATE)
693 CheckNodeOnline(self, self.op.remote_node_uuid)
694 CheckNodeNotDrained(self, self.op.remote_node_uuid)
695 CheckNodeVmCapable(self, self.op.remote_node_uuid)
696
697 snode_info = self.cfg.GetNodeInfo(self.op.remote_node_uuid)
698 snode_group = self.cfg.GetNodeGroup(snode_info.group)
699 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
700 snode_group)
701 CheckTargetNodeIPolicy(self, ipolicy, self.instance, snode_info, self.cfg,
702 ignore=self.op.ignore_ipolicy)
703 if pnode_info.group != snode_info.group:
704 self.LogWarning("The primary and secondary nodes are in two"
705 " different node groups; the disk parameters"
706 " from the first disk's node group will be"
707 " used")
708
709
710 pnode_group = self.cfg.GetNodeGroup(pnode_info.group)
711 ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
712 pnode_group)
713 allowed_dts = ipolicy[constants.IPOLICY_DTS]
714 if self.op.disk_template not in allowed_dts:
715 raise errors.OpPrereqError("Disk template '%s' in not allowed (allowed"
716 " templates: %s)" % (self.op.disk_template,
717 utils.CommaJoin(allowed_dts)),
718 errors.ECODE_STATE)
719
720 if not self.op.disk_template in constants.DTS_EXCL_STORAGE:
721
722 nodes = [pnode_info]
723 if self.op.disk_template in constants.DTS_INT_MIRROR:
724 assert snode_info
725 nodes.append(snode_info)
726 has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
727 if compat.any(map(has_es, nodes)):
728 errmsg = ("Cannot convert disk template from %s to %s when exclusive"
729 " storage is enabled" % (
730 self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
731 self.op.disk_template))
732 raise errors.OpPrereqError(errmsg, errors.ECODE_STATE)
733
734
735
736 if (self.op.disk_template == constants.DT_PLAIN and
737 utils.AllDiskOfType(inst_disks, [constants.DT_DRBD8])):
738
739
740 pass
741 elif (self.op.disk_template == constants.DT_DRBD8 and
742 utils.AllDiskOfType(inst_disks, [constants.DT_PLAIN])):
743
744
745 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info)
746 CheckNodesFreeDiskPerVG(self, [self.op.remote_node_uuid], req_sizes)
747 elif self.op.disk_template in constants.DTS_LVM:
748
749 node_uuids = [pnode_uuid]
750 if self.op.remote_node_uuid:
751 node_uuids.append(self.op.remote_node_uuid)
752 req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks_info)
753 CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
754 elif self.op.disk_template == constants.DT_RBD:
755
756 CheckRADOSFreeSpace()
757 elif self.op.disk_template == constants.DT_EXT:
758
759 pass
760 else:
761
762 pass
763
765 """CheckPrereq checks related to disk changes.
766
767 @type ispec: dict
768 @param ispec: instance specs to be updated with the new disks
769
770 """
771 self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)
772
773 inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)
774 excl_stor = compat.any(
775 rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes).values()
776 )
777
778
779 node_info = self.cfg.GetNodeInfo(self.instance.primary_node)
780 node_group = self.cfg.GetNodeGroup(node_info.group)
781 group_disk_params = self.cfg.GetGroupDiskParams(node_group)
782
783 group_access_types = dict(
784 (dt, group_disk_params[dt].get(
785 constants.RBD_ACCESS, constants.DISK_KERNELSPACE))
786 for dt in constants.DISK_TEMPLATES)
787
788
789
790 ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor,
791 group_access_types)
792
793
794 self._CheckMods("disk", self.op.disks, {}, ver_fn)
795
796 self.diskmod = PrepareContainerMods(self.op.disks, None)
797
798 def _PrepareDiskMod(_, disk, params, __):
799 disk.name = params.get(constants.IDISK_NAME, None)
800
801
802 inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
803 disks = copy.deepcopy(inst_disks)
804 ApplyContainerMods("disk", disks, None, self.diskmod, None, None,
805 _PrepareDiskMod, None, None)
806 utils.ValidateDeviceNames("disk", disks)
807 if len(disks) > constants.MAX_DISKS:
808 raise errors.OpPrereqError("Instance has too many disks (%d), cannot add"
809 " more" % constants.MAX_DISKS,
810 errors.ECODE_STATE)
811 disk_sizes = [disk.size for disk in inst_disks]
812 disk_sizes.extend(params["size"] for (op, idx, params, private) in
813 self.diskmod if op == constants.DDM_ADD)
814 ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)
815 ispec[constants.ISPEC_DISK_SIZE] = disk_sizes
816
817
818 if self.op.offline is not None:
819 if self.op.offline:
820 msg = "can't change to offline without being down first"
821 else:
822 msg = "can't change to online (down) without being offline first"
823 CheckInstanceState(self, self.instance, INSTANCE_NOT_RUNNING,
824 msg=msg)
825
826 @staticmethod
828 """Create a NIC mod that adds or removes the instance
829 communication NIC to a running instance.
830
831 The NICS are dynamically created using the Dynamic Device
832 Modification (DDM). This function produces a NIC modification
833 (mod) that inserts an additional NIC meant for instance
834 communication in or removes an existing instance communication NIC
835 from a running instance, using DDM.
836
837 @type cfg: L{config.ConfigWriter}
838 @param cfg: cluster configuration
839
840 @type instance_communication: boolean
841 @param instance_communication: whether instance communication is
842 enabled or disabled
843
844 @type instance: L{objects.Instance}
845 @param instance: instance to which the NIC mod will be applied to
846
847 @rtype: (L{constants.DDM_ADD}, -1, parameters) or
848 (L{constants.DDM_REMOVE}, -1, parameters) or
849 L{None}
850 @return: DDM mod containing an action to add or remove the NIC, or
851 None if nothing needs to be done
852
853 """
854 nic_name = ComputeInstanceCommunicationNIC(instance.name)
855
856 instance_communication_nic = None
857
858 for nic in instance.nics:
859 if nic.name == nic_name:
860 instance_communication_nic = nic
861 break
862
863 if instance_communication and not instance_communication_nic:
864 action = constants.DDM_ADD
865 params = {constants.INIC_NAME: nic_name,
866 constants.INIC_MAC: constants.VALUE_GENERATE,
867 constants.INIC_IP: constants.NIC_IP_POOL,
868 constants.INIC_NETWORK:
869 cfg.GetInstanceCommunicationNetwork()}
870 elif not instance_communication and instance_communication_nic:
871 action = constants.DDM_REMOVE
872 params = None
873 else:
874 action = None
875 params = None
876
877 if action is not None:
878 return (action, -1, params)
879 else:
880 return None
881
888
890 if self.op.hotplug or self.op.hotplug_if_possible:
891 result = self.rpc.call_hotplug_supported(self.instance.primary_node,
892 self.instance)
893 if result.fail_msg:
894 if self.op.hotplug:
895 result.Raise("Hotplug is not possible: %s" % result.fail_msg,
896 prereq=True, ecode=errors.ECODE_STATE)
897 else:
898 self.LogWarning(result.fail_msg)
899 self.op.hotplug = False
900 self.LogInfo("Modification will take place without hotplugging.")
901 else:
902 self.op.hotplug = True
903
914
932
946
979
981
982 instance_os = (self.op.os_name
983 if self.op.os_name and not self.op.force
984 else self.instance.os)
985
986 if self.op.osparams or self.op.osparams_private:
987 public_parms = self.op.osparams or {}
988 private_parms = self.op.osparams_private or {}
989 dupe_keys = utils.GetRepeatedKeys(public_parms, private_parms)
990
991 if dupe_keys:
992 raise errors.OpPrereqError("OS parameters repeated multiple times: %s" %
993 utils.CommaJoin(dupe_keys))
994
995 self.os_inst = GetUpdatedParams(self.instance.osparams,
996 public_parms)
997 self.os_inst_private = GetUpdatedParams(self.instance.osparams_private,
998 private_parms)
999
1000 CheckOSParams(self, True, node_uuids, instance_os,
1001 objects.FillDict(self.os_inst,
1002 self.os_inst_private),
1003 self.op.force_variant)
1004
1005 else:
1006 self.os_inst = {}
1007 self.os_inst_private = {}
1008
  def _ProcessMem(self, cluster_hvparams, be_old, pnode_uuid):
    """Memory-related prerequisite checks.

    Verifies that a maxmem increase fits on the primary (and, with
    auto_balance, on the secondaries), and that a runtime memory change
    stays within the proposed min/max bounds and available node memory.

    NOTE(review): reads C{self.be_new}, C{self.be_proposed} and appends to
    C{self.warn}; these are presumably prepared by earlier CheckPrereq
    helpers — confirm against the caller.

    @param cluster_hvparams: hypervisor parameters used for the node/instance
        RPC queries
    @param be_old: the instance's backend parameters before this change
    @param pnode_uuid: UUID of the instance's primary node

    @raise errors.OpPrereqError: if the requested memory cannot be satisfied

    """
    # Only check when maxmem grows and the user did not force the change.
    if (constants.BE_MAXMEM in self.op.beparams and not self.op.force and
        self.be_new[constants.BE_MAXMEM] > be_old[constants.BE_MAXMEM]):
      mem_check_list = [pnode_uuid]
      if self.be_new[constants.BE_AUTO_BALANCE]:
        # auto_balance means the secondaries must be able to host the
        # instance too, so include them in the free-memory query
        mem_check_list.extend(
          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid))
      instance_info = self._GetInstanceInfo(cluster_hvparams)
      hvspecs = [(self.instance.hypervisor,
                  cluster_hvparams)]
      nodeinfo = self.rpc.call_node_info(mem_check_list, None,
                                         hvspecs)
      pninfo = nodeinfo[pnode_uuid]
      msg = pninfo.fail_msg
      if msg:
        # Primary node unreachable: warn but do not fail the prereq check.
        self.warn.append("Can't get info from primary node %s: %s" %
                         (self.cfg.GetNodeName(pnode_uuid), msg))
      else:
        (_, _, (pnhvinfo, )) = pninfo.payload
        if not isinstance(pnhvinfo.get("memory_free", None), int):
          self.warn.append("Node data from primary node %s doesn't contain"
                           " free memory information" %
                           self.cfg.GetNodeName(pnode_uuid))
        elif instance_info.fail_msg:
          self.warn.append("Can't get instance runtime information: %s" %
                           instance_info.fail_msg)
        else:
          if instance_info.payload:
            current_mem = int(instance_info.payload["memory"])
          else:
            # No payload: the instance is assumed not to be running, so it
            # currently consumes no node memory.
            current_mem = 0

          # Memory still missing on the primary after accounting for what
          # the instance already uses and what the node has free.
          miss_mem = (self.be_new[constants.BE_MAXMEM] - current_mem -
                      pnhvinfo["memory_free"])
          if miss_mem > 0:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from starting, due to %d MB of memory"
                                       " missing on its primary node" %
                                       miss_mem, errors.ECODE_NORES)

      if self.be_new[constants.BE_AUTO_BALANCE]:
        # Every secondary must have enough free memory for a failover.
        secondary_nodes = \
          self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
        for node_uuid, nres in nodeinfo.items():
          if node_uuid not in secondary_nodes:
            continue
          nres.Raise("Can't get info from secondary node %s" %
                     self.cfg.GetNodeName(node_uuid), prereq=True,
                     ecode=errors.ECODE_STATE)
          (_, _, (nhvinfo, )) = nres.payload
          if not isinstance(nhvinfo.get("memory_free", None), int):
            raise errors.OpPrereqError("Secondary node %s didn't return free"
                                       " memory information" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

          elif self.be_new[constants.BE_MAXMEM] > nhvinfo["memory_free"]:
            raise errors.OpPrereqError("This change will prevent the instance"
                                       " from failover to its secondary node"
                                       " %s, due to not enough memory" %
                                       self.cfg.GetNodeName(node_uuid),
                                       errors.ECODE_STATE)

    if self.op.runtime_mem:
      # Ballooning: the instance must be running for its memory to change.
      remote_info = self.rpc.call_instance_info(
        self.instance.primary_node, self.instance.name,
        self.instance.hypervisor,
        cluster_hvparams)
      remote_info.Raise("Error checking node %s" %
                        self.cfg.GetNodeName(self.instance.primary_node),
                        prereq=True)
      if not remote_info.payload:
        raise errors.OpPrereqError("Instance %s is not running" %
                                   self.instance.name, errors.ECODE_STATE)

      current_memory = remote_info.payload["memory"]
      # Without --force, the new runtime memory must stay within the
      # proposed minmem/maxmem bounds.
      if (not self.op.force and
          (self.op.runtime_mem > self.be_proposed[constants.BE_MAXMEM] or
           self.op.runtime_mem < self.be_proposed[constants.BE_MINMEM])):
        raise errors.OpPrereqError("Instance %s must have memory between %d"
                                   " and %d MB of memory unless --force is"
                                   " given" %
                                   (self.instance.name,
                                    self.be_proposed[constants.BE_MINMEM],
                                    self.be_proposed[constants.BE_MAXMEM]),
                                   errors.ECODE_INVAL)

      delta = self.op.runtime_mem - current_memory
      if delta > 0:
        # Growing: the primary node must have the extra memory available.
        CheckNodeFreeMemory(
          self, self.instance.primary_node,
          "ballooning memory for instance %s" % self.instance.name, delta,
          self.instance.hypervisor,
          self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
1110
    """Check prerequisites.

    This only checks the instance list against the existing names.

    """
    # We must own the instance lock before reading its configuration.
    assert self.op.instance_name in self.owned_locks(locking.LEVEL_INSTANCE)
    self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
    self.cluster = self.cfg.GetClusterInfo()
    cluster_hvparams = self.cluster.hvparams[self.instance.hypervisor]

    # Resolve symbolic disk references in the requested disk modifications.
    self.op.disks = self._LookupDiskMods()

    assert self.instance is not None, \
      "Cannot retrieve locked instance %s" % self.op.instance_name

    # Non-fatal problems found during the checks are accumulated here and
    # reported from Exec (which has a feedback function).
    self.warn = []

    # Changing the primary node requires the instance to be stopped, unless
    # the user explicitly forces the change.
    if (self.op.pnode_uuid is not None and
        self.op.pnode_uuid != self.instance.primary_node and
        not self.op.force):
      instance_info = self._GetInstanceInfo(cluster_hvparams)

      if instance_info.fail_msg:
        self.warn.append("Can't get instance runtime information: %s" %
                         instance_info.fail_msg)
      elif instance_info.payload:
        raise errors.OpPrereqError(
          "Instance is still running on %s" %
          self.cfg.GetNodeName(self.instance.primary_node),
          errors.ECODE_STATE)
    pnode_uuid = self.instance.primary_node
    assert pnode_uuid in self.owned_locks(locking.LEVEL_NODE)

    node_uuids = list(self.cfg.GetInstanceNodes(self.instance.uuid))
    pnode_info = self.cfg.GetNodeInfo(pnode_uuid)

    assert pnode_info.group in self.owned_locks(locking.LEVEL_NODEGROUP)
    group_info = self.cfg.GetNodeGroup(pnode_info.group)

    # dictionary with instance specs, filled below and used for the instance
    # policy checks at the end of this method
    ispec = {}

    self._CheckHotplug()

    self._PrepareNicCommunication()

    # disks processing
    assert not (self.op.disk_template and self.op.disks), \
      "Can't modify disk template and apply disk changes at the same time"

    if self.op.disk_template:
      self._PreCheckDiskTemplate(pnode_info)

    self._PreCheckDisks(ispec)

    # hvparams/beparams/OS/memory checks, each delegated to its own helper
    self._ProcessHVParams(node_uuids)
    be_old = self._ProcessBeParams()

    self._ValidateCpuParams()
    self._ProcessOsParams(node_uuids)
    self._ProcessMem(cluster_hvparams, be_old, pnode_uuid)

    # NIC processing: the closures below are the per-operation callbacks for
    # ApplyContainerMods; they only validate/reserve, the real modification
    # callbacks run later in this method.
    cluster = self.cluster

    def _PrepareNicCreate(_, params, private):
      self._PrepareNicModification(params, private, None, None,
                                   {}, cluster, pnode_uuid)
      return (None, None)

    def _PrepareNicAttach(_, __, ___):
      raise errors.OpPrereqError("Attach operation is not supported for NICs",
                                 errors.ECODE_INVAL)

    def _PrepareNicMod(_, nic, params, private):
      self._PrepareNicModification(params, private, nic.ip, nic.network,
                                   nic.nicparams, cluster, pnode_uuid)
      return None

    def _PrepareNicRemove(_, params, __):
      # Return the NIC's IP to the pool of its network, if any
      ip = params.ip
      net = params.network
      if net is not None and ip is not None:
        self.cfg.ReleaseIp(net, ip, self.proc.GetECId())

    def _PrepareNicDetach(_, __, ___):
      raise errors.OpPrereqError("Detach operation is not supported for NICs",
                                 errors.ECODE_INVAL)

    # Verify NIC changes (operating on a copy so the instance object is not
    # modified during the prereq phase)
    nics = [nic.Copy() for nic in self.instance.nics]
    ApplyContainerMods("NIC", nics, None, self.nicmod, _PrepareNicCreate,
                       _PrepareNicAttach, _PrepareNicMod, _PrepareNicRemove,
                       _PrepareNicDetach)
    if len(nics) > constants.MAX_NICS:
      raise errors.OpPrereqError("Instance has too many network interfaces"
                                 " (%d), cannot add more" % constants.MAX_NICS,
                                 errors.ECODE_STATE)

    # Pre-compute NIC changes (necessary to use result in hooks)
    self._nic_chgdesc = []
    if self.nicmod:
      # Operate on copies as this is still in prereq
      nics = [nic.Copy() for nic in self.instance.nics]
      ApplyContainerMods("NIC", nics, self._nic_chgdesc, self.nicmod,
                         self._CreateNewNic, None, self._ApplyNicMods,
                         self._RemoveNic, None)
      # Verify that NIC names are unique and valid
      utils.ValidateDeviceNames("NIC", nics)
      self._new_nics = nics
      ispec[constants.ISPEC_NIC_COUNT] = len(self._new_nics)
    else:
      self._new_nics = None
      ispec[constants.ISPEC_NIC_COUNT] = len(self.instance.nics)

    if not self.op.ignore_ipolicy:
      ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(self.cluster,
                                                              group_info)

      # Fill ispec with backend parameters
      ispec[constants.ISPEC_SPINDLE_USE] = \
        self.be_new.get(constants.BE_SPINDLE_USE, None)
      ispec[constants.ISPEC_CPU_COUNT] = self.be_new.get(constants.BE_VCPUS,
                                                         None)

      # Copy ispec to verify parameters with min/max values separately
      if self.op.disk_template:
        count = ispec[constants.ISPEC_DISK_COUNT]
        new_disk_types = [self.op.disk_template] * count
      else:
        old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
        add_disk_count = ispec[constants.ISPEC_DISK_COUNT] - len(old_disks)
        dev_type = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
        if dev_type == constants.DT_DISKLESS and add_disk_count != 0:
          raise errors.ProgrammerError(
            "Conversion from diskless instance not possible and should have"
            " been caught")

        new_disk_types = ([d.dev_type for d in old_disks] +
                          [dev_type] * add_disk_count)
      # The policy is checked against both the proposed maximum and minimum
      # memory, so that both extremes of the allowed range are valid.
      ispec_max = ispec.copy()
      ispec_max[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MAXMEM, None)
      res_max = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_max,
                                                    new_disk_types)
      ispec_min = ispec.copy()
      ispec_min[constants.ISPEC_MEM_SIZE] = \
        self.be_new.get(constants.BE_MINMEM, None)
      res_min = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec_min,
                                                    new_disk_types)

      if (res_max or res_min):
        # FIXME: Improve error message by including information about whether
        # the upper or lower limit of the parameter fails the ipolicy.
        msg = ("Instance allocation to group %s (%s) violates policy: %s" %
               (group_info, group_info.name,
                utils.CommaJoin(set(res_max + res_min))))
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1270
    """Converts the disks of an instance to another type.

    This function converts the disks of an instance. It supports
    conversions among all the available disk types except conversions
    between the LVM-based disk types, that use their separate code path.
    Also, this method does not support conversions that include the 'diskless'
    template and those targeting the 'blockdev' template.

    @type feedback_fn: callable
    @param feedback_fn: function used to send feedback back to the caller

    @rtype: NoneType
    @return: None
    @raise errors.OpPrereqError: in case of failure

    """
    template_info = self.op.disk_template
    # For 'ext' templates the provider name is part of the human-readable
    # template description.
    if self.op.disk_template == constants.DT_EXT:
      template_info = ":".join([self.op.disk_template,
                                self.op.ext_params["provider"]])

    old_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
    feedback_fn("Converting disk template from '%s' to '%s'" %
                (old_template, template_info))

    # Unsupported pairs should have been rejected in CheckPrereq already.
    assert not (old_template in constants.DTS_NOT_CONVERTIBLE_FROM or
                self.op.disk_template in constants.DTS_NOT_CONVERTIBLE_TO), \
      ("Unsupported disk template conversion from '%s' to '%s'" %
       (old_template, self.op.disk_template))

    pnode_uuid = self.instance.primary_node
    snode_uuid = []
    if self.op.remote_node_uuid:
      snode_uuid = [self.op.remote_node_uuid]

    old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)

    feedback_fn("Generating new '%s' disk template..." % template_info)
    file_storage_dir = CalculateFileStorageDir(
        self.op.disk_template, self.cfg, self.instance.name,
        file_storage_dir=self.op.file_storage_dir)
    new_disks = GenerateDiskTemplate(self,
                                     self.op.disk_template,
                                     self.instance.uuid,
                                     pnode_uuid,
                                     snode_uuid,
                                     self.disks_info,
                                     file_storage_dir,
                                     self.op.file_driver,
                                     0,
                                     feedback_fn,
                                     self.diskparams)

    # Create the new block devices for the instance.
    feedback_fn("Creating new empty disks of type '%s'..." % template_info)
    try:
      CreateDisks(self, self.instance, disk_template=self.op.disk_template,
                  disks=new_disks)
    except errors.OpExecError:
      self.LogWarning("Device creation failed")
      # Creation failed: give the reserved DRBD minors back before re-raising
      for disk in new_disks:
        self.cfg.ReleaseDRBDMinors(disk.uuid)
      raise

    # Transfer the data from the old to the newly created disks of the
    # instance, one disk at a time.
    feedback_fn("Populating the new empty disks of type '%s'..." %
                template_info)
    for idx, (old, new) in enumerate(zip(old_disks, new_disks)):
      feedback_fn(" - copying data from disk %s (%s), size %s" %
                  (idx, old.dev_type,
                   utils.FormatUnit(new.size, "h")))
      # For DRBD the actual data lives in the first child (the data LV)
      if old.dev_type == constants.DT_DRBD8:
        old = old.children[0]
      result = self.rpc.call_blockdev_convert(pnode_uuid, (old, self.instance),
                                              (new, self.instance))
      msg = result.fail_msg
      if msg:
        # A disk failed to copy. Abort the conversion operation and rollback
        # the modifications to the previous state. The instance will remain
        # intact.
        if self.op.disk_template == constants.DT_DRBD8:
          new = new.children[0]
        self.Log(" - ERROR: Could not copy disk '%s' to '%s'" %
                 (old.logical_id[1], new.logical_id[1]))
        try:
          self.LogInfo("Some disks failed to copy")
          self.LogInfo("The instance will not be affected, aborting operation")
          self.LogInfo("Removing newly created disks of type '%s'..." %
                       template_info)
          RemoveDisks(self, self.instance, disks=new_disks)
          self.LogInfo("Newly created disks removed successfully")
        finally:
          # Whatever happens during cleanup, release the minors and then
          # surface the original copy error.
          for disk in new_disks:
            self.cfg.ReleaseDRBDMinors(disk.uuid)
          result.Raise("Error while converting the instance's template")

    # In case of DRBD disk, return its port to the pool
    for disk in old_disks:
      if disk.dev_type == constants.DT_DRBD8:
        tcp_port = disk.logical_id[2]
        self.cfg.AddTcpUdpPort(tcp_port)

    # Remove old disks from the instance
    feedback_fn("Detaching old disks (%s) from the instance and removing"
                " them from cluster config" % old_template)
    for old_disk in old_disks:
      self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)

    # Attach the new disks to the instance
    feedback_fn("Adding new disks (%s) to cluster config and attaching"
                " them to the instance" % template_info)
    for (idx, new_disk) in enumerate(new_disks):
      self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)

    # Re-read the instance from the configuration
    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    # Release node locks while waiting for sync
    ReleaseLocks(self, locking.LEVEL_NODE)

    disk_abort = not WaitForSync(self, self.instance,
                                 oneshot=not self.op.wait_for_sync)
    if disk_abort:
      raise errors.OpExecError("There are some degraded disks for"
                               " this instance, please cleanup manually")

    feedback_fn("Removing old block devices of type '%s'..." % old_template)
    RemoveDisks(self, self.instance, disks=old_disks)
1401
1402
1404 """Converts an instance from plain to drbd.
1405
1406 """
1407 feedback_fn("Converting disk template from 'plain' to 'drbd'")
1408
1409 pnode_uuid = self.instance.primary_node
1410 snode_uuid = self.op.remote_node_uuid
1411 old_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
1412
1413 assert utils.AnyDiskOfType(old_disks, [constants.DT_PLAIN])
1414
1415 new_disks = GenerateDiskTemplate(self, self.op.disk_template,
1416 self.instance.uuid, pnode_uuid,
1417 [snode_uuid], self.disks_info,
1418 None, None, 0,
1419 feedback_fn, self.diskparams)
1420 anno_disks = rpc.AnnotateDiskParams(new_disks, self.diskparams)
1421 p_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, pnode_uuid)
1422 s_excl_stor = IsExclusiveStorageEnabledNodeUuid(self.cfg, snode_uuid)
1423 info = GetInstanceInfoText(self.instance)
1424 feedback_fn("Creating additional volumes...")
1425
1426 for disk in anno_disks:
1427
1428 CreateSingleBlockDev(self, pnode_uuid, self.instance, disk.children[1],
1429 info, True, p_excl_stor)
1430 for child in disk.children:
1431 CreateSingleBlockDev(self, snode_uuid, self.instance, child, info, True,
1432 s_excl_stor)
1433
1434
1435 feedback_fn("Renaming original volumes...")
1436 rename_list = [(o, n.children[0].logical_id)
1437 for (o, n) in zip(old_disks, new_disks)]
1438 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_list)
1439 result.Raise("Failed to rename original LVs")
1440
1441 feedback_fn("Initializing DRBD devices...")
1442
1443 try:
1444 for disk in anno_disks:
1445 for (node_uuid, excl_stor) in [(pnode_uuid, p_excl_stor),
1446 (snode_uuid, s_excl_stor)]:
1447 f_create = node_uuid == pnode_uuid
1448 CreateSingleBlockDev(self, node_uuid, self.instance, disk, info,
1449 f_create, excl_stor)
1450 except errors.GenericError, e:
1451 feedback_fn("Initializing of DRBD devices failed;"
1452 " renaming back original volumes...")
1453 rename_back_list = [(n.children[0], o.logical_id)
1454 for (n, o) in zip(new_disks, old_disks)]
1455 result = self.rpc.call_blockdev_rename(pnode_uuid, rename_back_list)
1456 result.Raise("Failed to rename LVs back after error %s" % str(e))
1457 raise
1458
1459
1460 for old_disk in old_disks:
1461 self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)
1462
1463
1464 for (idx, new_disk) in enumerate(new_disks):
1465 self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)
1466
1467
1468 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
1469
1470
1471 ReleaseLocks(self, locking.LEVEL_NODE)
1472
1473
1474 disk_abort = not WaitForSync(self, self.instance,
1475 oneshot=not self.op.wait_for_sync)
1476 if disk_abort:
1477 raise errors.OpExecError("There are some degraded disks for"
1478 " this instance, please cleanup manually")
1479
1480
1481
    """Converts an instance from drbd to plain.

    """
    secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
    disks = self.cfg.GetInstanceDisks(self.instance.uuid)
    # drbd8 always has exactly one secondary node
    assert len(secondary_nodes) == 1
    assert utils.AnyDiskOfType(disks, [constants.DT_DRBD8])

    feedback_fn("Converting disk template from 'drbd' to 'plain'")

    old_disks = AnnotateDiskParams(self.instance, disks, self.cfg)
    # The plain disks are the DRBD data children (the LVs carrying the data)
    new_disks = [d.children[0] for d in disks]

    # copy over size, mode and name and set the right nodes
    for parent, child in zip(old_disks, new_disks):
      child.size = parent.size
      child.mode = parent.mode
      child.name = parent.name
      child.nodes = [self.instance.primary_node]

    # this is a DRBD disk, return its port to the pool
    for disk in old_disks:
      tcp_port = disk.logical_id[2]
      self.cfg.AddTcpUdpPort(tcp_port)

    # Remove the old disks from the instance
    for old_disk in old_disks:
      self.cfg.RemoveInstanceDisk(self.instance.uuid, old_disk.uuid)

    # Attach the new disks to the instance
    for (idx, new_disk) in enumerate(new_disks):
      self.cfg.AddInstanceDisk(self.instance.uuid, new_disk, idx=idx)

    # re-read the instance from the configuration
    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    # Release locks in case removing disks takes a while
    ReleaseLocks(self, locking.LEVEL_NODE)

    feedback_fn("Removing volumes on the secondary node...")
    RemoveDisks(self, self.instance, disks=old_disks,
                target_node_uuid=secondary_nodes[0])

    feedback_fn("Removing unneeded volumes on the primary node...")
    # only the DRBD meta volumes (children[1]) are unneeded on the primary;
    # the data volumes are now the instance's plain disks
    meta_disks = []
    for idx, disk in enumerate(old_disks):
      meta_disks.append(disk.children[1])
    RemoveDisks(self, self.instance, disks=meta_disks)
1531
    # Ask the node daemon on the primary node to hot-(un)plug the device;
    # failures are logged but deliberately non-fatal (best effort), and the
    # outcome is reported back as a "hotplug:done"/"hotplug:failed" string
    # for the change description.
    self.LogInfo("Trying to hotplug device...")
    msg = "hotplug:"
    result = self.rpc.call_hotplug_device(self.instance.primary_node,
                                          self.instance, action, dev_type,
                                          (device, self.instance),
                                          extra, seq)
    if result.fail_msg:
      self.LogWarning("Could not hotplug device: %s" % result.fail_msg)
      self.LogInfo("Continuing execution..")
      msg += "failed"
    else:
      self.LogInfo("Hotplug done.")
      msg += "done"
    return msg
1547
1554
    # Compute the backing file path (only meaningful for file-based disk
    # types) and generate a single disk object of the requested type.
    file_path = CalculateFileStorageDir(
        disk_type, self.cfg, self.instance.name,
        file_storage_dir=self.op.file_storage_dir)

    self._FillFileDriver()

    secondary_nodes = self.cfg.GetInstanceSecondaryNodes(self.instance.uuid)
    # GenerateDiskTemplate returns a list; only one disk was requested
    return \
      GenerateDiskTemplate(self, disk_type, self.instance.uuid,
                           self.instance.primary_node, secondary_nodes,
                           [params], file_path, self.op.file_driver, idx,
                           self.Log, self.diskparams)[0]
1568
    """Creates a new disk.

    """
    # add a new disk of the instance's current disk template
    disk_template = self.cfg.GetInstanceDiskTemplate(self.instance.uuid)
    disk = self._GenerateDiskTemplateWrapper(idx, disk_template,
                                             params)
    new_disks = CreateDisks(self, self.instance, disks=[disk])
    self.cfg.AddInstanceDisk(self.instance.uuid, disk, idx)

    # re-read the instance from the configuration
    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    if self.cluster.prealloc_wipe_disks:
      # Wipe new disk, passing the created devices for cleanup on failure
      WipeOrCleanupDisks(self, self.instance,
                         disks=[(idx, disk, 0)],
                         cleanup=new_disks)

    changes = [
      ("disk/%d" % idx,
       "add:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]
    if self.op.hotplug:
      # Assemble the new block device on the primary node so it can be
      # hot-added to the running instance.
      result = self.rpc.call_blockdev_assemble(self.instance.primary_node,
                                               (disk, self.instance),
                                               self.instance, True, idx)
      if result.fail_msg:
        changes.append(("disk/%d" % idx, "assemble:failed"))
        self.LogWarning("Can't assemble newly created disk %d: %s",
                        idx, result.fail_msg)
      else:
        _, link_name, uri = result.payload
        msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                  constants.HOTPLUG_TARGET_DISK,
                                  disk, (link_name, uri), idx)
        changes.append(("disk/%d" % idx, msg))

    return (disk, changes)
1609
1610 - def _PostAddDisk(self, _, disk):
1611 if not WaitForSync(self, self.instance, disks=[disk],
1612 oneshot=not self.op.wait_for_sync):
1613 raise errors.OpExecError("Failed to sync disks of %s" %
1614 self.instance.name)
1615
1616
1617
1618 if not self.instance.disks_active:
1619 ShutdownInstanceDisks(self, self.instance, disks=[disk])
1620
    """Attaches an existing disk to an instance.

    """
    uuid = params.get("uuid", None)
    name = params.get(constants.IDISK_NAME, None)

    disk = self.GenericGetDiskInfo(uuid, name)

    # Rename disk before attaching (if disk is filebased)
    if disk.dev_type in (constants.DTS_INSTANCE_DEPENDENT_PATH):
      # Add disk size/mode, else GenerateDiskTemplate will not work properly
      params[constants.IDISK_SIZE] = disk.size
      params[constants.IDISK_MODE] = str(disk.mode)
      # Generate a throw-away disk only to obtain the path the attached disk
      # should have for this instance, then rename the real one to match.
      dummy_disk = self._GenerateDiskTemplateWrapper(idx, disk.dev_type, params)
      new_logical_id = dummy_disk.logical_id
      result = self.rpc.call_blockdev_rename(self.instance.primary_node,
                                             [(disk, new_logical_id)])
      result.Raise("Failed before attach")
      self.cfg.SetDiskLogicalID(disk.uuid, new_logical_id)
      disk.logical_id = new_logical_id

    # Attach disk to instance
    self.cfg.AttachInstanceDisk(self.instance.uuid, disk.uuid, idx)

    # re-read the instance from the configuration
    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    changes = [
      ("disk/%d" % idx,
       "attach:size=%s,mode=%s" % (disk.size, disk.mode)),
      ]

    disks_ok, _, payloads = AssembleInstanceDisks(self, self.instance,
                                                  disks=[disk])
    if not disks_ok:
      # Assembly failed; report it in the changes and skip the hotplug
      changes.append(("disk/%d" % idx, "assemble:failed"))
      return disk, changes

    if self.op.hotplug:
      _, link_name, uri = payloads[0]
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_DISK,
                                disk, (link_name, uri), idx)
      changes.append(("disk/%d" % idx, msg))

    return (disk, changes)
1668
1701
1726
    """Detaches a disk from an instance.

    """
    hotmsg = ""
    if self.op.hotplug:
      hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,
                                   constants.HOTPLUG_TARGET_DISK,
                                   root, None, idx)

    # Always shutdown the disk before detaching.
    ShutdownInstanceDisks(self, self.instance, [root])

    # Rename detached disk.
    #
    # Transform logical_id from:
    #   <file_storage_dir>/<instance_name>/<disk_name>
    # to
    #   <file_storage_dir>/<disk_name>
    if root.dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE):
      file_driver = root.logical_id[0]
      instance_path, disk_name = os.path.split(root.logical_id[1])
      new_path = os.path.join(os.path.dirname(instance_path), disk_name)
      new_logical_id = (file_driver, new_path)
      result = self.rpc.call_blockdev_rename(self.instance.primary_node,
                                             [(root, new_logical_id)])
      result.Raise("Failed before detach")
      # Update logical_id
      self.cfg.SetDiskLogicalID(root.uuid, new_logical_id)

    # Remove disk from config
    self.cfg.DetachInstanceDisk(self.instance.uuid, root.uuid)

    # re-read the instance from the configuration
    self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)

    return hotmsg
1764
    """Creates data structure for a new network interface.

    """
    mac = params[constants.INIC_MAC]
    ip = params.get(constants.INIC_IP, None)
    net = params.get(constants.INIC_NETWORK, None)
    name = params.get(constants.INIC_NAME, None)
    net_uuid = self.cfg.LookupNetwork(net)
    # the nicparams were validated (and filled) in CheckPrereq
    nicparams = private.filled
    nobj = objects.NIC(mac=mac, ip=ip, network=net_uuid, name=name,
                       nicparams=nicparams)
    nobj.uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    changes = [
      ("nic.%d" % idx,
       "add:mac=%s,ip=%s,mode=%s,link=%s,network=%s" %
       (mac, ip, private.filled[constants.NIC_MODE],
        private.filled[constants.NIC_LINK], net)),
      ]

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,
                                constants.HOTPLUG_TARGET_NIC,
                                nobj, None, idx)
      changes.append(("nic.%d" % idx, msg))

    return (nobj, changes)
1794
    """Modifies a network interface.

    """
    changes = []

    # Simple scalar attributes are copied straight onto the NIC object
    for key in [constants.INIC_MAC, constants.INIC_IP, constants.INIC_NAME]:
      if key in params:
        changes.append(("nic.%s/%d" % (key, idx), params[key]))
        setattr(nic, key, params[key])

    new_net = params.get(constants.INIC_NETWORK, nic.network)
    new_net_uuid = self.cfg.LookupNetwork(new_net)
    if new_net_uuid != nic.network:
      changes.append(("nic.network/%d" % idx, new_net))
      nic.network = new_net_uuid

    if private.filled:
      # nicparams were validated/filled in CheckPrereq
      nic.nicparams = private.filled

      for (key, val) in nic.nicparams.items():
        changes.append(("nic.%s/%d" % (key, idx), val))

    if self.op.hotplug:
      msg = self._HotplugDevice(constants.HOTPLUG_ACTION_MODIFY,
                                constants.HOTPLUG_TARGET_NIC,
                                nic, None, idx)
      changes.append(("nic/%d" % idx, msg))

    return changes
1825
1831
1832 - def Exec(self, feedback_fn):
1833 """Modifies an instance.
1834
1835 All parameters take effect only at the next restart of the instance.
1836
1837 """
1838 self.feedback_fn = feedback_fn
1839
1840
1841
1842 for warn in self.warn:
1843 feedback_fn("WARNING: %s" % warn)
1844
1845 assert ((self.op.disk_template is None) ^
1846 bool(self.owned_locks(locking.LEVEL_NODE_RES))), \
1847 "Not owning any node resource locks"
1848
1849 result = []
1850
1851
1852 if self.op.pnode_uuid:
1853 self.instance.primary_node = self.op.pnode_uuid
1854
1855
1856 if self.op.runtime_mem:
1857 rpcres = self.rpc.call_instance_balloon_memory(self.instance.primary_node,
1858 self.instance,
1859 self.op.runtime_mem)
1860 rpcres.Raise("Cannot modify instance runtime memory")
1861 result.append(("runtime_memory", self.op.runtime_mem))
1862
1863
1864 inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)
1865 ApplyContainerMods("disk", inst_disks, result, self.diskmod,
1866 self._CreateNewDisk, self._AttachDisk, self._ModifyDisk,
1867 self._RemoveDisk, self._DetachDisk,
1868 post_add_fn=self._PostAddDisk)
1869
1870 if self.op.disk_template:
1871 if __debug__:
1872 check_nodes = set(self.cfg.GetInstanceNodes(self.instance.uuid))
1873 if self.op.remote_node_uuid:
1874 check_nodes.add(self.op.remote_node_uuid)
1875 for level in [locking.LEVEL_NODE, locking.LEVEL_NODE_RES]:
1876 owned = self.owned_locks(level)
1877 assert not (check_nodes - owned), \
1878 ("Not owning the correct locks, owning %r, expected at least %r" %
1879 (owned, check_nodes))
1880
1881 r_shut = ShutdownInstanceDisks(self, self.instance)
1882 if not r_shut:
1883 raise errors.OpExecError("Cannot shutdown instance disks, unable to"
1884 " proceed with disk template conversion")
1885
1886 mode = (self.cfg.GetInstanceDiskTemplate(self.instance.uuid),
1887 self.op.disk_template)
1888 try:
1889 if mode in self._DISK_CONVERSIONS:
1890 self._DISK_CONVERSIONS[mode](self, feedback_fn)
1891 else:
1892 self._ConvertInstanceDisks(feedback_fn)
1893 except:
1894 for disk in inst_disks:
1895 self.cfg.ReleaseDRBDMinors(disk.uuid)
1896 raise
1897 result.append(("disk_template", self.op.disk_template))
1898
1899 disk_info = self.cfg.GetInstanceDisks(self.instance.uuid)
1900 assert utils.AllDiskOfType(disk_info, [self.op.disk_template]), \
1901 ("Expected disk template '%s', found '%s'" %
1902 (self.op.disk_template,
1903 self.cfg.GetInstanceDiskTemplate(self.instance.uuid)))
1904
1905
1906
1907 ReleaseLocks(self, locking.LEVEL_NODE)
1908 ReleaseLocks(self, locking.LEVEL_NODE_RES)
1909
1910
1911 if self._new_nics is not None:
1912 self.instance.nics = self._new_nics
1913 result.extend(self._nic_chgdesc)
1914
1915
1916 if self.op.hvparams:
1917 self.instance.hvparams = self.hv_inst
1918 for key, val in self.op.hvparams.iteritems():
1919 result.append(("hv/%s" % key, val))
1920
1921
1922 if self.op.beparams:
1923 self.instance.beparams = self.be_inst
1924 for key, val in self.op.beparams.iteritems():
1925 result.append(("be/%s" % key, val))
1926
1927
1928 if self.op.os_name:
1929 self.instance.os = self.op.os_name
1930
1931
1932 if self.op.osparams:
1933 self.instance.osparams = self.os_inst
1934 for key, val in self.op.osparams.iteritems():
1935 result.append(("os/%s" % key, val))
1936
1937 if self.op.osparams_private:
1938 self.instance.osparams_private = self.os_inst_private
1939 for key, val in self.op.osparams_private.iteritems():
1940
1941 result.append(("os_private/%s" % key, repr(val)))
1942
1943 self.cfg.Update(self.instance, feedback_fn, self.proc.GetECId())
1944
1945 if self.op.offline is None:
1946
1947 pass
1948 elif self.op.offline:
1949
1950 self.instance = self.cfg.MarkInstanceOffline(self.instance.uuid)
1951 result.append(("admin_state", constants.ADMINST_OFFLINE))
1952 else:
1953
1954 self.instance = self.cfg.MarkInstanceDown(self.instance.uuid)
1955 result.append(("admin_state", constants.ADMINST_DOWN))
1956
1957 UpdateMetadata(feedback_fn, self.rpc, self.instance)
1958
1959 assert not (self.owned_locks(locking.LEVEL_NODE_RES) or
1960 self.owned_locks(locking.LEVEL_NODE)), \
1961 "All node locks should have been released by now"
1962
1963 return result
1964
  # Mapping of (old template, new template) to a specialized conversion
  # routine; pairs not listed here are handled by the generic
  # _ConvertInstanceDisks path (see Exec).
  _DISK_CONVERSIONS = {
    (constants.DT_PLAIN, constants.DT_DRBD8): _ConvertPlainToDrbd,
    (constants.DT_DRBD8, constants.DT_PLAIN): _ConvertDrbdToPlain,
    }
1969