30 """Logical unit for creating a single instance."""
31
32 import OpenSSL
33 import logging
34 import os
35
36
37 from ganeti import compat
38 from ganeti import constants
39 from ganeti import errors
40 from ganeti import hypervisor
41 from ganeti import locking
42 from ganeti.masterd import iallocator
43 from ganeti import masterd
44 from ganeti import netutils
45 from ganeti import objects
46 from ganeti import pathutils
47 from ganeti import utils
48 from ganeti import serializer
49
50 from ganeti.cmdlib.base import LogicalUnit
51
52 from ganeti.cmdlib.common import \
53 CheckNodeOnline, \
54 CheckParamsNotGlobal, \
55 IsExclusiveStorageEnabledNode, CheckHVParams, CheckOSParams, \
56 ExpandNodeUuidAndName, \
57 IsValidDiskAccessModeCombination, \
58 CheckDiskTemplateEnabled, CheckIAllocatorOrNode, CheckOSImage
59 from ganeti.cmdlib.instance_helpervm import RunWithHelperVM
60 from ganeti.cmdlib.instance_storage import CalculateFileStorageDir, \
61 CheckNodesFreeDiskPerVG, CheckRADOSFreeSpace, CheckSpindlesExclusiveStorage, \
62 ComputeDiskSizePerVG, CreateDisks, \
63 GenerateDiskTemplate, CommitDisks, \
64 WaitForSync, ComputeDisks, \
65 ImageDisks, WipeDisks
66 from ganeti.cmdlib.instance_utils import \
67 CheckNodeNotDrained, CopyLockList, \
68 ReleaseLocks, CheckNodeVmCapable, \
69 RemoveDisks, CheckNodeFreeMemory, \
70 UpdateMetadata, CheckForConflictingIp, \
71 ComputeInstanceCommunicationNIC, \
72 ComputeIPolicyInstanceSpecViolation, \
73 CheckHostnameSane, CheckOpportunisticLocking, \
74 ComputeFullBeParams, ComputeNics, GetClusterDomainSecret, \
75 CheckInstanceExistence, CreateInstanceAllocRequest, BuildInstanceHookEnv, \
76 NICListToTuple, CheckNicsBridgesExist, CheckCompressionTool
77 import ganeti.masterd.instance


class LUInstanceCreate(LogicalUnit):
  """Create an instance.

  """
  HPATH = "instance-add"
  HTYPE = constants.HTYPE_INSTANCE
  REQ_BGL = False
145 """ Check validity of VLANs if given
146
147 """
148 for nic in self.op.nics:
149 vlan = nic.get(constants.INIC_VLAN, None)
150 if vlan:
151 if vlan[0] == ".":
152
153
154 if not vlan[1:].isdigit():
155 vlanlist = vlan[1:].split(':')
156 for vl in vlanlist:
157 if not vl.isdigit():
158 raise errors.OpPrereqError("Specified VLAN parameter is "
159 "invalid : %s" % vlan,
160 errors.ECODE_INVAL)
161 elif vlan[0] == ":":
162
163 vlanlist = vlan[1:].split(':')
164 for vl in vlanlist:
165 if not vl.isdigit():
166 raise errors.OpPrereqError("Specified VLAN parameter is invalid"
167 " : %s" % vlan, errors.ECODE_INVAL)
168 elif vlan.isdigit():
169
170
171 nic[constants.INIC_VLAN] = "." + vlan
172 else:
173 raise errors.OpPrereqError("Specified VLAN parameter is invalid"
174 " : %s" % vlan, errors.ECODE_INVAL)
175
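  # Examples of VLAN strings accepted above: ".100" (single VLAN 100),
  # ".100:200:300" (VLAN 100 plus trunked VLANs 200 and 300), ":200:300"
  # (trunk only) and a bare "100", which is normalized to ".100".
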
177 """Check arguments.
178
179 """
180 if self.op.forthcoming and self.op.commit:
181 raise errors.OpPrereqError("Forthcoming generation and commiting are"
182 " mutually exclusive", errors.ECODE_INVAL)
183
184
185
186 if self.op.no_install and self.op.start:
187 self.LogInfo("No-installation mode selected, disabling startup")
188 self.op.start = False
189
190 self.op.instance_name = \
191 netutils.Hostname.GetNormalizedName(self.op.instance_name)
192
193 if self.op.ip_check and not self.op.name_check:
194
195 raise errors.OpPrereqError("Cannot do IP address check without a name"
196 " check", errors.ECODE_INVAL)
197
198
199 if self.op.name_check:
200 self.hostname = CheckHostnameSane(self, self.op.instance_name)
201 self.op.instance_name = self.hostname.name
202
203 self.check_ip = self.hostname.ip
204 else:
205 self.check_ip = None
206
207
208 if self.op.instance_communication:
209 nic_name = ComputeInstanceCommunicationNIC(self.op.instance_name)
210
211 for nic in self.op.nics:
212 if nic.get(constants.INIC_NAME, None) == nic_name:
213 break
214 else:
215 self.op.nics.append({constants.INIC_NAME: nic_name,
216 constants.INIC_MAC: constants.VALUE_GENERATE,
217 constants.INIC_IP: constants.NIC_IP_POOL,
218 constants.INIC_NETWORK:
219 self.cfg.GetInstanceCommunicationNetwork()})
220
221
222 if self.op.helper_startup_timeout is None:
223 self.op.helper_startup_timeout = constants.HELPER_VM_STARTUP
224
225 if self.op.helper_shutdown_timeout is None:
226 self.op.helper_shutdown_timeout = constants.HELPER_VM_SHUTDOWN
227
228
229 for nic in self.op.nics:
230 utils.ForceDictType(nic, constants.INIC_PARAMS_TYPES)
231
232 utils.ValidateDeviceNames("NIC", self.op.nics)
233
234 self._CheckVLANArguments()
235
236 self._CheckDiskArguments()
237 assert self.op.disk_template is not None
238
239
240 if (self.op.file_driver and
241 not self.op.file_driver in constants.FILE_DRIVER):
242 raise errors.OpPrereqError("Invalid file driver name '%s'" %
243 self.op.file_driver, errors.ECODE_INVAL)
244
245
246 if (not self.op.file_driver and
247 self.op.disk_template in constants.DTS_FILEBASED):
248 self.op.file_driver = constants.FD_DEFAULT
249
250
251 CheckIAllocatorOrNode(self, "iallocator", "pnode")
252
253 if self.op.pnode is not None:
254 if self.op.disk_template in constants.DTS_INT_MIRROR:
255 if self.op.snode is None:
256 raise errors.OpPrereqError("The networked disk templates need"
257 " a mirror node", errors.ECODE_INVAL)
258 elif self.op.snode:
259 self.LogWarning("Secondary node will be ignored on non-mirrored disk"
260 " template")
261 self.op.snode = None
262
263 CheckOpportunisticLocking(self.op)
264
265 if self.op.mode == constants.INSTANCE_IMPORT:
266
267
268
269 self.op.force_variant = True
270
271 if self.op.no_install:
272 self.LogInfo("No-installation mode has no effect during import")
273
274 if objects.GetOSImage(self.op.osparams):
275 self.LogInfo("OS image has no effect during import")
276 elif self.op.mode == constants.INSTANCE_CREATE:
277 os_image = CheckOSImage(self.op)
278
279 if self.op.os_type is None and os_image is None:
280 raise errors.OpPrereqError("No guest OS or OS image specified",
281 errors.ECODE_INVAL)
282
283 if self.op.os_type is not None \
284 and self.op.os_type in self.cfg.GetClusterInfo().blacklisted_os:
285 raise errors.OpPrereqError("Guest OS '%s' is not allowed for"
286 " installation" % self.op.os_type,
287 errors.ECODE_STATE)
288 elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
289 if objects.GetOSImage(self.op.osparams):
290 self.LogInfo("OS image has no effect during import")
291
292 self._cds = GetClusterDomainSecret()
293
294
295 src_handshake = self.op.source_handshake
296 if not src_handshake:
297 raise errors.OpPrereqError("Missing source handshake",
298 errors.ECODE_INVAL)
299
300 errmsg = masterd.instance.CheckRemoteExportHandshake(self._cds,
301 src_handshake)
302 if errmsg:
303 raise errors.OpPrereqError("Invalid handshake: %s" % errmsg,
304 errors.ECODE_INVAL)
305
306
307 self.source_x509_ca_pem = self.op.source_x509_ca
308 if not self.source_x509_ca_pem:
309 raise errors.OpPrereqError("Missing source X509 CA",
310 errors.ECODE_INVAL)
311
312 try:
313 (cert, _) = utils.LoadSignedX509Certificate(self.source_x509_ca_pem,
314 self._cds)
315 except OpenSSL.crypto.Error, err:
316 raise errors.OpPrereqError("Unable to load source X509 CA (%s)" %
317 (err, ), errors.ECODE_INVAL)
318
319 (errcode, msg) = utils.VerifyX509Certificate(cert, None, None)
320 if errcode is not None:
321 raise errors.OpPrereqError("Invalid source X509 CA (%s)" % (msg, ),
322 errors.ECODE_INVAL)
323
324 self.source_x509_ca = cert
325
326 src_instance_name = self.op.source_instance_name
327 if not src_instance_name:
328 raise errors.OpPrereqError("Missing source instance name",
329 errors.ECODE_INVAL)
330
331 self.source_instance_name = \
332 netutils.GetHostname(name=src_instance_name).name
333
334 else:
335 raise errors.OpPrereqError("Invalid instance creation mode %r" %
336 self.op.mode, errors.ECODE_INVAL)
337
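  # The three creation modes validated above have different requirements:
  # INSTANCE_CREATE needs a guest OS or an OS image, INSTANCE_IMPORT takes
  # its data from a local export, and INSTANCE_REMOTE_IMPORT additionally
  # requires a valid inter-cluster handshake, a signed source X509 CA and
  # the source instance name.
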
339 """ExpandNames for CreateInstance.
340
341 Figure out the right locks for instance creation.
342
343 """
344 self.needed_locks = {}
345
346 if self.op.commit:
347 (uuid, name) = self.cfg.ExpandInstanceName(self.op.instance_name)
348 if name is None:
349 raise errors.OpPrereqError("Instance %s unknown" %
350 self.op.instance_name,
351 errors.ECODE_INVAL)
352 self.op.instance_name = name
353 if not self.cfg.GetInstanceInfo(uuid).forthcoming:
354 raise errors.OpPrereqError("Instance %s (with uuid %s) not forthcoming"
355 " but --commit was passed." % (name, uuid),
356 errors.ECODE_STATE)
357 logging.debug("Verified that instance %s with uuid %s is forthcoming",
358 name, uuid)
359 else:
360
361
362 CheckInstanceExistence(self, self.op.instance_name)
363
364 self.add_locks[locking.LEVEL_INSTANCE] = self.op.instance_name
365
366 if self.op.commit:
367 (uuid, _) = self.cfg.ExpandInstanceName(self.op.instance_name)
368 self.needed_locks[locking.LEVEL_NODE] = self.cfg.GetInstanceNodes(uuid)
369 logging.debug("Forthcoming instance %s resides on %s", uuid,
370 self.needed_locks[locking.LEVEL_NODE])
371 elif self.op.iallocator:
372
373
374
375 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
376
377 if self.op.opportunistic_locking:
378 self.opportunistic_locks[locking.LEVEL_NODE] = True
379 self.opportunistic_locks[locking.LEVEL_NODE_RES] = True
380 if self.op.disk_template == constants.DT_DRBD8:
381 self.opportunistic_locks_count[locking.LEVEL_NODE] = 2
382 self.opportunistic_locks_count[locking.LEVEL_NODE_RES] = 2
383 else:
384 (self.op.pnode_uuid, self.op.pnode) = \
385 ExpandNodeUuidAndName(self.cfg, self.op.pnode_uuid, self.op.pnode)
386 nodelist = [self.op.pnode_uuid]
387 if self.op.snode is not None:
388 (self.op.snode_uuid, self.op.snode) = \
389 ExpandNodeUuidAndName(self.cfg, self.op.snode_uuid, self.op.snode)
390 nodelist.append(self.op.snode_uuid)
391 self.needed_locks[locking.LEVEL_NODE] = nodelist
392
393
394 if self.op.mode == constants.INSTANCE_IMPORT:
395 src_node = self.op.src_node
396 src_path = self.op.src_path
397
398 if src_path is None:
399 self.op.src_path = src_path = self.op.instance_name
400
401 if src_node is None:
402 self.needed_locks[locking.LEVEL_NODE] = locking.ALL_SET
403 self.op.src_node = None
404 if os.path.isabs(src_path):
405 raise errors.OpPrereqError("Importing an instance from a path"
406 " requires a source node option",
407 errors.ECODE_INVAL)
408 else:
409 (self.op.src_node_uuid, self.op.src_node) = (_, src_node) = \
410 ExpandNodeUuidAndName(self.cfg, self.op.src_node_uuid, src_node)
411 if self.needed_locks[locking.LEVEL_NODE] is not locking.ALL_SET:
412 self.needed_locks[locking.LEVEL_NODE].append(self.op.src_node_uuid)
413 if not os.path.isabs(src_path):
414 self.op.src_path = \
415 utils.PathJoin(pathutils.EXPORT_DIR, src_path)
416
417 self.needed_locks[locking.LEVEL_NODE_RES] = \
418 CopyLockList(self.needed_locks[locking.LEVEL_NODE])
419
420
421
422
423
424 if self.needed_locks[locking.LEVEL_NODE] == locking.ALL_SET:
425
426
427
428
429 self.needed_locks[locking.LEVEL_NODEGROUP] = locking.ALL_SET
430 else:
431 self.needed_locks[locking.LEVEL_NODEGROUP] = \
432 list(self.cfg.GetNodeGroupsFromNodes(
433 self.needed_locks[locking.LEVEL_NODE]))
434 self.share_locks[locking.LEVEL_NODEGROUP] = 1
435
443 """Run the allocator based on input opcode.
444
445 """
446 if self.op.opportunistic_locking:
447
448 node_name_whitelist = self.cfg.GetNodeNames(
449 set(self.owned_locks(locking.LEVEL_NODE)) &
450 set(self.owned_locks(locking.LEVEL_NODE_RES)))
451 logging.debug("Trying to allocate on nodes %s", node_name_whitelist)
452 else:
453 node_name_whitelist = None
454
455 req = CreateInstanceAllocRequest(self.op, self.disks,
456 self.nics, self.be_full,
457 node_name_whitelist)
458 ial = iallocator.IAllocator(self.cfg, self.rpc, req)
459
460 ial.Run(self.op.iallocator)
461
462 if not ial.success:
463
464 if self.op.opportunistic_locking:
465 ecode = errors.ECODE_TEMP_NORES
466 self.LogInfo("IAllocator '%s' failed on opportunistically acquired"
467 " nodes: %s", self.op.iallocator, ial.info)
468 else:
469 ecode = errors.ECODE_NORES
470
471 raise errors.OpPrereqError("Can't compute nodes using"
472 " iallocator '%s': %s" %
473 (self.op.iallocator, ial.info),
474 ecode)
475
476 (self.op.pnode_uuid, self.op.pnode) = \
477 ExpandNodeUuidAndName(self.cfg, None, ial.result[0])
478 self.LogInfo("Selected nodes for instance %s via iallocator %s: %s",
479 self.op.instance_name, self.op.iallocator,
480 utils.CommaJoin(ial.result))
481
482 assert req.RequiredNodes() in (1, 2), "Wrong node count from iallocator"
483
484 if req.RequiredNodes() == 2:
485 (self.op.snode_uuid, self.op.snode) = \
486 ExpandNodeUuidAndName(self.cfg, None, ial.result[1])
487
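  # A failure with ECODE_TEMP_NORES is deliberately temporary: it tells the
  # job machinery that the opportunistically acquired node set was simply
  # too small, so the creation can be retried with full locking (see
  # PrepareRetry at the end of this class).
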
489 """Build hooks env.
490
491 This runs on master, primary and secondary nodes of the instance.
492
493 """
494 env = {
495 "ADD_MODE": self.op.mode,
496 }
497 if self.op.mode == constants.INSTANCE_IMPORT:
498 env["SRC_NODE"] = self.op.src_node
499 env["SRC_PATH"] = self.op.src_path
500 env["SRC_IMAGES"] = self.src_images
501
502 env.update(BuildInstanceHookEnv(
503 name=self.op.instance_name,
504 primary_node_name=self.op.pnode,
505 secondary_node_names=self.cfg.GetNodeNames(self.secondaries),
506 status=self.op.start,
507 os_type=self.op.os_type,
508 minmem=self.be_full[constants.BE_MINMEM],
509 maxmem=self.be_full[constants.BE_MAXMEM],
510 vcpus=self.be_full[constants.BE_VCPUS],
511 nics=NICListToTuple(self, self.nics),
512 disk_template=self.op.disk_template,
513
514
515 disks=self.disks,
516 bep=self.be_full,
517 hvp=self.hv_full,
518 hypervisor_name=self.op.hypervisor,
519 tags=self.op.tags,
520 ))
521
522 return env
523
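  # Together with the BuildInstanceHookEnv values, hooks therefore see
  # ADD_MODE and, for local imports, SRC_NODE, SRC_PATH and SRC_IMAGES.
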
525 """Build hooks nodes.
526
527 """
528 nl = [self.cfg.GetMasterNode(), self.op.pnode_uuid] + self.secondaries
529 return nl, nl
530
578 """Use export parameters as defaults.
579
580 In case the opcode doesn't specify (as in override) some instance
581 parameters, then try to use them from the export information, if
582 that declares them.
583
584 """
585 self.op.os_type = einfo.get(constants.INISECT_EXP, "os")
586
587 if not self.op.disks:
588 disks = []
589
590 for idx in range(constants.MAX_DISKS):
591 if einfo.has_option(constants.INISECT_INS, "disk%d_size" % idx):
592 disk_sz = einfo.getint(constants.INISECT_INS, "disk%d_size" % idx)
593 disk_name = einfo.get(constants.INISECT_INS, "disk%d_name" % idx)
594 disk = {
595 constants.IDISK_SIZE: disk_sz,
596 constants.IDISK_NAME: disk_name
597 }
598 disks.append(disk)
599 self.op.disks = disks
600 if not disks and self.op.disk_template != constants.DT_DISKLESS:
601 raise errors.OpPrereqError("No disk info specified and the export"
602 " is missing the disk information",
603 errors.ECODE_INVAL)
604
605 if not self.op.nics:
606 nics = []
607 for idx in range(constants.MAX_NICS):
608 if einfo.has_option(constants.INISECT_INS, "nic%d_mac" % idx):
609 ndict = {}
610 for name in [constants.INIC_IP,
611 constants.INIC_MAC, constants.INIC_NAME]:
612 nic_param_name = "nic%d_%s" % (idx, name)
613 if einfo.has_option(constants.INISECT_INS, nic_param_name):
614 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
615 ndict[name] = v
616 network = einfo.get(constants.INISECT_INS,
617 "nic%d_%s" % (idx, constants.INIC_NETWORK))
618
619
620 if network:
621 ndict[constants.INIC_NETWORK] = network
622 else:
623 for name in list(constants.NICS_PARAMETERS):
624 v = einfo.get(constants.INISECT_INS, "nic%d_%s" % (idx, name))
625 ndict[name] = v
626 nics.append(ndict)
627 else:
628 break
629 self.op.nics = nics
630
631 if not self.op.tags and einfo.has_option(constants.INISECT_INS, "tags"):
632 self.op.tags = einfo.get(constants.INISECT_INS, "tags").split()
633
634 if (self.op.hypervisor is None and
635 einfo.has_option(constants.INISECT_INS, "hypervisor")):
636 self.op.hypervisor = einfo.get(constants.INISECT_INS, "hypervisor")
637
638 if einfo.has_section(constants.INISECT_HYP):
639
640
641 for name, value in einfo.items(constants.INISECT_HYP):
642 if name not in self.op.hvparams:
643 self.op.hvparams[name] = value
644
645 if einfo.has_section(constants.INISECT_BEP):
646
647 for name, value in einfo.items(constants.INISECT_BEP):
648 if name not in self.op.beparams:
649 self.op.beparams[name] = value
650
651 if name == constants.BE_MEMORY:
652 if constants.BE_MAXMEM not in self.op.beparams:
653 self.op.beparams[constants.BE_MAXMEM] = value
654 if constants.BE_MINMEM not in self.op.beparams:
655 self.op.beparams[constants.BE_MINMEM] = value
656 else:
657
658 for name in constants.BES_PARAMETERS:
659 if (name not in self.op.beparams and
660 einfo.has_option(constants.INISECT_INS, name)):
661 self.op.beparams[name] = einfo.get(constants.INISECT_INS, name)
662
663 if einfo.has_section(constants.INISECT_OSP):
664
665 for name, value in einfo.items(constants.INISECT_OSP):
666 if name not in self.op.osparams:
667 self.op.osparams[name] = value
668
669 if einfo.has_section(constants.INISECT_OSP_PRIVATE):
670
671 for name, value in einfo.items(constants.INISECT_OSP_PRIVATE):
672 if name not in self.op.osparams_private:
673 self.op.osparams_private[name] = serializer.Private(value, descr=name)
674
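  # einfo is the parsed export info file, a ConfigParser-style object with
  # sections such as INISECT_EXP, INISECT_INS, INISECT_HYP, INISECT_BEP and
  # INISECT_OSP; values given on the opcode always take precedence over
  # values read from the export.
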
708 """Set nodes as in the forthcoming instance
709
710 """
711 (uuid, name) = self.cfg.ExpandInstanceName(self.op.instance_name)
712 inst = self.cfg.GetInstanceInfo(uuid)
713 self.op.pnode_uuid = inst.primary_node
714 self.op.pnode = self.cfg.GetNodeName(inst.primary_node)
715 sec_nodes = self.cfg.GetInstanceSecondaryNodes(uuid)
716 node_names = [self.op.pnode]
717 if sec_nodes:
718 self.op.snode_uuid = sec_nodes[0]
719 self.op.snode = self.cfg.GetNodeName(sec_nodes[0])
720 node_names.append(self.op.snode)
721 self.LogInfo("Nodes of instance %s: %s", name, node_names)
722
724 """Check prerequisites.
725
726 """
727 owned_nodes = frozenset(self.owned_locks(locking.LEVEL_NODE))
728
729 if self.op.commit:
730
731
732 (uuid, name) = self.cfg.ExpandInstanceName(self.op.instance_name)
733 if uuid is None:
734 raise errors.OpPrereqError("Instance %s disappeared from the cluster"
735 " while waiting for locks"
736 % (self.op.instance_name,),
737 errors.ECODE_STATE)
738 if not self.cfg.GetInstanceInfo(uuid).forthcoming:
739 raise errors.OpPrereqError("Instance %s (with uuid %s) is no longer"
740 " forthcoming" % (name, uuid),
741 errors.ECODE_STATE)
742 required_nodes = self.cfg.GetInstanceNodes(uuid)
743 if not owned_nodes.issuperset(required_nodes):
744 raise errors.OpPrereqError("Forthcoming instance %s nodes changed"
745 " since locks were acquired; retry the"
746 " operation" % self.op.instance_name,
747 errors.ECODE_STATE)
748 else:
749 CheckInstanceExistence(self, self.op.instance_name)
750
751
752
753 owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
754 cur_groups = list(self.cfg.GetNodeGroupsFromNodes(owned_nodes))
755 if not owned_groups.issuperset(cur_groups):
756 raise errors.OpPrereqError("New instance %s's node groups changed since"
757 " locks were acquired, current groups are"
758 " are '%s', owning groups '%s'; retry the"
759 " operation" %
760 (self.op.instance_name,
761 utils.CommaJoin(cur_groups),
762 utils.CommaJoin(owned_groups)),
763 errors.ECODE_STATE)
764
765 self.instance_file_storage_dir = CalculateFileStorageDir(
766 self.op.disk_template, self.cfg, self.op.instance_name,
767 self.op.file_storage_dir)
768
769 if self.op.mode == constants.INSTANCE_IMPORT:
770 export_info = self._ReadExportInfo()
771 self._ReadExportParams(export_info)
772 self._old_instance_name = export_info.get(constants.INISECT_INS, "name")
773 else:
774 self._old_instance_name = None
775
776 if (not self.cfg.GetVGName() and
777 self.op.disk_template not in constants.DTS_NOT_LVM):
778 raise errors.OpPrereqError("Cluster does not support lvm-based"
779 " instances", errors.ECODE_STATE)
780
781 if (self.op.hypervisor is None or
782 self.op.hypervisor == constants.VALUE_AUTO):
783 self.op.hypervisor = self.cfg.GetHypervisorType()
784
785 cluster = self.cfg.GetClusterInfo()
786 enabled_hvs = cluster.enabled_hypervisors
787 if self.op.hypervisor not in enabled_hvs:
788 raise errors.OpPrereqError("Selected hypervisor (%s) not enabled in the"
789 " cluster (%s)" %
790 (self.op.hypervisor, ",".join(enabled_hvs)),
791 errors.ECODE_STATE)
792
793
794 for tag in self.op.tags:
795 objects.TaggableObject.ValidateTag(tag)

    # check hypervisor parameter syntax (locally)
    utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
    filled_hvp = cluster.SimpleFillHV(self.op.hypervisor, self.op.os_type,
                                      self.op.hvparams)
    hv_type = hypervisor.GetHypervisorClass(self.op.hypervisor)
    hv_type.CheckParameterSyntax(filled_hvp)
    self.hv_full = filled_hvp
    # check that we don't specify global parameters on an instance
    CheckParamsNotGlobal(self.op.hvparams, constants.HVC_GLOBALS, "hypervisor",
                         "instance", "cluster")

    # fill and remember the beparams dict
    self.be_full = ComputeFullBeParams(self.op, cluster)

    # build os parameters
    if self.op.osparams_private is None:
      self.op.osparams_private = serializer.PrivateDict()
    if self.op.osparams_secret is None:
      self.op.osparams_secret = serializer.PrivateDict()

    self.os_full = cluster.SimpleFillOS(
      self.op.os_type,
      self.op.osparams,
      os_params_private=self.op.osparams_private,
      os_params_secret=self.op.osparams_secret
      )

    # now that hvp/bep are in final format, let's reset to defaults,
    # if told to do so
    if self.op.identify_defaults:
      self._RevertToDefaults(cluster)

    # NIC buildup
    self.nics = ComputeNics(self.op, cluster, self.check_ip, self.cfg,
                            self.proc.GetECId())

    # disk checks/pre-build
    default_vg = self.cfg.GetVGName()
    self.disks = ComputeDisks(self.op.disks, self.op.disk_template, default_vg)

    if self.op.mode == constants.INSTANCE_IMPORT:
      disk_images = []
      for idx in range(len(self.disks)):
        option = "disk%d_dump" % idx
        if export_info.has_option(constants.INISECT_INS, option):
          # FIXME: are the old os-es, disk sizes, etc. useful?
          export_name = export_info.get(constants.INISECT_INS, option)
          image = utils.PathJoin(self.op.src_path, export_name)
          disk_images.append(image)
        else:
          disk_images.append(False)

      self.src_images = disk_images

      if self.op.instance_name == self._old_instance_name:
        for idx, nic in enumerate(self.nics):
          if nic.mac == constants.VALUE_AUTO:
            nic_mac_ini = "nic%d_mac" % idx
            nic.mac = export_info.get(constants.INISECT_INS, nic_mac_ini)

    # ENDIF: self.op.mode == constants.INSTANCE_IMPORT

    # ip ping checks (we use the same ip that we haven't checked yet)
    if self.op.ip_check:
      if netutils.TcpPing(self.check_ip, constants.DEFAULT_NODED_PORT):
        raise errors.OpPrereqError("IP %s of instance %s already in use" %
                                   (self.check_ip, self.op.instance_name),
                                   errors.ECODE_NOTUNIQUE)

    #### mac address generation
    # By generating the mac address here, both the allocator and the hooks
    # get the real final mac address rather than the 'auto' or 'generate'
    # value; the generation can only happen at this point because for
    # networked NICs the primary node must already be known.
    for nic in self.nics:
      if nic.mac in (constants.VALUE_AUTO, constants.VALUE_GENERATE):
        nic.mac = self.cfg.GenerateMAC(nic.network, self.proc.GetECId())

    #### allocator run

    if self.op.iallocator is not None:
      if self.op.commit:
        self._GetNodesFromForthcomingInstance()
      else:
        self._RunAllocator()

    # Release all unneeded node locks
    keep_locks = filter(None, [self.op.pnode_uuid, self.op.snode_uuid,
                               self.op.src_node_uuid])
    ReleaseLocks(self, locking.LEVEL_NODE, keep=keep_locks)
    ReleaseLocks(self, locking.LEVEL_NODE_RES, keep=keep_locks)

    ReleaseLocks(self, locking.LEVEL_NODEGROUP,
                 keep=self.cfg.GetNodeGroupsFromNodes(keep_locks))

    assert (self.owned_locks(locking.LEVEL_NODE) ==
            self.owned_locks(locking.LEVEL_NODE_RES)), \
      ("Node locks differ from node resource locks (%s vs %s)"
       % (self.owned_locks(locking.LEVEL_NODE),
          self.owned_locks(locking.LEVEL_NODE_RES)))

    #### node related checks

    # check primary node
    self.pnode = pnode = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    assert self.pnode is not None, \
      "Cannot retrieve locked node %s" % self.op.pnode_uuid
    if pnode.offline:
      raise errors.OpPrereqError("Cannot use offline primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if pnode.drained:
      raise errors.OpPrereqError("Cannot use drained primary node '%s'" %
                                 pnode.name, errors.ECODE_STATE)
    if not pnode.vm_capable:
      raise errors.OpPrereqError("Cannot use non-vm_capable primary node"
                                 " '%s'" % pnode.name, errors.ECODE_STATE)

    self.secondaries = []

    # Fill in any IPs from IP pools. This must happen here, because we need
    # to know the nic's primary node, as specified by the iallocator
    for idx, nic in enumerate(self.nics):
      net_uuid = nic.network
      if net_uuid is not None:
        nobj = self.cfg.GetNetwork(net_uuid)
        netparams = self.cfg.GetGroupNetParams(net_uuid, self.pnode.uuid)
        if netparams is None:
          raise errors.OpPrereqError("No netparams found for network"
                                     " %s. Probably not connected to"
                                     " node's %s nodegroup" %
                                     (nobj.name, self.pnode.name),
                                     errors.ECODE_INVAL)
        self.LogInfo("NIC/%d inherits netparams %s" %
                     (idx, netparams.values()))
        nic.nicparams = dict(netparams)
        if nic.ip is not None:
          if nic.ip.lower() == constants.NIC_IP_POOL:
            try:
              nic.ip = self.cfg.GenerateIp(net_uuid, self.proc.GetECId())
            except errors.ReservationError:
              raise errors.OpPrereqError("Unable to get a free IP for NIC %d"
                                         " from the address pool" % idx,
                                         errors.ECODE_STATE)
            self.LogInfo("Chose IP %s from network %s", nic.ip, nobj.name)
          else:
            try:
              self.cfg.ReserveIp(net_uuid, nic.ip, self.proc.GetECId(),
                                 check=self.op.conflicts_check)
            except errors.ReservationError:
              raise errors.OpPrereqError("IP address %s already in use"
                                         " or does not belong to network %s" %
                                         (nic.ip, nobj.name),
                                         errors.ECODE_NOTUNIQUE)

      # net is None, ip None or given
      elif self.op.conflicts_check:
        CheckForConflictingIp(self, nic.ip, self.pnode.uuid)

    # mirror node verification
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      if self.op.snode_uuid == pnode.uuid:
        raise errors.OpPrereqError("The secondary node cannot be the"
                                   " primary node", errors.ECODE_INVAL)
      CheckNodeOnline(self, self.op.snode_uuid)
      CheckNodeNotDrained(self, self.op.snode_uuid)
      CheckNodeVmCapable(self, self.op.snode_uuid)
      self.secondaries.append(self.op.snode_uuid)

      snode = self.cfg.GetNodeInfo(self.op.snode_uuid)
      if pnode.group != snode.group:
        self.LogWarning("The primary and secondary nodes are in two"
                        " different node groups; the disk parameters"
                        " from the first disk's node group will be"
                        " used")

    nodes = [pnode]
    if self.op.disk_template in constants.DTS_INT_MIRROR:
      nodes.append(snode)
    has_es = lambda n: IsExclusiveStorageEnabledNode(self.cfg, n)
    excl_stor = compat.any(map(has_es, nodes))
    if excl_stor and not self.op.disk_template in constants.DTS_EXCL_STORAGE:
      raise errors.OpPrereqError("Disk template %s not supported with"
                                 " exclusive storage" % self.op.disk_template,
                                 errors.ECODE_STATE)
    for disk in self.disks:
      CheckSpindlesExclusiveStorage(disk, excl_stor, True)

    node_uuids = [pnode.uuid] + self.secondaries

    if not self.adopt_disks:
      if self.op.disk_template == constants.DT_RBD:
        # _CheckRADOSFreeSpace() is just a placeholder.
        # Any function that checks prerequisites can be placed here.
        # Check if there is enough space on the RADOS cluster.
        CheckRADOSFreeSpace()
      elif self.op.disk_template == constants.DT_EXT:
        # FIXME: Function that checks prereqs if needed
        pass
      elif self.op.disk_template in constants.DTS_LVM:
        # Check lv size requirements, if not adopting
        req_sizes = ComputeDiskSizePerVG(self.op.disk_template, self.disks)
        CheckNodesFreeDiskPerVG(self, node_uuids, req_sizes)
      else:
        # FIXME: add checks for other, non-adopting, non-lvm disk templates
        pass

    elif self.op.disk_template == constants.DT_PLAIN:
      all_lvs = set(["%s/%s" % (disk[constants.IDISK_VG],
                                disk[constants.IDISK_ADOPT])
                     for disk in self.disks])
      if len(all_lvs) != len(self.disks):
        raise errors.OpPrereqError("Duplicate volume names given for adoption",
                                   errors.ECODE_INVAL)
      for lv_name in all_lvs:
        try:
          # FIXME: lv_name here is "vg/lv"; we need to ensure that other
          # calls to ReserveLV use the same syntax
          self.cfg.ReserveLV(lv_name, self.proc.GetECId())
        except errors.ReservationError:
          raise errors.OpPrereqError("LV named %s used by another instance" %
                                     lv_name, errors.ECODE_NOTUNIQUE)

      vg_names = self.rpc.call_vg_list([pnode.uuid])[pnode.uuid]
      vg_names.Raise("Cannot get VG information from node %s" % pnode.name,
                     prereq=True)

      node_lvs = self.rpc.call_lv_list([pnode.uuid],
                                       vg_names.payload.keys())[pnode.uuid]
      node_lvs.Raise("Cannot get LV information from node %s" % pnode.name,
                     prereq=True)
      node_lvs = node_lvs.payload

      delta = all_lvs.difference(node_lvs.keys())
      if delta:
        raise errors.OpPrereqError("Missing logical volume(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      online_lvs = [lv for lv in all_lvs if node_lvs[lv][2]]
      if online_lvs:
        raise errors.OpPrereqError("Online logical volumes found, cannot"
                                   " adopt: %s" % utils.CommaJoin(online_lvs),
                                   errors.ECODE_STATE)
      # update the size of disk based on what is found
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_lvs["%s/%s" % (dsk[constants.IDISK_VG],
                                        dsk[constants.IDISK_ADOPT])][0]))

    elif self.op.disk_template == constants.DT_BLOCK:
      # Normalize and de-duplicate device paths
      all_disks = set([os.path.abspath(disk[constants.IDISK_ADOPT])
                       for disk in self.disks])
      if len(all_disks) != len(self.disks):
        raise errors.OpPrereqError("Duplicate disk names given for adoption",
                                   errors.ECODE_INVAL)
      baddisks = [d for d in all_disks
                  if not d.startswith(constants.ADOPTABLE_BLOCKDEV_ROOT)]
      if baddisks:
        raise errors.OpPrereqError("Device node(s) %s lie outside %s and"
                                   " cannot be adopted" %
                                   (utils.CommaJoin(baddisks),
                                    constants.ADOPTABLE_BLOCKDEV_ROOT),
                                   errors.ECODE_INVAL)

      node_disks = self.rpc.call_bdev_sizes([pnode.uuid],
                                            list(all_disks))[pnode.uuid]
      node_disks.Raise("Cannot get block device information from node %s" %
                       pnode.name, prereq=True)
      node_disks = node_disks.payload
      delta = all_disks.difference(node_disks.keys())
      if delta:
        raise errors.OpPrereqError("Missing block device(s): %s" %
                                   utils.CommaJoin(delta),
                                   errors.ECODE_INVAL)
      for dsk in self.disks:
        dsk[constants.IDISK_SIZE] = \
          int(float(node_disks[dsk[constants.IDISK_ADOPT]]))
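
    # Disk adoption note: both branches above only verify and size existing
    # volumes or block devices; for example (illustrative syntax, see the
    # gnt-instance man page) "gnt-instance add -t plain
    # --disk 0:adopt=<lv_name> ..." adopts an existing LV instead of
    # creating a new one.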

    # Check disk access param to be compatible with specified hypervisor
    node_info = self.cfg.GetNodeInfo(self.op.pnode_uuid)
    node_group = self.cfg.GetNodeGroup(node_info.group)
    group_disk_params = self.cfg.GetGroupDiskParams(node_group)
    group_access_type = group_disk_params[self.op.disk_template].get(
      constants.RBD_ACCESS, constants.DISK_KERNELSPACE
      )
    for dsk in self.disks:
      access_type = dsk.get(constants.IDISK_ACCESS, group_access_type)
      if not IsValidDiskAccessModeCombination(self.op.hypervisor,
                                              self.op.disk_template,
                                              access_type):
        raise errors.OpPrereqError("Selected hypervisor (%s) cannot be"
                                   " used with %s disk access param" %
                                   (self.op.hypervisor, access_type),
                                   errors.ECODE_STATE)

    # Verify instance specs
    spindle_use = self.be_full.get(constants.BE_SPINDLE_USE, None)
    ispec = {
      constants.ISPEC_MEM_SIZE: self.be_full.get(constants.BE_MAXMEM, None),
      constants.ISPEC_CPU_COUNT: self.be_full.get(constants.BE_VCPUS, None),
      constants.ISPEC_DISK_COUNT: len(self.disks),
      constants.ISPEC_DISK_SIZE: [disk[constants.IDISK_SIZE]
                                  for disk in self.disks],
      constants.ISPEC_NIC_COUNT: len(self.nics),
      constants.ISPEC_SPINDLE_USE: spindle_use,
      }

    group_info = self.cfg.GetNodeGroup(pnode.group)
    ipolicy = ganeti.masterd.instance.CalculateGroupIPolicy(cluster, group_info)
    disk_types = [self.op.disk_template] * len(self.disks)
    res = ComputeIPolicyInstanceSpecViolation(ipolicy, ispec, disk_types)
    if not self.op.ignore_ipolicy and res:
      msg = ("Instance allocation to group %s (%s) violates policy: %s" %
             (pnode.group, group_info.name, utils.CommaJoin(res)))
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

    CheckHVParams(self, node_uuids, self.op.hypervisor, self.op.hvparams)

    CheckOSParams(self, True, node_uuids, self.op.os_type, self.os_full,
                  self.op.force_variant)

    CheckNicsBridgesExist(self, self.nics, self.pnode.uuid)

    CheckCompressionTool(self, self.op.compress)

    # memory check on primary node
    #TODO(dynmem): use MINMEM for checking
    if self.op.start:
      hvfull = objects.FillDict(cluster.hvparams.get(self.op.hypervisor, {}),
                                self.op.hvparams)
      CheckNodeFreeMemory(self, self.pnode.uuid,
                          "creating instance %s" % self.op.instance_name,
                          self.be_full[constants.BE_MAXMEM],
                          self.op.hypervisor, hvfull)

    self.dry_run_result = list(node_uuids)

1141 """Removes degraded disks and instance.
1142
1143 It optionally checks whether disks are degraded. If the disks are
1144 degraded, they are removed and the instance is also removed from
1145 the configuration.
1146
1147 If L{disk_abort} is True, then the disks are considered degraded
1148 and removed, and the instance is removed from the configuration.
1149
1150 If L{disk_abort} is False, then it first checks whether disks are
1151 degraded and, if so, it removes the disks and the instance is
1152 removed from the configuration.
1153
1154 @type feedback_fn: callable
1155 @param feedback_fn: function used send feedback back to the caller
1156
1157 @type disk_abort: boolean
1158 @param disk_abort:
1159 True if disks are degraded, False to first check if disks are
1160 degraded
1161 @type instance: L{objects.Instance}
1162 @param instance: instance containing the disks to check
1163
1164 @rtype: NoneType
1165 @return: None
1166 @raise errors.OpPrereqError: if disks are degraded
1167
1168 """
1169 disk_info = self.cfg.GetInstanceDisks(instance.uuid)
1170 if disk_abort:
1171 pass
1172 elif self.op.wait_for_sync:
1173 disk_abort = not WaitForSync(self, instance)
1174 elif utils.AnyDiskOfType(disk_info, constants.DTS_INT_MIRROR):
1175
1176 feedback_fn("* checking mirrors status")
1177 disk_abort = not WaitForSync(self, instance, oneshot=True)
1178 else:
1179 disk_abort = False
1180
1181 if disk_abort:
1182 RemoveDisks(self, instance)
1183 for disk_uuid in instance.disks:
1184 self.cfg.RemoveInstanceDisk(instance.uuid, disk_uuid)
1185 self.cfg.RemoveInstance(instance.uuid)
1186 raise errors.OpExecError("There are some degraded disks for"
1187 " this instance")
1188
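  # This helper is called twice from Exec below: once after the optional
  # disk wipe and once after the optional imaging step, so that degraded or
  # failed disks at either point roll back the whole creation instead of
  # leaving a half-created instance in the configuration.
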
1190 """Run OS scripts
1191
1192 If necessary, disks are paused. It handles instance create,
1193 import, and remote import.
1194
1195 @type feedback_fn: callable
1196 @param feedback_fn: function used send feedback back to the caller
1197
1198 @type iobj: L{objects.Instance}
1199 @param iobj: instance object
1200
1201 """
1202 if iobj.disks and not self.adopt_disks:
1203 disks = self.cfg.GetInstanceDisks(iobj.uuid)
1204 if self.op.mode == constants.INSTANCE_CREATE:
1205 os_image = objects.GetOSImage(self.op.osparams)
1206
1207 if os_image is None and not self.op.no_install:
1208 pause_sync = (not self.op.wait_for_sync and
1209 utils.AnyDiskOfType(disks, constants.DTS_INT_MIRROR))
1210 if pause_sync:
1211 feedback_fn("* pausing disk sync to install instance OS")
1212 result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1213 (disks, iobj),
1214 True)
1215 for idx, success in enumerate(result.payload):
1216 if not success:
1217 logging.warn("pause-sync of instance %s for disk %d failed",
1218 self.op.instance_name, idx)
1219
1220 feedback_fn("* running the instance OS create scripts...")
1221
1222 os_add_result = \
1223 self.rpc.call_instance_os_add(self.pnode.uuid,
1224 (iobj, self.op.osparams_secret),
1225 False,
1226 self.op.debug_level)
1227 if pause_sync:
1228 feedback_fn("* resuming disk sync")
1229 result = self.rpc.call_blockdev_pause_resume_sync(self.pnode.uuid,
1230 (disks, iobj),
1231 False)
1232 for idx, success in enumerate(result.payload):
1233 if not success:
1234 logging.warn("resume-sync of instance %s for disk %d failed",
1235 self.op.instance_name, idx)
1236
1237 os_add_result.Raise("Could not add os for instance %s"
1238 " on node %s" % (self.op.instance_name,
1239 self.pnode.name))
1240
      else:
        if self.op.mode == constants.INSTANCE_IMPORT:
          feedback_fn("* running the instance OS import scripts...")

          transfers = []

          for idx, image in enumerate(self.src_images):
            if not image:
              continue

            if iobj.os:
              dst_io = constants.IEIO_SCRIPT
              dst_ioargs = ((disks[idx], iobj), idx)
            else:
              dst_io = constants.IEIO_RAW_DISK
              dst_ioargs = (disks[idx], iobj)

            # FIXME: pass debug option from opcode to backend
            dt = masterd.instance.DiskTransfer("disk/%s" % idx,
                                               constants.IEIO_FILE, (image, ),
                                               dst_io, dst_ioargs,
                                               None)
            transfers.append(dt)

          import_result = \
            masterd.instance.TransferInstanceData(self, feedback_fn,
                                                  self.op.src_node_uuid,
                                                  self.pnode.uuid,
                                                  self.pnode.secondary_ip,
                                                  self.op.compress,
                                                  iobj, transfers)
          if not compat.all(import_result):
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self._old_instance_name

        elif self.op.mode == constants.INSTANCE_REMOTE_IMPORT:
          feedback_fn("* preparing remote import...")
          # The source cluster will stop the instance before attempting to
          # make a connection. In some cases stopping an instance can take a
          # long time, hence the shutdown timeout is added to the connection
          # timeout.
          connect_timeout = (constants.RIE_CONNECT_TIMEOUT +
                             self.op.source_shutdown_timeout)
          timeouts = masterd.instance.ImportExportTimeouts(connect_timeout)

          assert iobj.primary_node == self.pnode.uuid
          disk_results = \
            masterd.instance.RemoteImport(self, feedback_fn, iobj, self.pnode,
                                          self.source_x509_ca,
                                          self._cds, self.op.compress, timeouts)
          if not compat.all(disk_results):
            # TODO: Should the instance still be started, even if some disks
            # failed to import (valid for local imports, too)?
            self.LogWarning("Some disks for instance %s on node %s were not"
                            " imported successfully" % (self.op.instance_name,
                                                        self.pnode.name))

          rename_from = self.source_instance_name

        else:
          # should not happen: the mode was validated in CheckArguments
          raise errors.ProgrammerError("Unknown OS initialization mode '%s'"
                                       % self.op.mode)

        assert iobj.name == self.op.instance_name

        # Run rename script on newly-imported instance
        if iobj.os:
          feedback_fn("Running rename script for %s" % self.op.instance_name)
          result = self.rpc.call_instance_run_rename(self.pnode.uuid, iobj,
                                                     rename_from,
                                                     self.op.debug_level)
          result.Warn("Failed to run rename script for %s on node %s" %
                      (self.op.instance_name, self.pnode.name), self.LogWarning)

1320 """Returns the OS scripts environment for the helper VM
1321
1322 @type instance: L{objects.Instance}
1323 @param instance: instance for which the OS scripts are run
1324
1325 @type script: string
1326 @param script: script to run (e.g.,
1327 constants.OS_SCRIPT_CREATE_UNTRUSTED)
1328
1329 @rtype: dict of string to string
1330 @return: OS scripts environment for the helper VM
1331
1332 """
1333 env = {"OS_SCRIPT": script}
1334
1335
1336 if instance.hypervisor == constants.HT_KVM:
1337 prefix = "/dev/vd"
1338 elif instance.hypervisor in [constants.HT_XEN_PVM, constants.HT_XEN_HVM]:
1339 prefix = "/dev/xvd"
1340 else:
1341 raise errors.OpExecError("Cannot run OS scripts in a virtualized"
1342 " environment for hypervisor '%s'"
1343 % instance.hypervisor)
1344
1345 num_disks = len(self.cfg.GetInstanceDisks(instance.uuid))
1346
1347 for idx, disk_label in enumerate(utils.GetDiskLabels(prefix, num_disks + 1,
1348 start=1)):
1349 env["DISK_%d_PATH" % idx] = disk_label
1350
1351 return env
1352
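  # For a two-disk KVM instance this returns roughly
  # {"OS_SCRIPT": script, "DISK_0_PATH": "/dev/vdb",
  #  "DISK_1_PATH": "/dev/vdc"}: labels start at the second device name
  # because the first one is taken by the helper VM's own disk.
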
1354 """Updates the OS parameter 'os-install-package' for an instance.
1355
1356 The OS install package is an archive containing an OS definition
1357 and a file containing the environment variables needed to run the
1358 OS scripts.
1359
1360 The OS install package is served by the metadata daemon to the
1361 instances, so the OS scripts can run inside the virtualized
1362 environment.
1363
1364 @type feedback_fn: callable
1365 @param feedback_fn: function used send feedback back to the caller
1366
1367 @type instance: L{objects.Instance}
1368 @param instance: instance for which the OS parameter
1369 'os-install-package' is updated
1370
1371 @type override_env: dict of string to string
1372 @param override_env: if supplied, it overrides the environment of
1373 the export OS scripts archive
1374
1375 """
1376 if "os-install-package" in instance.osparams:
1377 feedback_fn("Using OS install package '%s'" %
1378 instance.osparams["os-install-package"])
1379 else:
1380 result = self.rpc.call_os_export(instance.primary_node, instance,
1381 override_env)
1382 result.Raise("Could not export OS '%s'" % instance.os)
1383 instance.osparams["os-install-package"] = result.payload
1384
1385 feedback_fn("Created OS install package '%s'" % result.payload)
1386
1388 """Runs the OS scripts inside a safe virtualized environment.
1389
1390 The virtualized environment reuses the instance and temporarily
1391 creates a disk onto which the image of the helper VM is dumped.
1392 The temporary disk is used to boot the helper VM. The OS scripts
1393 are passed to the helper VM through the metadata daemon and the OS
1394 install package.
1395
1396 @type feedback_fn: callable
1397 @param feedback_fn: function used send feedback back to the caller
1398
1399 @type instance: L{objects.Instance}
1400 @param instance: instance for which the OS scripts must be run
1401 inside the virtualized environment
1402
1403 """
1404 install_image = self.cfg.GetInstallImage()
1405
1406 if not install_image:
1407 raise errors.OpExecError("Cannot create install instance because an"
1408 " install image has not been specified")
1409
1410 env = self.GetOsInstallPackageEnvironment(
1411 instance,
1412 constants.OS_SCRIPT_CREATE_UNTRUSTED)
1413 self.UpdateInstanceOsInstallPackage(feedback_fn, instance, env)
1414 UpdateMetadata(feedback_fn, self.rpc, instance,
1415 osparams_private=self.op.osparams_private,
1416 osparams_secret=self.op.osparams_secret)
1417
1418 RunWithHelperVM(self, instance, install_image,
1419 self.op.helper_startup_timeout,
1420 self.op.helper_shutdown_timeout,
1421 log_prefix="Running OS create script",
1422 feedback_fn=feedback_fn)
1423
  def Exec(self, feedback_fn):
    """Create and add the instance to the cluster.

    """
    assert not (self.owned_locks(locking.LEVEL_NODE_RES) -
                self.owned_locks(locking.LEVEL_NODE)), \
      "Node locks differ from node resource locks"

    ht_kind = self.op.hypervisor
    if ht_kind in constants.HTS_REQ_PORT:
      network_port = self.cfg.AllocatePort()
    else:
      network_port = None

    if self.op.commit:
      (instance_uuid, _) = self.cfg.ExpandInstanceName(self.op.instance_name)
    else:
      instance_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())

    # This is ugly but we got a chicken-egg problem here
    # We can only take the group disk parameters, as the instance
    # has no disks yet (we are generating them right here).
    nodegroup = self.cfg.GetNodeGroup(self.pnode.group)

    if self.op.commit:
      disks = self.cfg.GetInstanceDisks(instance_uuid)
      CommitDisks(disks)
    else:
      disks = GenerateDiskTemplate(self,
                                   self.op.disk_template,
                                   instance_uuid, self.pnode.uuid,
                                   self.secondaries,
                                   self.disks,
                                   self.instance_file_storage_dir,
                                   self.op.file_driver,
                                   0,
                                   feedback_fn,
                                   self.cfg.GetGroupDiskParams(nodegroup),
                                   forthcoming=self.op.forthcoming)

    if self.op.os_type is None:
      os_type = ""
    else:
      os_type = self.op.os_type

    iobj = objects.Instance(name=self.op.instance_name,
                            uuid=instance_uuid,
                            os=os_type,
                            primary_node=self.pnode.uuid,
                            nics=self.nics, disks=[],
                            disk_template=self.op.disk_template,
                            disks_active=False,
                            admin_state=constants.ADMINST_DOWN,
                            admin_state_source=constants.ADMIN_SOURCE,
                            network_port=network_port,
                            beparams=self.op.beparams,
                            hvparams=self.op.hvparams,
                            hypervisor=self.op.hypervisor,
                            osparams=self.op.osparams,
                            osparams_private=self.op.osparams_private,
                            forthcoming=self.op.forthcoming,
                            )

    if self.op.tags:
      for tag in self.op.tags:
        iobj.AddTag(tag)

    if self.adopt_disks:
      if self.op.disk_template == constants.DT_PLAIN:
        # rename LVs to the newly-generated names; we need to construct
        # 'fake' LV disks with the old data, plus the new unique_id
        tmp_disks = [objects.Disk.FromDict(v.ToDict()) for v in disks]
        rename_to = []
        for t_dsk, a_dsk in zip(tmp_disks, self.disks):
          rename_to.append(t_dsk.logical_id)
          t_dsk.logical_id = (t_dsk.logical_id[0], a_dsk[constants.IDISK_ADOPT])
        result = self.rpc.call_blockdev_rename(self.pnode.uuid,
                                               zip(tmp_disks, rename_to))
        result.Raise("Failed to rename adopted LVs")
    elif self.op.forthcoming:
      feedback_fn("Instance is forthcoming, not creating disks")
    else:
      feedback_fn("* creating instance disks...")
      try:
        CreateDisks(self, iobj, disks=disks)
      except errors.OpExecError:
        self.LogWarning("Device creation failed")
        for disk in disks:
          self.cfg.ReleaseDRBDMinors(disk.uuid)
        raise

    feedback_fn("adding instance %s to cluster config" % self.op.instance_name)
    self.cfg.AddInstance(iobj, self.proc.GetECId(), replace=self.op.commit)

    feedback_fn("adding disks to cluster config")
    for disk in disks:
      self.cfg.AddInstanceDisk(iobj.uuid, disk, replace=self.op.commit)

    if self.op.forthcoming:
      feedback_fn("Instance is forthcoming; not creating the actual instance")
      return self.cfg.GetNodeNames(list(self.cfg.GetInstanceNodes(iobj.uuid)))

    # re-read the instance from the configuration
    iobj = self.cfg.GetInstanceInfo(iobj.uuid)

    if self.op.mode == constants.INSTANCE_IMPORT:
      # Release unused nodes
      ReleaseLocks(self, locking.LEVEL_NODE, keep=[self.op.src_node_uuid])
    else:
      # Release all nodes
      ReleaseLocks(self, locking.LEVEL_NODE)

    # Wipe disks if the cluster is configured to do so after creation
    disk_abort = False
    if not self.adopt_disks and self.cfg.GetClusterInfo().prealloc_wipe_disks:
      feedback_fn("* wiping instance disks...")
      try:
        WipeDisks(self, iobj)
      except errors.OpExecError, err:
        logging.exception("Wiping disks failed")
        self.LogWarning("Wiping instance disks failed (%s)", err)
        disk_abort = True

    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)

    # Image disks, if an OS image was given
    os_image = objects.GetOSImage(iobj.osparams)
    disk_abort = False

    if not self.adopt_disks and os_image is not None:
      feedback_fn("* imaging instance disks...")
      try:
        ImageDisks(self, iobj, os_image)
      except errors.OpExecError, err:
        logging.exception("Imaging disks failed")
        self.LogWarning("Imaging instance disks failed (%s)", err)
        disk_abort = True

    self._RemoveDegradedDisks(feedback_fn, disk_abort, iobj)

    # instance disks are now active
    iobj.disks_active = True

    # Release all node resource locks
    ReleaseLocks(self, locking.LEVEL_NODE_RES)

    if iobj.os:
      result = self.rpc.call_os_diagnose([iobj.primary_node])[iobj.primary_node]
      result.Raise("Failed to get OS '%s'" % iobj.os)

      trusted = None

      for (name, _, _, _, _, _, _, os_trusted) in result.payload:
        if name == objects.OS.GetName(iobj.os):
          trusted = os_trusted
          break

      if trusted is None:
        raise errors.OpPrereqError("OS '%s' is not available in node '%s'" %
                                   (iobj.os, iobj.primary_node))
      elif trusted:
        self.RunOsScripts(feedback_fn, iobj)
      else:
        self.RunOsScriptsVirtualized(feedback_fn, iobj)

    # The instance may have been modified while running the OS scripts, so
    # read it back from the configuration
    iobj = self.cfg.GetInstanceInfo(iobj.uuid)

    # Update instance metadata so that it can be reached from the
    # metadata service
    UpdateMetadata(feedback_fn, self.rpc, iobj,
                   osparams_private=self.op.osparams_private,
                   osparams_secret=self.op.osparams_secret)

    assert not self.owned_locks(locking.LEVEL_NODE_RES)

    if self.op.start:
      iobj.admin_state = constants.ADMINST_UP
      self.cfg.Update(iobj, feedback_fn)
      logging.info("Starting instance %s on node %s", self.op.instance_name,
                   self.pnode.name)
      feedback_fn("* starting instance...")
      result = self.rpc.call_instance_start(self.pnode.uuid,
                                            (iobj, None, None),
                                            False, self.op.reason)
      result.Raise("Could not start instance")

    return self.cfg.GetNodeNames(list(self.cfg.GetInstanceNodes(iobj.uuid)))

  def PrepareRetry(self, feedback_fn):
    # A temporary lack of resources can only happen if opportunistic locking
    # is used
    assert self.op.opportunistic_locking

    logging.info("Opportunistic locking did not succeed, falling back to"
                 " full lock allocation")
    feedback_fn("* falling back to full lock allocation")
    self.op.opportunistic_locking = False

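  # PrepareRetry runs when the opcode failed with ECODE_TEMP_NORES (raised
  # in _RunAllocator above): disabling opportunistic locking makes the
  # retried job acquire the full node locks, so the allocator can consider
  # every node in the cluster.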