1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31 """Module implementing the iallocator code."""
32
33 from ganeti import compat
34 from ganeti import constants
35 from ganeti import errors
36 from ganeti import ht
37 from ganeti import outils
38 from ganeti import opcodes
39 from ganeti import rpc
40 from ganeti import serializer
41 from ganeti import utils
42
43 import ganeti.masterd.instance as gmi
44
45
# Type validators for iallocator request parameters and results.
_STRING_LIST = ht.TListOf(ht.TString)

# A "job list" is a list of jobs, each job being a list of opcode dicts;
# only these instance-move opcodes may appear in iallocator-produced jobs.
_JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
  # pylint: disable=E1101
  # (pylint cannot see the auto-generated OP_ID class attribute)
  "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
                       opcodes.OpInstanceMigrate.OP_ID,
                       opcodes.OpInstanceReplaceDisks.OP_ID]),
  })))

# Node-evacuation result components: instances successfully moved
# (instance name, target group, job list) ...
_NEVAC_MOVED = \
  ht.TListOf(ht.TAnd(ht.TIsLength(3),
                     ht.TItems([ht.TNonEmptyString,
                                ht.TNonEmptyString,
                                ht.TListOf(ht.TNonEmptyString),
                                ])))
# ... and instances that failed to move (instance name, error message).
_NEVAC_FAILED = \
  ht.TListOf(ht.TAnd(ht.TIsLength(2),
                     ht.TItems([ht.TNonEmptyString,
                                ht.TMaybeString,
                                ])))
_NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
                        ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))

# (name, validator) request-parameter pairs shared by several request types
_INST_NAME = ("name", ht.TNonEmptyString)
_INST_UUID = ("inst_uuid", ht.TNonEmptyString)
# NOTE(review): the class and method header lines were missing from this
# listing; they are reconstructed here (metaclass deriving from
# outils.AutoSlots, per its use as __metaclass__ below) -- confirm against
# the upstream source.
class _AutoReqParam(outils.AutoSlots):
  """Meta class for request definitions.

  """
  @classmethod
  def _GetSlots(mcs, attrs):
    """Extract the slots out of REQ_PARAMS.

    Collects the attribute names from the class's REQ_PARAMS list of
    (name, validator) pairs so they become instance slots.

    """
    params = attrs.setdefault("REQ_PARAMS", [])
    return [slot for (slot, _) in params]
84
# NOTE(review): the class and method header lines were missing from this
# listing and are reconstructed; confirm against the upstream source.
class IARequestBase(outils.ValidatedSlots):
  """A generic IAllocator request object.

  """
  __metaclass__ = _AutoReqParam

  # Filled in by concrete subclasses:
  MODE = NotImplemented        # iallocator mode (constants.IALLOCATOR_MODE_*)
  REQ_PARAMS = []              # list of (name, validator) request parameters
  REQ_RESULT = NotImplemented  # validator for the iallocator's result

  def __init__(self, **kwargs):
    """Constructor for IARequestBase.

    The constructor takes only keyword arguments and will set
    attributes on this object based on the passed arguments. As such,
    it means that you should not pass arguments which are not in the
    REQ_PARAMS attribute for this class.

    """
    outils.ValidatedSlots.__init__(self, **kwargs)

    self.Validate()

  # NOTE(review): a Validate() override appears to be elided from this
  # listing (it is invoked above); confirm against the upstream source.

  def GetRequest(self, cfg):
    """Gets the request data dict.

    Must be overridden by subclasses to build the mode-specific request.

    @param cfg: The configuration instance

    """
    raise NotImplementedError

  def ValidateResult(self, ia, result):
    """Validates the result of an request.

    @param ia: The IAllocator instance
    @param result: The IAllocator run result
    @raises ResultValidationError: If validation fails

    """
    # Only a successful run is required to carry a well-formed result.
    if ia.success and not self.REQ_RESULT(result):
      raise errors.ResultValidationError("iallocator returned invalid result,"
                                         " expected %s, got %s" %
                                         (self.REQ_RESULT, result))
147
# NOTE(review): the class and method header lines were missing from this
# listing and are reconstructed; confirm against the upstream source.
class IAReqInstanceAlloc(IARequestBase):
  """An instance allocation request.

  """
  MODE = constants.IALLOCATOR_MODE_ALLOC
  REQ_PARAMS = [
    _INST_NAME,
    ("memory", ht.TNonNegativeInt),
    ("spindle_use", ht.TNonNegativeInt),
    ("disks", ht.TListOf(ht.TDict)),
    ("disk_template", ht.TString),
    ("os", ht.TString),
    ("tags", _STRING_LIST),
    ("nics", ht.TListOf(ht.TDict)),
    ("vcpus", ht.TInt),
    ("hypervisor", ht.TString),
    ("node_whitelist", ht.TMaybeListOf(ht.TNonEmptyString)),
    ]
  REQ_RESULT = ht.TList

  def RequiredNodes(self):
    """Calculates the required nodes based on the disk_template.

    """
    # Internally mirrored templates (e.g. DRBD) need a secondary node too.
    if self.disk_template in constants.DTS_INT_MIRROR:
      return 2
    else:
      return 1

  def GetRequest(self, cfg):
    """Requests a new instance.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = gmi.ComputeDiskSize(self.disk_template, self.disks)

    return {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.memory,
      "spindle_use": self.spindle_use,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.RequiredNodes(),
      "hypervisor": self.hypervisor,
      }
202
213
237
# NOTE(review): the class and method header lines were missing from this
# listing and are reconstructed; confirm against the upstream source.
class IAReqRelocate(IARequestBase):
  """A relocation request.

  """
  MODE = constants.IALLOCATOR_MODE_RELOC
  REQ_PARAMS = [
    _INST_UUID,
    ("relocate_from_node_uuids", _STRING_LIST),
    ]
  REQ_RESULT = ht.TList

  # NOTE(review): GetRequest/ValidateResult implementations appear to be
  # elided from this listing; confirm against the upstream source.

  @staticmethod
  def _NodesToGroups(node2group, groups, nodes):
    """Returns a list of unique group names for a list of nodes.

    @type node2group: dict
    @param node2group: Map from node name to group UUID
    @type groups: dict
    @param groups: Group information
    @type nodes: list
    @param nodes: Node names

    """
    result = set()

    for node in nodes:
      try:
        group_uuid = node2group[node]
      except KeyError:
        # nodes without a known group are silently skipped
        pass
      else:
        try:
          group = groups[group_uuid]
        except KeyError:
          # unknown group UUIDs are reported verbatim in place of a name
          group_name = group_uuid
        else:
          group_name = group["name"]

        result.add(group_name)

    return sorted(result)
338
360
382
# NOTE(review): the class header line was missing from this listing and is
# reconstructed; confirm against the upstream source.
class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has three sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # pylint: disable=R0902
  # lots of instance attributes
400 - def __init__(self, cfg, rpc_runner, req):
401 self.cfg = cfg
402 self.rpc = rpc_runner
403 self.req = req
404
405 self.in_text = self.out_text = self.in_data = self.out_data = None
406
407 self.success = self.info = self.result = None
408
409 self._BuildInputData(req)
410
413 """Prepare and execute node info call.
414
415 @type disk_templates: list of string
416 @param disk_templates: the disk templates of the instances to be allocated
417 @type node_list: list of strings
418 @param node_list: list of nodes' UUIDs
419 @type cluster_info: L{objects.Cluster}
420 @param cluster_info: the cluster's information from the config
421 @type hypervisor_name: string
422 @param hypervisor_name: the hypervisor name
423 @rtype: same as the result of the node info RPC call
424 @return: the result of the node info RPC call
425
426 """
427 storage_units_raw = utils.storage.GetStorageUnits(self.cfg, disk_templates)
428 storage_units = rpc.PrepareStorageUnitsForNodes(self.cfg, storage_units_raw,
429 node_list)
430 hvspecs = [(hypervisor_name, cluster_info.hvparams[hypervisor_name])]
431 return self.rpc.call_node_info(node_list, storage_units, hvspecs)
432
434 """Compute the generic allocator input data.
435
436 @type disk_template: list of string
437 @param disk_template: the disk templates of the instances to be allocated
438
439 """
440 cluster_info = self.cfg.GetClusterInfo()
441
442 data = {
443 "version": constants.IALLOCATOR_VERSION,
444 "cluster_name": self.cfg.GetClusterName(),
445 "cluster_tags": list(cluster_info.GetTags()),
446 "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
447 "ipolicy": cluster_info.ipolicy,
448 }
449 ninfo = self.cfg.GetAllNodesInfo()
450 iinfo = self.cfg.GetAllInstancesInfo().values()
451 i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo]
452
453
454 node_list = [n.uuid for n in ninfo.values() if n.vm_capable]
455
456 if isinstance(self.req, IAReqInstanceAlloc):
457 hypervisor_name = self.req.hypervisor
458 node_whitelist = self.req.node_whitelist
459 elif isinstance(self.req, IAReqRelocate):
460 hypervisor_name = self.cfg.GetInstanceInfo(self.req.inst_uuid).hypervisor
461 node_whitelist = None
462 else:
463 hypervisor_name = cluster_info.primary_hypervisor
464 node_whitelist = None
465
466 if not disk_template:
467 disk_template = cluster_info.enabled_disk_templates[0]
468
469 node_data = self._ComputeClusterDataNodeInfo([disk_template], node_list,
470 cluster_info, hypervisor_name)
471
472 node_iinfo = \
473 self.rpc.call_all_instances_info(node_list,
474 cluster_info.enabled_hypervisors,
475 cluster_info.hvparams)
476
477 data["nodegroups"] = self._ComputeNodeGroupData(self.cfg)
478
479 config_ndata = self._ComputeBasicNodeData(self.cfg, ninfo, node_whitelist)
480 data["nodes"] = self._ComputeDynamicNodeData(
481 ninfo, node_data, node_iinfo, i_list, config_ndata, disk_template)
482 assert len(data["nodes"]) == len(ninfo), \
483 "Incomplete node data computed"
484
485 data["instances"] = self._ComputeInstanceData(self.cfg, cluster_info,
486 i_list)
487
488 self.in_data = data
489
490 @staticmethod
492 """Compute node groups data.
493
494 """
495 cluster = cfg.GetClusterInfo()
496 ng = dict((guuid, {
497 "name": gdata.name,
498 "alloc_policy": gdata.alloc_policy,
499 "networks": [net_uuid for net_uuid, _ in gdata.networks.items()],
500 "ipolicy": gmi.CalculateGroupIPolicy(cluster, gdata),
501 "tags": list(gdata.GetTags()),
502 })
503 for guuid, gdata in cfg.GetAllNodeGroupsInfo().items())
504
505 return ng
506
507 @staticmethod
509 """Compute global node data.
510
511 @rtype: dict
512 @returns: a dict of name: (node dict, node config)
513
514 """
515
516 node_results = dict((ninfo.name, {
517 "tags": list(ninfo.GetTags()),
518 "primary_ip": ninfo.primary_ip,
519 "secondary_ip": ninfo.secondary_ip,
520 "offline": (ninfo.offline or
521 not (node_whitelist is None or
522 ninfo.name in node_whitelist)),
523 "drained": ninfo.drained,
524 "master_candidate": ninfo.master_candidate,
525 "group": ninfo.group,
526 "master_capable": ninfo.master_capable,
527 "vm_capable": ninfo.vm_capable,
528 "ndparams": cfg.GetNdParams(ninfo),
529 })
530 for ninfo in node_cfg.values())
531
532 return node_results
533
534 @staticmethod
536 """Extract an attribute from the hypervisor's node information.
537
538 This is a helper function to extract data from the hypervisor's information
539 about the node, as part of the result of a node_info query.
540
541 @type hv_info: dict of strings
542 @param hv_info: dictionary of node information from the hypervisor
543 @type node_name: string
544 @param node_name: name of the node
545 @type attr: string
546 @param attr: key of the attribute in the hv_info dictionary
547 @rtype: integer
548 @return: the value of the attribute
549 @raises errors.OpExecError: if key not in dictionary or value not
550 integer
551
552 """
553 if attr not in hv_info:
554 raise errors.OpExecError("Node '%s' didn't return attribute"
555 " '%s'" % (node_name, attr))
556 value = hv_info[attr]
557 if not isinstance(value, int):
558 raise errors.OpExecError("Node '%s' returned invalid value"
559 " for '%s': %s" %
560 (node_name, attr, value))
561 return value
562
563 @staticmethod
566 """Extract storage data from node info.
567
568 @type space_info: see result of the RPC call node info
569 @param space_info: the storage reporting part of the result of the RPC call
570 node info
571 @type node_name: string
572 @param node_name: the node's name
573 @type disk_template: string
574 @param disk_template: the disk template to report space for
575 @rtype: 4-tuple of integers
576 @return: tuple of storage info (total_disk, free_disk, total_spindles,
577 free_spindles)
578
579 """
580 storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template]
581 if storage_type not in constants.STS_REPORT:
582 total_disk = total_spindles = 0
583 free_disk = free_spindles = 0
584 else:
585 template_space_info = utils.storage.LookupSpaceInfoByDiskTemplate(
586 space_info, disk_template)
587 if not template_space_info:
588 raise errors.OpExecError("Node '%s' didn't return space info for disk"
589 "template '%s'" % (node_name, disk_template))
590 total_disk = template_space_info["storage_size"]
591 free_disk = template_space_info["storage_free"]
592
593 total_spindles = 0
594 free_spindles = 0
595 if disk_template in constants.DTS_LVM:
596 lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
597 space_info, constants.ST_LVM_PV)
598 if lvm_pv_info:
599 total_spindles = lvm_pv_info["storage_size"]
600 free_spindles = lvm_pv_info["storage_free"]
601 return (total_disk, free_disk, total_spindles, free_spindles)
602
603 @staticmethod
605 """Extract storage data from node info.
606
607 @type space_info: see result of the RPC call node info
608 @param space_info: the storage reporting part of the result of the RPC call
609 node info
610 @type node_name: string
611 @param node_name: the node's name
612 @type has_lvm: boolean
613 @param has_lvm: whether or not LVM storage information is requested
614 @rtype: 4-tuple of integers
615 @return: tuple of storage info (total_disk, free_disk, total_spindles,
616 free_spindles)
617
618 """
619
620 if has_lvm:
621 lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
622 space_info, constants.ST_LVM_VG)
623 if not lvm_vg_info:
624 raise errors.OpExecError("Node '%s' didn't return LVM vg space info."
625 % (node_name))
626 total_disk = lvm_vg_info["storage_size"]
627 free_disk = lvm_vg_info["storage_free"]
628 lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
629 space_info, constants.ST_LVM_PV)
630 if not lvm_pv_info:
631 raise errors.OpExecError("Node '%s' didn't return LVM pv space info."
632 % (node_name))
633 total_spindles = lvm_pv_info["storage_size"]
634 free_spindles = lvm_pv_info["storage_free"]
635 else:
636
637 total_disk = free_disk = 0
638 total_spindles = free_spindles = 0
639 return (total_disk, free_disk, total_spindles, free_spindles)
640
641 @staticmethod
644 """Compute memory used by primary instances.
645
646 @rtype: tuple (int, int, int)
647 @returns: A tuple of three integers: 1. the sum of memory used by primary
648 instances on the node (including the ones that are currently down), 2.
649 the sum of memory used by primary instances of the node that are up, 3.
650 the amount of memory that is free on the node considering the current
651 usage of the instances.
652
653 """
654 i_p_mem = i_p_up_mem = 0
655 mem_free = input_mem_free
656 for iinfo, beinfo in instance_list:
657 if iinfo.primary_node == node_uuid:
658 i_p_mem += beinfo[constants.BE_MAXMEM]
659 if iinfo.name not in node_instances_info[node_uuid].payload:
660 i_used_mem = 0
661 else:
662 i_used_mem = int(node_instances_info[node_uuid]
663 .payload[iinfo.name]["memory"])
664 i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
665 mem_free -= max(0, i_mem_diff)
666
667 if iinfo.admin_state == constants.ADMINST_UP:
668 i_p_up_mem += beinfo[constants.BE_MAXMEM]
669 return (i_p_mem, i_p_up_mem, mem_free)
670
673 """Compute global node data.
674
675 @param node_results: the basic node structures as filled from the config
676
677 """
678
679
680 node_results = dict(node_results)
681 for nuuid, nresult in node_data.items():
682 ninfo = node_cfg[nuuid]
683 assert ninfo.name in node_results, "Missing basic data for node %s" % \
684 ninfo.name
685
686 if not ninfo.offline:
687 nresult.Raise("Can't get data for node %s" % ninfo.name)
688 node_iinfo[nuuid].Raise("Can't get node instance info from node %s" %
689 ninfo.name)
690 (_, space_info, (hv_info, )) = nresult.payload
691
692 mem_free = self._GetAttributeFromHypervisorNodeData(hv_info, ninfo.name,
693 "memory_free")
694
695 (i_p_mem, i_p_up_mem, mem_free) = self._ComputeInstanceMemory(
696 i_list, node_iinfo, nuuid, mem_free)
697 (total_disk, free_disk, total_spindles, free_spindles) = \
698 self._ComputeStorageDataFromSpaceInfoByTemplate(
699 space_info, ninfo.name, disk_template)
700
701
702 pnr_dyn = {
703 "total_memory": self._GetAttributeFromHypervisorNodeData(
704 hv_info, ninfo.name, "memory_total"),
705 "reserved_memory": self._GetAttributeFromHypervisorNodeData(
706 hv_info, ninfo.name, "memory_dom0"),
707 "free_memory": mem_free,
708 "total_disk": total_disk,
709 "free_disk": free_disk,
710 "total_spindles": total_spindles,
711 "free_spindles": free_spindles,
712 "total_cpus": self._GetAttributeFromHypervisorNodeData(
713 hv_info, ninfo.name, "cpu_total"),
714 "reserved_cpus": self._GetAttributeFromHypervisorNodeData(
715 hv_info, ninfo.name, "cpu_dom0"),
716 "i_pri_memory": i_p_mem,
717 "i_pri_up_memory": i_p_up_mem,
718 }
719 pnr_dyn.update(node_results[ninfo.name])
720 node_results[ninfo.name] = pnr_dyn
721
722 return node_results
723
724 @staticmethod
726 """Compute global instance data.
727
728 """
729 instance_data = {}
730 for iinfo, beinfo in i_list:
731 nic_data = []
732 for nic in iinfo.nics:
733 filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
734 nic_dict = {
735 "mac": nic.mac,
736 "ip": nic.ip,
737 "mode": filled_params[constants.NIC_MODE],
738 "link": filled_params[constants.NIC_LINK],
739 }
740 if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
741 nic_dict["bridge"] = filled_params[constants.NIC_LINK]
742 nic_data.append(nic_dict)
743 pir = {
744 "tags": list(iinfo.GetTags()),
745 "admin_state": iinfo.admin_state,
746 "vcpus": beinfo[constants.BE_VCPUS],
747 "memory": beinfo[constants.BE_MAXMEM],
748 "spindle_use": beinfo[constants.BE_SPINDLE_USE],
749 "os": iinfo.os,
750 "nodes": [cfg.GetNodeName(iinfo.primary_node)] +
751 cfg.GetNodeNames(iinfo.secondary_nodes),
752 "nics": nic_data,
753 "disks": [{constants.IDISK_SIZE: dsk.size,
754 constants.IDISK_MODE: dsk.mode,
755 constants.IDISK_SPINDLES: dsk.spindles}
756 for dsk in iinfo.disks],
757 "disk_template": iinfo.disk_template,
758 "disks_active": iinfo.disks_active,
759 "hypervisor": iinfo.hypervisor,
760 }
761 pir["disk_space_total"] = gmi.ComputeDiskSize(iinfo.disk_template,
762 pir["disks"])
763 instance_data[iinfo.name] = pir
764
765 return instance_data
766
781
782 - def Run(self, name, validate=True, call_fn=None):
783 """Run an instance allocator and return the results.
784
785 """
786 if call_fn is None:
787 call_fn = self.rpc.call_iallocator_runner
788
789 result = call_fn(self.cfg.GetMasterNode(), name, self.in_text)
790 result.Raise("Failure while running the iallocator script")
791
792 self.out_text = result.payload
793 if validate:
794 self._ValidateResult()
795
797 """Process the allocator results.
798
799 This will process and if successful save the result in
800 self.out_data and the other parameters.
801
802 """
803 try:
804 rdict = serializer.Load(self.out_text)
805 except Exception, err:
806 raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))
807
808 if not isinstance(rdict, dict):
809 raise errors.OpExecError("Can't parse iallocator results: not a dict")
810
811
812 if "nodes" in rdict and "result" not in rdict:
813 rdict["result"] = rdict["nodes"]
814 del rdict["nodes"]
815
816 for key in "success", "info", "result":
817 if key not in rdict:
818 raise errors.OpExecError("Can't parse iallocator results:"
819 " missing key '%s'" % key)
820 setattr(self, key, rdict[key])
821
822 self.req.ValidateResult(self, self.result)
823 self.out_data = rdict
824