Source Code for Module ganeti.masterd.iallocator

#
#

# Copyright (C) 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


"""Module implementing the iallocator code."""

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import outils
from ganeti import opcodes
import ganeti.rpc.node as rpc
from ganeti import serializer
from ganeti import utils

import ganeti.masterd.instance as gmi


_STRING_LIST = ht.TListOf(ht.TString)
_JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
   # pylint: disable=E1101
   # Class '...' has no 'OP_ID' member
   "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
                        opcodes.OpInstanceMigrate.OP_ID,
                        opcodes.OpInstanceReplaceDisks.OP_ID]),
   })))

_NEVAC_MOVED = \
  ht.TListOf(ht.TAnd(ht.TIsLength(3),
                     ht.TItems([ht.TNonEmptyString,
                                ht.TNonEmptyString,
                                ht.TListOf(ht.TNonEmptyString),
                                ])))
_NEVAC_FAILED = \
  ht.TListOf(ht.TAnd(ht.TIsLength(2),
                     ht.TItems([ht.TNonEmptyString,
                                ht.TMaybeString,
                                ])))
_NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
                        ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))

_INST_NAME = ("name", ht.TNonEmptyString)
_INST_UUID = ("inst_uuid", ht.TNonEmptyString)
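
# Illustrative sketch (not part of the original module): the ht combinators
# above are plain predicates, so a composed validator such as _NEVAC_RESULT
# can be called directly on a decoded result. All values below are made up.
#
#   sample_nevac = [
#     [["inst1.example.com", "group-uuid", ["node2.example.com"]]],  # moved
#     [["inst2.example.com", "no space on target group"]],           # failed
#     [],                                                            # jobs
#     ]
#   _NEVAC_RESULT(sample_nevac)  # should evaluate to True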


class _AutoReqParam(outils.AutoSlots):
  """Meta class for request definitions.

  """
  @classmethod
  def _GetSlots(mcs, attrs):
    """Extract the slots out of REQ_PARAMS.

    """
    params = attrs.setdefault("REQ_PARAMS", [])
    return [slot for (slot, _) in params]


class IARequestBase(outils.ValidatedSlots):
  """A generic IAllocator request object.

  """
  __metaclass__ = _AutoReqParam

  MODE = NotImplemented
  REQ_PARAMS = []
  REQ_RESULT = NotImplemented

  def __init__(self, **kwargs):
    """Constructor for IARequestBase.

    The constructor takes only keyword arguments and will set
    attributes on this object based on the passed arguments. As such,
    it means that you should not pass arguments which are not in the
    REQ_PARAMS attribute for this class.

    """
    outils.ValidatedSlots.__init__(self, **kwargs)

    self.Validate()

  def Validate(self):
    """Validates all parameters of the request.

    This method returns L{None} if the validation succeeds, or raises
    an exception otherwise.

    @rtype: NoneType
    @return: L{None}, if the validation succeeds

    @raise Exception: validation fails

    """
    assert self.MODE in constants.VALID_IALLOCATOR_MODES

    for (param, validator) in self.REQ_PARAMS:
      if not hasattr(self, param):
        raise errors.OpPrereqError("Request is missing '%s' parameter" % param,
                                   errors.ECODE_INVAL)

      value = getattr(self, param)
      if not validator(value):
        raise errors.OpPrereqError(("Request parameter '%s' has invalid"
                                    " type %s/value %s") %
                                   (param, type(value), value),
                                   errors.ECODE_INVAL)

  def GetRequest(self, cfg):
    """Gets the request data dict.

    @param cfg: The configuration instance

    """
    raise NotImplementedError

  def ValidateResult(self, ia, result):
    """Validates the result of a request.

    @param ia: The IAllocator instance
    @param result: The IAllocator run result
    @raises ResultValidationError: If validation fails

    """
    if ia.success and not self.REQ_RESULT(result):
      raise errors.ResultValidationError("iallocator returned invalid result,"
                                         " expected %s, got %s" %
                                         (self.REQ_RESULT, result))
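
# Illustrative sketch (not part of the original module): a subclass only needs
# to declare MODE, REQ_PARAMS and REQ_RESULT; the _AutoReqParam metaclass turns
# the REQ_PARAMS names into slots and __init__ validates them. The request type
# below is hypothetical.
#
#   class _IAReqExample(IARequestBase):
#     MODE = constants.IALLOCATOR_MODE_RELOC
#     REQ_PARAMS = [("inst_uuid", ht.TNonEmptyString)]
#     REQ_RESULT = ht.TList
#
#     def GetRequest(self, cfg):
#       return {"inst_uuid": self.inst_uuid}
#
#   _IAReqExample(inst_uuid="1234-abcd")  # validates on construction
#   _IAReqExample()                       # raises OpPrereqError (missing param)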


class IAReqInstanceAlloc(IARequestBase):
  """An instance allocation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_ALLOC
  REQ_PARAMS = [
    _INST_NAME,
    ("memory", ht.TNonNegativeInt),
    ("spindle_use", ht.TNonNegativeInt),
    ("disks", ht.TListOf(ht.TDict)),
    ("disk_template", ht.TString),
    ("group_name", ht.TMaybe(ht.TNonEmptyString)),
    ("os", ht.TString),
    ("tags", _STRING_LIST),
    ("nics", ht.TListOf(ht.TDict)),
    ("vcpus", ht.TInt),
    ("hypervisor", ht.TString),
    ("node_whitelist", ht.TMaybeListOf(ht.TNonEmptyString)),
    ]
  REQ_RESULT = ht.TList

  def RequiredNodes(self):
    """Calculates the required nodes based on the disk_template.

    """
    if self.disk_template in constants.DTS_INT_MIRROR:
      return 2
    else:
      return 1

  def GetRequest(self, cfg):
    """Requests a new instance.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = gmi.ComputeDiskSize(self.disk_template, self.disks)

    return {
      "name": self.name,
      "disk_template": self.disk_template,
      "group_name": self.group_name,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.memory,
      "spindle_use": self.spindle_use,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.RequiredNodes(),
      "hypervisor": self.hypervisor,
      }

  def ValidateResult(self, ia, result):
    """Validates a single instance allocation request.

    """
    IARequestBase.ValidateResult(self, ia, result)

    if ia.success and len(result) != self.RequiredNodes():
      raise errors.ResultValidationError("iallocator returned invalid number"
                                         " of nodes (%s), required %s" %
                                         (len(result), self.RequiredNodes()))
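
# Illustrative sketch (not part of the original module): building an allocation
# request; the keyword arguments must match REQ_PARAMS exactly and all values
# below are made-up examples.
#
#   req = IAReqInstanceAlloc(name="inst1.example.com",
#                            memory=1024, spindle_use=1,
#                            disks=[{"size": 10240}],
#                            disk_template=constants.DT_DRBD8,
#                            group_name=None, os="debian-image",
#                            tags=[], nics=[{}], vcpus=2,
#                            hypervisor=constants.HT_XEN_PVM,
#                            node_whitelist=None)
#   req.RequiredNodes()  # 2, because DRBD is an internally mirrored template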


class IAReqMultiInstanceAlloc(IARequestBase):
  """A multi-instance allocation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_MULTI_ALLOC
  REQ_PARAMS = [
    ("instances", ht.TListOf(ht.TInstanceOf(IAReqInstanceAlloc))),
    ]
  _MASUCCESS = \
    ht.TListOf(ht.TAnd(ht.TIsLength(2),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TListOf(ht.TNonEmptyString),
                                  ])))
  _MAFAILED = ht.TListOf(ht.TNonEmptyString)
  REQ_RESULT = ht.TAnd(ht.TList, ht.TIsLength(2),
                       ht.TItems([_MASUCCESS, _MAFAILED]))

  def GetRequest(self, cfg):
    return {
      "instances": [iareq.GetRequest(cfg) for iareq in self.instances],
      }


class IAReqRelocate(IARequestBase):
  """A relocation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_RELOC
  REQ_PARAMS = [
    _INST_UUID,
    ("relocate_from_node_uuids", _STRING_LIST),
    ]
  REQ_RESULT = ht.TList

  def GetRequest(self, cfg):
    """Request a relocation of an instance.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = cfg.GetInstanceInfo(self.inst_uuid)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.inst_uuid)

    if instance.disk_template not in constants.DTS_MIRRORED:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    secondary_nodes = cfg.GetInstanceSecondaryNodes(instance.uuid)
    if (instance.disk_template in constants.DTS_INT_MIRROR and
        len(secondary_nodes) != 1):
      raise errors.OpPrereqError("Instance has not exactly one secondary node",
                                 errors.ECODE_STATE)

    inst_disks = cfg.GetInstanceDisks(instance.uuid)
    disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in inst_disks]
    disk_space = gmi.ComputeDiskSize(instance.disk_template, disk_sizes)

    return {
      "name": instance.name,
      "disk_space_total": disk_space,
      "required_nodes": 1,
      "relocate_from": cfg.GetNodeNames(self.relocate_from_node_uuids),
      }

  def ValidateResult(self, ia, result):
    """Validates the result of a relocation request.

    """
    IARequestBase.ValidateResult(self, ia, result)

    node2group = dict((name, ndata["group"])
                      for (name, ndata) in ia.in_data["nodes"].items())

    fn = compat.partial(self._NodesToGroups, node2group,
                        ia.in_data["nodegroups"])

    instance = ia.cfg.GetInstanceInfo(self.inst_uuid)
    request_groups = fn(ia.cfg.GetNodeNames(self.relocate_from_node_uuids) +
                        ia.cfg.GetNodeNames([instance.primary_node]))
    result_groups = fn(result + ia.cfg.GetNodeNames([instance.primary_node]))

    if ia.success and not set(result_groups).issubset(request_groups):
      raise errors.ResultValidationError("Groups of nodes returned by"
                                         " iallocator (%s) differ from"
                                         " original groups (%s)" %
                                         (utils.CommaJoin(result_groups),
                                          utils.CommaJoin(request_groups)))

  @staticmethod
  def _NodesToGroups(node2group, groups, nodes):
    """Returns a list of unique group names for a list of nodes.

    @type node2group: dict
    @param node2group: Map from node name to group UUID
    @type groups: dict
    @param groups: Group information
    @type nodes: list
    @param nodes: Node names

    """
    result = set()

    for node in nodes:
      try:
        group_uuid = node2group[node]
      except KeyError:
        # Ignore unknown node
        pass
      else:
        try:
          group = groups[group_uuid]
        except KeyError:
          # Can't find group, let's use UUID
          group_name = group_uuid
        else:
          group_name = group["name"]

        result.add(group_name)

    return sorted(result)
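
# Illustrative sketch (not part of the original module): _NodesToGroups maps
# node names to group names, falling back to the group UUID when the group is
# unknown and silently skipping unknown nodes. The sample data is made up.
#
#   node2group = {"node1": "uuid-a", "node2": "uuid-b"}
#   groups = {"uuid-a": {"name": "default"}}
#   IAReqRelocate._NodesToGroups(node2group, groups, ["node1", "node2", "node3"])
#   # -> ["default", "uuid-b"]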


class IAReqNodeEvac(IARequestBase):
  """A node evacuation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_NODE_EVAC
  REQ_PARAMS = [
    ("instances", _STRING_LIST),
    ("evac_mode", ht.TEvacMode),
    ]
  REQ_RESULT = _NEVAC_RESULT

  def GetRequest(self, cfg):
    """Get data for node-evacuate requests.

    """
    return {
      "instances": self.instances,
      "evac_mode": self.evac_mode,
      }


class IAReqGroupChange(IARequestBase):
  """A group change request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_CHG_GROUP
  REQ_PARAMS = [
    ("instances", _STRING_LIST),
    ("target_groups", _STRING_LIST),
    ]
  REQ_RESULT = _NEVAC_RESULT

  def GetRequest(self, cfg):
    """Get data for group change requests.

    """
    return {
      "instances": self.instances,
      "target_groups": self.target_groups,
      }


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # pylint: disable=R0902
  # lots of instance attributes

  def __init__(self, cfg, rpc_runner, req):
    self.cfg = cfg
    self.rpc = rpc_runner
    self.req = req
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init result fields
    self.success = self.info = self.result = None

    self._BuildInputData(req)

  def _ComputeClusterDataNodeInfo(self, disk_templates, node_list,
                                  cluster_info, hypervisor_name):
    """Prepare and execute node info call.

    @type disk_templates: list of string
    @param disk_templates: the disk templates of the instances to be allocated
    @type node_list: list of strings
    @param node_list: list of nodes' UUIDs
    @type cluster_info: L{objects.Cluster}
    @param cluster_info: the cluster's information from the config
    @type hypervisor_name: string
    @param hypervisor_name: the hypervisor name
    @rtype: same as the result of the node info RPC call
    @return: the result of the node info RPC call

    """
    storage_units_raw = utils.storage.GetStorageUnits(self.cfg, disk_templates)
    storage_units = rpc.PrepareStorageUnitsForNodes(self.cfg, storage_units_raw,
                                                    node_list)
    hvspecs = [(hypervisor_name, cluster_info.hvparams[hypervisor_name])]
    return self.rpc.call_node_info(node_list, storage_units, hvspecs)

  def _ComputeClusterData(self, disk_template=None):
    """Compute the generic allocator input data.

    @type disk_template: string
    @param disk_template: the disk template of the instances to be allocated,
      if known

    """
    cfg = self.cfg.GetDetachedConfig()
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cluster_info.cluster_name,
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      "ipolicy": cluster_info.ipolicy,
      }
    ginfo = cfg.GetAllNodeGroupsInfo()
    ninfo = cfg.GetAllNodesInfo()
    iinfo = cfg.GetAllInstancesInfo()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo.values()]

    # node data
    node_list = [n.uuid for n in ninfo.values() if n.vm_capable]

    if isinstance(self.req, IAReqInstanceAlloc):
      hypervisor_name = self.req.hypervisor
      node_whitelist = self.req.node_whitelist
    elif isinstance(self.req, IAReqRelocate):
      hypervisor_name = iinfo[self.req.inst_uuid].hypervisor
      node_whitelist = None
    else:
      hypervisor_name = cluster_info.primary_hypervisor
      node_whitelist = None

    if not disk_template:
      disk_template = cluster_info.enabled_disk_templates[0]

    node_data = self._ComputeClusterDataNodeInfo([disk_template], node_list,
                                                 cluster_info, hypervisor_name)

    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors,
                                       cluster_info.hvparams)

    data["nodegroups"] = self._ComputeNodeGroupData(cluster_info, ginfo)

    config_ndata = self._ComputeBasicNodeData(cfg, ninfo, node_whitelist)
    data["nodes"] = self._ComputeDynamicNodeData(
      ninfo, node_data, node_iinfo, i_list, config_ndata, disk_template)
    assert len(data["nodes"]) == len(ninfo), \
      "Incomplete node data computed"

    data["instances"] = self._ComputeInstanceData(cfg, cluster_info, i_list)

    self.in_data = data

  @staticmethod
  def _ComputeNodeGroupData(cluster, ginfo):
    """Compute node groups data.

    """
    ng = dict((guuid, {
      "name": gdata.name,
      "alloc_policy": gdata.alloc_policy,
      "networks": [net_uuid for net_uuid, _ in gdata.networks.items()],
      "ipolicy": gmi.CalculateGroupIPolicy(cluster, gdata),
      "tags": list(gdata.GetTags()),
      })
      for guuid, gdata in ginfo.items())

    return ng

  @staticmethod
  def _ComputeBasicNodeData(cfg, node_cfg, node_whitelist):
    """Compute global node data.

    @rtype: dict
    @return: a dict mapping node names to their config-based data dicts

    """
    # fill in static (config-based) values
    node_results = dict((ninfo.name, {
      "tags": list(ninfo.GetTags()),
      "primary_ip": ninfo.primary_ip,
      "secondary_ip": ninfo.secondary_ip,
      "offline": (ninfo.offline or
                  not (node_whitelist is None or
                       ninfo.name in node_whitelist)),
      "drained": ninfo.drained,
      "master_candidate": ninfo.master_candidate,
      "group": ninfo.group,
      "master_capable": ninfo.master_capable,
      "vm_capable": ninfo.vm_capable,
      "ndparams": cfg.GetNdParams(ninfo),
      })
      for ninfo in node_cfg.values())

    return node_results

  @staticmethod
  def _GetAttributeFromHypervisorNodeData(hv_info, node_name, attr):
    """Extract an attribute from the hypervisor's node information.

    This is a helper function to extract data from the hypervisor's information
    about the node, as part of the result of a node_info query.

    @type hv_info: dict of strings
    @param hv_info: dictionary of node information from the hypervisor
    @type node_name: string
    @param node_name: name of the node
    @type attr: string
    @param attr: key of the attribute in the hv_info dictionary
    @rtype: integer
    @return: the value of the attribute
    @raises errors.OpExecError: if key not in dictionary or value not
      integer

    """
    if attr not in hv_info:
      raise errors.OpExecError("Node '%s' didn't return attribute"
                               " '%s'" % (node_name, attr))
    value = hv_info[attr]
    if not isinstance(value, int):
      raise errors.OpExecError("Node '%s' returned invalid value"
                               " for '%s': %s" %
                               (node_name, attr, value))
    return value

  @staticmethod
  def _ComputeStorageDataFromSpaceInfoByTemplate(
      space_info, node_name, disk_template):
    """Extract storage data from node info.

    @type space_info: see result of the RPC call node info
    @param space_info: the storage reporting part of the result of the RPC call
      node info
    @type node_name: string
    @param node_name: the node's name
    @type disk_template: string
    @param disk_template: the disk template to report space for
    @rtype: 4-tuple of integers
    @return: tuple of storage info (total_disk, free_disk, total_spindles,
      free_spindles)

    """
    storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template]
    if storage_type not in constants.STS_REPORT:
      total_disk = total_spindles = 0
      free_disk = free_spindles = 0
    else:
      template_space_info = utils.storage.LookupSpaceInfoByDiskTemplate(
        space_info, disk_template)
      if not template_space_info:
        raise errors.OpExecError("Node '%s' didn't return space info for disk"
                                 " template '%s'" % (node_name, disk_template))
      total_disk = template_space_info["storage_size"]
      free_disk = template_space_info["storage_free"]

      total_spindles = 0
      free_spindles = 0
      if disk_template in constants.DTS_LVM:
        lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
          space_info, constants.ST_LVM_PV)
        if lvm_pv_info:
          total_spindles = lvm_pv_info["storage_size"]
          free_spindles = lvm_pv_info["storage_free"]
    return (total_disk, free_disk, total_spindles, free_spindles)

  @staticmethod
  def _ComputeStorageDataFromSpaceInfo(space_info, node_name, has_lvm):
    """Extract storage data from node info.

    @type space_info: see result of the RPC call node info
    @param space_info: the storage reporting part of the result of the RPC call
      node info
    @type node_name: string
    @param node_name: the node's name
    @type has_lvm: boolean
    @param has_lvm: whether or not LVM storage information is requested
    @rtype: 4-tuple of integers
    @return: tuple of storage info (total_disk, free_disk, total_spindles,
      free_spindles)

    """
    # TODO: replace this with proper storage reporting
    if has_lvm:
      lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
        space_info, constants.ST_LVM_VG)
      if not lvm_vg_info:
        raise errors.OpExecError("Node '%s' didn't return LVM vg space info."
                                 % (node_name))
      total_disk = lvm_vg_info["storage_size"]
      free_disk = lvm_vg_info["storage_free"]
      lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
        space_info, constants.ST_LVM_PV)
      if not lvm_pv_info:
        raise errors.OpExecError("Node '%s' didn't return LVM pv space info."
                                 % (node_name))
      total_spindles = lvm_pv_info["storage_size"]
      free_spindles = lvm_pv_info["storage_free"]
    else:
      # we didn't even ask the node for VG status, so use zeros
      total_disk = free_disk = 0
      total_spindles = free_spindles = 0
    return (total_disk, free_disk, total_spindles, free_spindles)

  @staticmethod
  def _ComputeInstanceMemory(instance_list, node_instances_info, node_uuid,
                             input_mem_free):
    """Compute memory used by primary instances.

    @rtype: tuple (int, int, int)
    @return: A tuple of three integers: 1. the sum of memory used by primary
      instances on the node (including the ones that are currently down), 2.
      the sum of memory used by primary instances of the node that are up, 3.
      the amount of memory that is free on the node considering the current
      usage of the instances.

    """
    i_p_mem = i_p_up_mem = 0
    mem_free = input_mem_free
    for iinfo, beinfo in instance_list:
      if iinfo.primary_node == node_uuid:
        i_p_mem += beinfo[constants.BE_MAXMEM]
        if iinfo.name not in node_instances_info[node_uuid].payload:
          i_used_mem = 0
        else:
          i_used_mem = int(node_instances_info[node_uuid]
                           .payload[iinfo.name]["memory"])
        i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
        mem_free -= max(0, i_mem_diff)

        if iinfo.admin_state == constants.ADMINST_UP:
          i_p_up_mem += beinfo[constants.BE_MAXMEM]
    return (i_p_mem, i_p_up_mem, mem_free)
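
  # Illustrative sketch (not part of the original module): with made-up
  # numbers, assume a node reports 4096 MiB free and has two primary
  # instances: inst-a (maxmem 1024, running, hypervisor reports 512 in use)
  # and inst-b (maxmem 2048, stopped, so not in the hypervisor payload). Then:
  #
  #   i_p_mem    = 1024 + 2048                      = 3072  (all primaries)
  #   i_p_up_mem = 1024                             = 1024  (running only)
  #   mem_free   = 4096 - (1024 - 512) - (2048 - 0) = 1536
  #
  # i.e. the free memory is reduced by whatever each instance could still grow
  # into, up to its configured maximum.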

  def _ComputeDynamicNodeData(self, node_cfg, node_data, node_iinfo, i_list,
                              node_results, disk_template):
    """Compute global node data.

    @param node_results: the basic node structures as filled from the config

    """
    # TODO(dynmem): compute the right data on MAX and MIN memory
    # make a copy of the current dict
    node_results = dict(node_results)
    for nuuid, nresult in node_data.items():
      ninfo = node_cfg[nuuid]
      assert ninfo.name in node_results, "Missing basic data for node %s" % \
             ninfo.name

      if not ninfo.offline:
        nresult.Raise("Can't get data for node %s" % ninfo.name)
        node_iinfo[nuuid].Raise("Can't get node instance info from node %s" %
                                ninfo.name)
        (_, space_info, (hv_info, )) = nresult.payload

        mem_free = self._GetAttributeFromHypervisorNodeData(
          hv_info, ninfo.name, "memory_free")

        (i_p_mem, i_p_up_mem, mem_free) = self._ComputeInstanceMemory(
          i_list, node_iinfo, nuuid, mem_free)
        (total_disk, free_disk, total_spindles, free_spindles) = \
          self._ComputeStorageDataFromSpaceInfoByTemplate(
            space_info, ninfo.name, disk_template)

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": self._GetAttributeFromHypervisorNodeData(
            hv_info, ninfo.name, "memory_total"),
          "reserved_memory": self._GetAttributeFromHypervisorNodeData(
            hv_info, ninfo.name, "memory_dom0"),
          "free_memory": mem_free,
          "total_disk": total_disk,
          "free_disk": free_disk,
          "total_spindles": total_spindles,
          "free_spindles": free_spindles,
          "total_cpus": self._GetAttributeFromHypervisorNodeData(
            hv_info, ninfo.name, "cpu_total"),
          "reserved_cpus": self._GetAttributeFromHypervisorNodeData(
            hv_info, ninfo.name, "cpu_dom0"),
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr_dyn.update(node_results[ninfo.name])
        node_results[ninfo.name] = pnr_dyn

    return node_results

  @staticmethod
  def _ComputeInstanceData(cfg, cluster_info, i_list):
    """Compute global instance data.

    """
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {
          "mac": nic.mac,
          "ip": nic.ip,
          "mode": filled_params[constants.NIC_MODE],
          "link": filled_params[constants.NIC_LINK],
          }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      inst_disks = cfg.GetInstanceDisks(iinfo.uuid)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_state": iinfo.admin_state,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MAXMEM],
        "spindle_use": beinfo[constants.BE_SPINDLE_USE],
        "os": iinfo.os,
        "nodes": [cfg.GetNodeName(iinfo.primary_node)] +
                 cfg.GetNodeNames(
                   cfg.GetInstanceSecondaryNodes(iinfo.uuid)),
        "nics": nic_data,
        "disks": [{constants.IDISK_SIZE: dsk.size,
                   constants.IDISK_MODE: dsk.mode,
                   constants.IDISK_SPINDLES: dsk.spindles}
                  for dsk in inst_disks],
        "disk_template": iinfo.disk_template,
        "disks_active": iinfo.disks_active,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = gmi.ComputeDiskSize(iinfo.disk_template,
                                                    pir["disks"])
      instance_data[iinfo.name] = pir

    return instance_data

  def _BuildInputData(self, req):
    """Build input data structures.

    """
    request = req.GetRequest(self.cfg)
    disk_template = None
    if "disk_template" in request:
      disk_template = request["disk_template"]
    self._ComputeClusterData(disk_template=disk_template)

    request["type"] = req.MODE
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    ial_params = self.cfg.GetDefaultIAllocatorParameters()

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text, ial_params)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()
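
  # Illustrative sketch (not part of the original module): Run() accepts a
  # call_fn hook, so tests can substitute the RPC call that normally executes
  # the iallocator script on the master node. _FakeRpcResult below is purely
  # hypothetical; it would only need a Raise() method and a payload attribute.
  #
  #   def _StubRunner(node_uuid, name, in_text, ial_params):
  #     return _FakeRpcResult(
  #       payload='{"success": true, "info": "", "result": []}')
  #
  #   ial.Run("hail", call_fn=_StubRunner)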

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    self.req.ValidateResult(self, self.result)
    self.out_data = rdict
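
# Illustrative sketch (not part of the original module): typical driving code
# builds a request, wraps it in an IAllocator and runs the configured script.
# The "lu" object (a logical unit providing cfg and rpc) and the example
# values are assumptions made for illustration only.
#
#   req = IAReqNodeEvac(instances=["inst1.example.com"],
#                       evac_mode=constants.NODE_EVAC_ALL)
#   ial = IAllocator(lu.cfg, lu.rpc, req)
#   ial.Run("hail")                       # name of the iallocator script
#   if not ial.success:
#     raise errors.OpExecError("iallocator failed: %s" % ial.info)
#   (moved, failed, jobs) = ial.result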