
Source Code for Module ganeti.masterd.iallocator

  1  # 
  2  # 
  3   
  4  # Copyright (C) 2012, 2013 Google Inc. 
  5  # All rights reserved. 
  6  # 
  7  # Redistribution and use in source and binary forms, with or without 
  8  # modification, are permitted provided that the following conditions are 
  9  # met: 
 10  # 
 11  # 1. Redistributions of source code must retain the above copyright notice, 
 12  # this list of conditions and the following disclaimer. 
 13  # 
 14  # 2. Redistributions in binary form must reproduce the above copyright 
 15  # notice, this list of conditions and the following disclaimer in the 
 16  # documentation and/or other materials provided with the distribution. 
 17  # 
 18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
 19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
 20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
 21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
 22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
 23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
 24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
 25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
 26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
 27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
 28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 29   
 30   
 31  """Module implementing the iallocator code.""" 
 32   
 33  from ganeti import compat 
 34  from ganeti import constants 
 35  from ganeti import errors 
 36  from ganeti import ht 
 37  from ganeti import outils 
 38  from ganeti import opcodes 
 39  import ganeti.rpc.node as rpc 
 40  from ganeti import serializer 
 41  from ganeti import utils 
 42   
 43  import ganeti.masterd.instance as gmi 
 44   
 45   
 46  _STRING_LIST = ht.TListOf(ht.TString) 
 47  _JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, { 
 48     # pylint: disable=E1101 
 49     # Class '...' has no 'OP_ID' member 
 50     "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID, 
 51                          opcodes.OpInstanceMigrate.OP_ID, 
 52                          opcodes.OpInstanceReplaceDisks.OP_ID]), 
 53     }))) 
 54   
 55  _NEVAC_MOVED = \ 
 56    ht.TListOf(ht.TAnd(ht.TIsLength(3), 
 57                       ht.TItems([ht.TNonEmptyString, 
 58                                  ht.TNonEmptyString, 
 59                                  ht.TListOf(ht.TNonEmptyString), 
 60                                  ]))) 
 61  _NEVAC_FAILED = \ 
 62    ht.TListOf(ht.TAnd(ht.TIsLength(2), 
 63                       ht.TItems([ht.TNonEmptyString, 
 64                                  ht.TMaybeString, 
 65                                  ]))) 
 66  _NEVAC_RESULT = ht.TAnd(ht.TIsLength(3), 
 67                          ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST])) 
 68   
 69  _INST_NAME = ("name", ht.TNonEmptyString) 
 70  _INST_UUID = ("inst_uuid", ht.TNonEmptyString) 
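
The validators above fix the shape of a node-evacuation answer: a triple of moved instances, failed instances, and follow-up jobs. As a hedged illustration (all instance, group and node names invented), a value that _NEVAC_RESULT would accept looks like:

  # Illustrative only: names are made up, keys follow the validators above.
  [
    [["inst1.example.com", "group1", ["node2.example.com"]]],   # moved
    [["inst9.example.com", "not enough memory"]],               # failed
    [[{"OP_ID": "OP_INSTANCE_MIGRATE"}]],                       # jobs
    ]
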
 71   
 72   
 73  class _AutoReqParam(outils.AutoSlots): 
 74    """Meta class for request definitions. 
 75   
 76    """ 
 77    @classmethod 
 78    def _GetSlots(mcs, attrs): 
 79      """Extract the slots out of REQ_PARAMS. 
 80   
 81      """ 
 82      params = attrs.setdefault("REQ_PARAMS", []) 
 83      return [slot for (slot, _) in params] 
 84   
 85   
 86  class IARequestBase(outils.ValidatedSlots): 
 87    """A generic IAllocator request object. 
 88   
 89    """ 
 90    __metaclass__ = _AutoReqParam 
 91   
 92    MODE = NotImplemented 
 93    REQ_PARAMS = [] 
 94    REQ_RESULT = NotImplemented 
 95   
 96    def __init__(self, **kwargs): 
 97      """Constructor for IARequestBase. 
 98   
 99      The constructor takes only keyword arguments and will set 
100      attributes on this object based on the passed arguments. As such, 
101      you should not pass arguments which are not in the 
102      REQ_PARAMS attribute for this class. 
103   
104      """ 
105      outils.ValidatedSlots.__init__(self, **kwargs) 
106   
107      self.Validate() 
108   
109    def Validate(self): 
110      """Validates all parameters of the request. 
111   
112   
113      This method returns L{None} if the validation succeeds, or raises 
114      an exception otherwise. 
115   
116      @rtype: NoneType 
117      @return: L{None}, if the validation succeeds 
118   
119      @raise Exception: validation fails 
120   
121      """ 
122      assert self.MODE in constants.VALID_IALLOCATOR_MODES 
123   
124      for (param, validator) in self.REQ_PARAMS: 
125        if not hasattr(self, param): 
126          raise errors.OpPrereqError("Request is missing '%s' parameter" % param, 
127                                     errors.ECODE_INVAL) 
128   
129        value = getattr(self, param) 
130        if not validator(value): 
131          raise errors.OpPrereqError(("Request parameter '%s' has invalid" 
132                                      " type %s/value %s") % 
133                                     (param, type(value), value), 
134                                     errors.ECODE_INVAL) 
135   
136    def GetRequest(self, cfg): 
137      """Gets the request data dict. 
138   
139      @param cfg: The configuration instance 
140   
141      """ 
142      raise NotImplementedError 
143   
144    def ValidateResult(self, ia, result): 
145      """Validates the result of a request. 
146   
147      @param ia: The IAllocator instance 
148      @param result: The IAllocator run result 
149      @raises ResultValidationError: If validation fails 
150   
151      """ 
152      if ia.success and not self.REQ_RESULT(result): 
153        raise errors.ResultValidationError("iallocator returned invalid result," 
154                                           " expected %s, got %s" % 
155                                           (self.REQ_RESULT, result)) 
156   
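
How the pieces above fit together is easiest to see with a small sketch; the subclass below is hypothetical and only illustrates the mechanics (the real request types follow):

  # Hypothetical example class, not part of this module.
  class _IAReqExample(IARequestBase):
    MODE = constants.IALLOCATOR_MODE_ALLOC
    REQ_PARAMS = [
      ("name", ht.TNonEmptyString),
      ("memory", ht.TNonNegativeInt),
      ]
    REQ_RESULT = ht.TList

  # _AutoReqParam turns REQ_PARAMS into slots; __init__ stores the keyword
  # arguments and Validate() rejects missing or badly typed parameters.
  req = _IAReqExample(name="inst1.example.com", memory=1024)
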
157   
158  class IAReqInstanceAlloc(IARequestBase): 
159    """An instance allocation request. 
160   
161    """ 
162    # pylint: disable=E1101 
163    MODE = constants.IALLOCATOR_MODE_ALLOC 
164    REQ_PARAMS = [ 
165      _INST_NAME, 
166      ("memory", ht.TNonNegativeInt), 
167      ("spindle_use", ht.TNonNegativeInt), 
168      ("disks", ht.TListOf(ht.TDict)), 
169      ("disk_template", ht.TString), 
170      ("os", ht.TString), 
171      ("tags", _STRING_LIST), 
172      ("nics", ht.TListOf(ht.TDict)), 
173      ("vcpus", ht.TInt), 
174      ("hypervisor", ht.TString), 
175      ("node_whitelist", ht.TMaybeListOf(ht.TNonEmptyString)), 
176      ] 
177    REQ_RESULT = ht.TList 
178   
179    def RequiredNodes(self): 
180      """Calculates the number of required nodes based on the disk_template. 
181   
182      """ 
183      if self.disk_template in constants.DTS_INT_MIRROR: 
184        return 2 
185      else: 
186        return 1 
187   
188    def GetRequest(self, cfg): 
189      """Requests a new instance. 
190   
191      The checks for the completeness of the opcode must have already been 
192      done. 
193   
194      """ 
195      disk_space = gmi.ComputeDiskSize(self.disk_template, self.disks) 
196   
197      return { 
198        "name": self.name, 
199        "disk_template": self.disk_template, 
200        "tags": self.tags, 
201        "os": self.os, 
202        "vcpus": self.vcpus, 
203        "memory": self.memory, 
204        "spindle_use": self.spindle_use, 
205        "disks": self.disks, 
206        "disk_space_total": disk_space, 
207        "nics": self.nics, 
208        "required_nodes": self.RequiredNodes(), 
209        "hypervisor": self.hypervisor, 
210        } 
211   
212    def ValidateResult(self, ia, result): 
213      """Validates a single instance allocation request. 
214   
215      """ 
216      IARequestBase.ValidateResult(self, ia, result) 
217   
218      if ia.success and len(result) != self.RequiredNodes(): 
219        raise errors.ResultValidationError("iallocator returned invalid number" 
220                                           " of nodes (%s), required %s" % 
221                                           (len(result), self.RequiredNodes())) 
222   
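
Concretely, an allocation request built by the instance-creation logic might look roughly like this; every value below is illustrative and not taken from this module:

  # Illustrative values only.
  alloc_req = IAReqInstanceAlloc(name="inst1.example.com",
                                 memory=2048,
                                 spindle_use=1,
                                 disks=[{"size": 10240, "mode": "rw"}],
                                 disk_template=constants.DT_DRBD8,
                                 os="debian-image",
                                 tags=[],
                                 nics=[{"mac": "aa:00:00:35:e4:9b"}],
                                 vcpus=2,
                                 hypervisor=constants.HT_KVM,
                                 node_whitelist=None)
  # DRBD is an internally mirrored template, so two nodes are requested.
  assert alloc_req.RequiredNodes() == 2
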
223   
224  class IAReqMultiInstanceAlloc(IARequestBase): 
225    """A multi-instance allocation request. 
226   
227    """ 
228    # pylint: disable=E1101 
229    MODE = constants.IALLOCATOR_MODE_MULTI_ALLOC 
230    REQ_PARAMS = [ 
231      ("instances", ht.TListOf(ht.TInstanceOf(IAReqInstanceAlloc))), 
232      ] 
233    _MASUCCESS = \ 
234      ht.TListOf(ht.TAnd(ht.TIsLength(2), 
235                         ht.TItems([ht.TNonEmptyString, 
236                                    ht.TListOf(ht.TNonEmptyString), 
237                                    ]))) 
238    _MAFAILED = ht.TListOf(ht.TNonEmptyString) 
239    REQ_RESULT = ht.TAnd(ht.TList, ht.TIsLength(2), 
240                         ht.TItems([_MASUCCESS, _MAFAILED])) 
241   
242    def GetRequest(self, cfg): 
243      return { 
244        "instances": [iareq.GetRequest(cfg) for iareq in self.instances], 
245        } 
246   
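
Per the REQ_RESULT validator above, the multi-allocation answer is a pair: successful allocations as (instance name, chosen node names) and the names of instances that could not be placed. A value that would pass validation, with invented names:

  # Illustrative result shape only.
  [
    [["inst1.example.com", ["node1.example.com", "node2.example.com"]]],  # allocated
    ["inst2.example.com"],                                                # failed
    ]
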
247   
248  class IAReqRelocate(IARequestBase): 
249    """A relocation request. 
250   
251    """ 
252    # pylint: disable=E1101 
253    MODE = constants.IALLOCATOR_MODE_RELOC 
254    REQ_PARAMS = [ 
255      _INST_UUID, 
256      ("relocate_from_node_uuids", _STRING_LIST), 
257      ] 
258    REQ_RESULT = ht.TList 
259   
260    def GetRequest(self, cfg): 
261      """Request a relocation of an instance. 
262   
263      The checks for the completeness of the opcode must have already been 
264      done. 
265   
266      """ 
267      instance = cfg.GetInstanceInfo(self.inst_uuid) 
268      if instance is None: 
269        raise errors.ProgrammerError("Unknown instance '%s' passed to" 
270                                     " IAllocator" % self.inst_uuid) 
271   
272      if instance.disk_template not in constants.DTS_MIRRORED: 
273        raise errors.OpPrereqError("Can't relocate non-mirrored instances", 
274                                   errors.ECODE_INVAL) 
275   
276      if (instance.disk_template in constants.DTS_INT_MIRROR and 
277          len(instance.secondary_nodes) != 1): 
278        raise errors.OpPrereqError("Instance does not have exactly one" 
279                                   " secondary node", errors.ECODE_STATE) 
280   
281      disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in instance.disks] 
282      disk_space = gmi.ComputeDiskSize(instance.disk_template, disk_sizes) 
283   
284      return { 
285        "name": instance.name, 
286        "disk_space_total": disk_space, 
287        "required_nodes": 1, 
288        "relocate_from": cfg.GetNodeNames(self.relocate_from_node_uuids), 
289        } 
290   
291    def ValidateResult(self, ia, result): 
292      """Validates the result of a relocation request. 
293   
294      """ 
295      IARequestBase.ValidateResult(self, ia, result) 
296   
297      node2group = dict((name, ndata["group"]) 
298                        for (name, ndata) in ia.in_data["nodes"].items()) 
299   
300      fn = compat.partial(self._NodesToGroups, node2group, 
301                          ia.in_data["nodegroups"]) 
302   
303      instance = ia.cfg.GetInstanceInfo(self.inst_uuid) 
304      request_groups = fn(ia.cfg.GetNodeNames(self.relocate_from_node_uuids) + 
305                          ia.cfg.GetNodeNames([instance.primary_node])) 
306      result_groups = fn(result + ia.cfg.GetNodeNames([instance.primary_node])) 
307   
308      if ia.success and not set(result_groups).issubset(request_groups): 
309        raise errors.ResultValidationError("Groups of nodes returned by" 
310                                           " iallocator (%s) differ from original" 
311                                           " groups (%s)" % 
312                                           (utils.CommaJoin(result_groups), 
313                                            utils.CommaJoin(request_groups))) 
314   
315    @staticmethod 
316    def _NodesToGroups(node2group, groups, nodes): 
317      """Returns a list of unique group names for a list of nodes. 
318   
319      @type node2group: dict 
320      @param node2group: Map from node name to group UUID 
321      @type groups: dict 
322      @param groups: Group information 
323      @type nodes: list 
324      @param nodes: Node names 
325   
326      """ 
327      result = set() 
328   
329      for node in nodes: 
330        try: 
331          group_uuid = node2group[node] 
332        except KeyError: 
333          # Ignore unknown node 
334          pass 
335        else: 
336          try: 
337            group = groups[group_uuid] 
338          except KeyError: 
339            # Can't find group, let's use UUID 
340            group_name = group_uuid 
341          else: 
342            group_name = group["name"] 
343   
344          result.add(group_name) 
345   
346      return sorted(result) 
347   
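
_NodesToGroups only maps node names to group names, falling back to the group UUID when the group itself is unknown; a small worked example with invented data:

  node2group = {"node1": "uuid-a", "node2": "uuid-a", "node3": "uuid-b"}
  groups = {"uuid-a": {"name": "default"}}   # "uuid-b" deliberately missing
  IAReqRelocate._NodesToGroups(node2group, groups,
                               ["node1", "node2", "node3", "ghost"])
  # -> ["default", "uuid-b"]    ("ghost" is not in node2group and is ignored)
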
348   
349  class IAReqNodeEvac(IARequestBase): 
350    """A node evacuation request. 
351   
352    """ 
353    # pylint: disable=E1101 
354    MODE = constants.IALLOCATOR_MODE_NODE_EVAC 
355    REQ_PARAMS = [ 
356      ("instances", _STRING_LIST), 
357      ("evac_mode", ht.TEvacMode), 
358      ] 
359    REQ_RESULT = _NEVAC_RESULT 
360   
361    def GetRequest(self, cfg): 
362      """Get data for node-evacuate requests. 
363   
364      """ 
365      return { 
366        "instances": self.instances, 
367        "evac_mode": self.evac_mode, 
368        } 
369   
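
Constructing such a request is straightforward; the evacuation-mode constant below is assumed to come from ganeti.constants (NODE_EVAC_PRI), and the instance name is invented:

  evac_req = IAReqNodeEvac(instances=["inst1.example.com"],
                           evac_mode=constants.NODE_EVAC_PRI)
  # GetRequest() simply echoes the two fields; the script's answer is
  # checked against _NEVAC_RESULT defined at the top of the module.
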
370   
371  class IAReqGroupChange(IARequestBase): 
372    """A group change request. 
373   
374    """ 
375    # pylint: disable=E1101 
376    MODE = constants.IALLOCATOR_MODE_CHG_GROUP 
377    REQ_PARAMS = [ 
378      ("instances", _STRING_LIST), 
379      ("target_groups", _STRING_LIST), 
380      ] 
381    REQ_RESULT = _NEVAC_RESULT 
382   
383    def GetRequest(self, cfg): 
384      """Get data for group-change requests. 
385   
386      """ 
387      return { 
388        "instances": self.instances, 
389        "target_groups": self.target_groups, 
390        } 
391   
392   
393  class IAllocator(object): 
394    """IAllocator framework. 
395   
396    An IAllocator instance has four sets of attributes: 
397      - cfg that is needed to query the cluster 
398      - input data (all members of the _KEYS class attribute are required) 
399      - four buffer attributes (in|out_data|text), that represent the 
400        input (to the external script) in text and data structure format, 
401        and the output from it, again in two formats 
402      - the result variables from the script (success, info, nodes) for 
403        easy usage 
404   
405    """ 
406    # pylint: disable=R0902 
407    # lots of instance attributes 
408   
409    def __init__(self, cfg, rpc_runner, req): 
410      self.cfg = cfg 
411      self.rpc = rpc_runner 
412      self.req = req 
413      # init buffer variables 
414      self.in_text = self.out_text = self.in_data = self.out_data = None 
415      # init result fields 
416      self.success = self.info = self.result = None 
417   
418      self._BuildInputData(req) 
419   
420    def _ComputeClusterDataNodeInfo(self, disk_templates, node_list, 
421                                    cluster_info, hypervisor_name): 
422      """Prepare and execute node info call. 
423   
424      @type disk_templates: list of string 
425      @param disk_templates: the disk templates of the instances to be allocated 
426      @type node_list: list of strings 
427      @param node_list: list of nodes' UUIDs 
428      @type cluster_info: L{objects.Cluster} 
429      @param cluster_info: the cluster's information from the config 
430      @type hypervisor_name: string 
431      @param hypervisor_name: the hypervisor name 
432      @rtype: same as the result of the node info RPC call 
433      @return: the result of the node info RPC call 
434   
435      """ 
436      storage_units_raw = utils.storage.GetStorageUnits(self.cfg, disk_templates) 
437      storage_units = rpc.PrepareStorageUnitsForNodes(self.cfg, storage_units_raw, 
438                                                      node_list) 
439      hvspecs = [(hypervisor_name, cluster_info.hvparams[hypervisor_name])] 
440      return self.rpc.call_node_info(node_list, storage_units, hvspecs) 
441   
442    def _ComputeClusterData(self, disk_template=None): 
443      """Compute the generic allocator input data. 
444   
445      @type disk_template: string 
446      @param disk_template: the disk template of the instances to be allocated 
447   
448      """ 
449      cluster_info = self.cfg.GetClusterInfo() 
450      # cluster data 
451      data = { 
452        "version": constants.IALLOCATOR_VERSION, 
453        "cluster_name": self.cfg.GetClusterName(), 
454        "cluster_tags": list(cluster_info.GetTags()), 
455        "enabled_hypervisors": list(cluster_info.enabled_hypervisors), 
456        "ipolicy": cluster_info.ipolicy, 
457        } 
458      ninfo = self.cfg.GetAllNodesInfo() 
459      iinfo = self.cfg.GetAllInstancesInfo().values() 
460      i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo] 
461   
462      # node data 
463      node_list = [n.uuid for n in ninfo.values() if n.vm_capable] 
464   
465      if isinstance(self.req, IAReqInstanceAlloc): 
466        hypervisor_name = self.req.hypervisor 
467        node_whitelist = self.req.node_whitelist 
468      elif isinstance(self.req, IAReqRelocate): 
469        hypervisor_name = self.cfg.GetInstanceInfo(self.req.inst_uuid).hypervisor 
470        node_whitelist = None 
471      else: 
472        hypervisor_name = cluster_info.primary_hypervisor 
473        node_whitelist = None 
474   
475      if not disk_template: 
476        disk_template = cluster_info.enabled_disk_templates[0] 
477   
478      node_data = self._ComputeClusterDataNodeInfo([disk_template], node_list, 
479                                                   cluster_info, hypervisor_name) 
480   
481      node_iinfo = \ 
482        self.rpc.call_all_instances_info(node_list, 
483                                         cluster_info.enabled_hypervisors, 
484                                         cluster_info.hvparams) 
485   
486      data["nodegroups"] = self._ComputeNodeGroupData(self.cfg) 
487   
488      config_ndata = self._ComputeBasicNodeData(self.cfg, ninfo, node_whitelist) 
489      data["nodes"] = self._ComputeDynamicNodeData( 
490        ninfo, node_data, node_iinfo, i_list, config_ndata, disk_template) 
491      assert len(data["nodes"]) == len(ninfo), \ 
492        "Incomplete node data computed" 
493   
494      data["instances"] = self._ComputeInstanceData(self.cfg, cluster_info, 
495                                                    i_list) 
496   
497      self.in_data = data 
498   
499    @staticmethod 
500    def _ComputeNodeGroupData(cfg): 
501      """Compute node groups data. 
502   
503      """ 
504      cluster = cfg.GetClusterInfo() 
505      ng = dict((guuid, { 
506        "name": gdata.name, 
507        "alloc_policy": gdata.alloc_policy, 
508        "networks": [net_uuid for net_uuid, _ in gdata.networks.items()], 
509        "ipolicy": gmi.CalculateGroupIPolicy(cluster, gdata), 
510        "tags": list(gdata.GetTags()), 
511        }) 
512        for guuid, gdata in cfg.GetAllNodeGroupsInfo().items()) 
513   
514      return ng 
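
The resulting mapping is keyed by group UUID; for a single default group it would look roughly like the sketch below (UUID and values invented, ALLOC_POLICY_PREFERRED assumed to exist in ganeti.constants):

  example_nodegroups = {
    "f4e06e0f-0000-0000-0000-000000000000": {
      "name": "default",
      "alloc_policy": constants.ALLOC_POLICY_PREFERRED,
      "networks": [],
      "ipolicy": {},   # in practice filled by gmi.CalculateGroupIPolicy
      "tags": [],
      },
    }
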
515   
516    @staticmethod 
517    def _ComputeBasicNodeData(cfg, node_cfg, node_whitelist): 
518      """Compute global node data. 
519   
520      @rtype: dict 
521      @returns: a dict of name: (node dict, node config) 
522   
523      """ 
524      # fill in static (config-based) values 
525      node_results = dict((ninfo.name, { 
526        "tags": list(ninfo.GetTags()), 
527        "primary_ip": ninfo.primary_ip, 
528        "secondary_ip": ninfo.secondary_ip, 
529        "offline": (ninfo.offline or 
530                    not (node_whitelist is None or 
531                         ninfo.name in node_whitelist)), 
532        "drained": ninfo.drained, 
533        "master_candidate": ninfo.master_candidate, 
534        "group": ninfo.group, 
535        "master_capable": ninfo.master_capable, 
536        "vm_capable": ninfo.vm_capable, 
537        "ndparams": cfg.GetNdParams(ninfo), 
538        }) 
539        for ninfo in node_cfg.values()) 
540   
541      return node_results 
542   
543    @staticmethod 
544    def _GetAttributeFromHypervisorNodeData(hv_info, node_name, attr): 
545      """Extract an attribute from the hypervisor's node information. 
546   
547      This is a helper function to extract data from the hypervisor's information 
548      about the node, as part of the result of a node_info query. 
549   
550      @type hv_info: dict of strings 
551      @param hv_info: dictionary of node information from the hypervisor 
552      @type node_name: string 
553      @param node_name: name of the node 
554      @type attr: string 
555      @param attr: key of the attribute in the hv_info dictionary 
556      @rtype: integer 
557      @return: the value of the attribute 
558      @raises errors.OpExecError: if key not in dictionary or value not 
559        integer 
560   
561      """ 
562      if attr not in hv_info: 
563        raise errors.OpExecError("Node '%s' didn't return attribute" 
564                                 " '%s'" % (node_name, attr)) 
565      value = hv_info[attr] 
566      if not isinstance(value, int): 
567        raise errors.OpExecError("Node '%s' returned invalid value" 
568                                 " for '%s': %s" % 
569                                 (node_name, attr, value)) 
570      return value 
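
The helper is deliberately strict; with invented data:

  hv_info = {"memory_free": 7680, "memory_total": 8192}
  IAllocator._GetAttributeFromHypervisorNodeData(hv_info, "node1", "memory_free")
  # -> 7680; a missing key or a non-integer value raises errors.OpExecError.
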
571   
572    @staticmethod 
573    def _ComputeStorageDataFromSpaceInfoByTemplate( 
574        space_info, node_name, disk_template): 
575      """Extract storage data from node info. 
576   
577      @type space_info: see result of the RPC call node info 
578      @param space_info: the storage reporting part of the result of the RPC call 
579        node info 
580      @type node_name: string 
581      @param node_name: the node's name 
582      @type disk_template: string 
583      @param disk_template: the disk template to report space for 
584      @rtype: 4-tuple of integers 
585      @return: tuple of storage info (total_disk, free_disk, total_spindles, 
586        free_spindles) 
587   
588      """ 
589      storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template] 
590      if storage_type not in constants.STS_REPORT: 
591        total_disk = total_spindles = 0 
592        free_disk = free_spindles = 0 
593      else: 
594        template_space_info = utils.storage.LookupSpaceInfoByDiskTemplate( 
595          space_info, disk_template) 
596        if not template_space_info: 
597          raise errors.OpExecError("Node '%s' didn't return space info for disk" 
598                                   " template '%s'" % (node_name, disk_template)) 
599        total_disk = template_space_info["storage_size"] 
600        free_disk = template_space_info["storage_free"] 
601   
602        total_spindles = 0 
603        free_spindles = 0 
604        if disk_template in constants.DTS_LVM: 
605          lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType( 
606            space_info, constants.ST_LVM_PV) 
607          if lvm_pv_info: 
608            total_spindles = lvm_pv_info["storage_size"] 
609            free_spindles = lvm_pv_info["storage_free"] 
610      return (total_disk, free_disk, total_spindles, free_spindles) 
611   
612    @staticmethod 
613    def _ComputeStorageDataFromSpaceInfo(space_info, node_name, has_lvm): 
614      """Extract storage data from node info. 
615   
616      @type space_info: see result of the RPC call node info 
617      @param space_info: the storage reporting part of the result of the RPC call 
618        node info 
619      @type node_name: string 
620      @param node_name: the node's name 
621      @type has_lvm: boolean 
622      @param has_lvm: whether or not LVM storage information is requested 
623      @rtype: 4-tuple of integers 
624      @return: tuple of storage info (total_disk, free_disk, total_spindles, 
625        free_spindles) 
626   
627      """ 
628      # TODO: replace this with proper storage reporting 
629      if has_lvm: 
630        lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType( 
631          space_info, constants.ST_LVM_VG) 
632        if not lvm_vg_info: 
633          raise errors.OpExecError("Node '%s' didn't return LVM vg space info." 
634                                   % (node_name)) 
635        total_disk = lvm_vg_info["storage_size"] 
636        free_disk = lvm_vg_info["storage_free"] 
637        lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType( 
638          space_info, constants.ST_LVM_PV) 
639        if not lvm_pv_info: 
640          raise errors.OpExecError("Node '%s' didn't return LVM pv space info." 
641                                   % (node_name)) 
642        total_spindles = lvm_pv_info["storage_size"] 
643        free_spindles = lvm_pv_info["storage_free"] 
644      else: 
645        # we didn't even ask the node for VG status, so use zeros 
646        total_disk = free_disk = 0 
647        total_spindles = free_spindles = 0 
648      return (total_disk, free_disk, total_spindles, free_spindles) 
649   
650    @staticmethod 
651    def _ComputeInstanceMemory(instance_list, node_instances_info, node_uuid, 
652                               input_mem_free): 
653      """Compute memory used by primary instances. 
654   
655      @rtype: tuple (int, int, int) 
656      @returns: A tuple of three integers: 1. the sum of memory used by primary 
657        instances on the node (including the ones that are currently down), 2. 
658        the sum of memory used by primary instances of the node that are up, 3. 
659        the amount of memory that is free on the node considering the current 
660        usage of the instances. 
661   
662      """ 
663      i_p_mem = i_p_up_mem = 0 
664      mem_free = input_mem_free 
665      for iinfo, beinfo in instance_list: 
666        if iinfo.primary_node == node_uuid: 
667          i_p_mem += beinfo[constants.BE_MAXMEM] 
668          if iinfo.name not in node_instances_info[node_uuid].payload: 
669            i_used_mem = 0 
670          else: 
671            i_used_mem = int(node_instances_info[node_uuid] 
672                             .payload[iinfo.name]["memory"]) 
673          i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem 
674          mem_free -= max(0, i_mem_diff) 
675   
676          if iinfo.admin_state == constants.ADMINST_UP: 
677            i_p_up_mem += beinfo[constants.BE_MAXMEM] 
678      return (i_p_mem, i_p_up_mem, mem_free) 
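
As a worked example of the arithmetic above (numbers invented): if a node reports 4096 MB free and one of its primary instances has a BE_MAXMEM of 2048 MB while the hypervisor reports only 1024 MB currently in use for it, then i_p_mem becomes 2048, mem_free becomes 4096 - max(0, 2048 - 1024) = 3072 (free memory is reduced by the amount the instance may still claim), and, since the instance is administratively up, i_p_up_mem also becomes 2048.
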
679   
680    def _ComputeDynamicNodeData(self, node_cfg, node_data, node_iinfo, i_list, 
681                                node_results, disk_template): 
682      """Compute global node data. 
683   
684      @param node_results: the basic node structures as filled from the config 
685   
686      """ 
687      #TODO(dynmem): compute the right data on MAX and MIN memory 
688      # make a copy of the current dict 
689      node_results = dict(node_results) 
690      for nuuid, nresult in node_data.items(): 
691        ninfo = node_cfg[nuuid] 
692        assert ninfo.name in node_results, "Missing basic data for node %s" % \ 
693               ninfo.name 
694   
695        if not ninfo.offline: 
696          nresult.Raise("Can't get data for node %s" % ninfo.name) 
697          node_iinfo[nuuid].Raise("Can't get node instance info from node %s" % 
698                                  ninfo.name) 
699          (_, space_info, (hv_info, )) = nresult.payload 
700   
701          mem_free = self._GetAttributeFromHypervisorNodeData(hv_info, ninfo.name, 
702                                                              "memory_free") 
703   
704          (i_p_mem, i_p_up_mem, mem_free) = self._ComputeInstanceMemory( 
705            i_list, node_iinfo, nuuid, mem_free) 
706          (total_disk, free_disk, total_spindles, free_spindles) = \ 
707            self._ComputeStorageDataFromSpaceInfoByTemplate( 
708              space_info, ninfo.name, disk_template) 
709   
710          # compute memory used by instances 
711          pnr_dyn = { 
712            "total_memory": self._GetAttributeFromHypervisorNodeData( 
713              hv_info, ninfo.name, "memory_total"), 
714            "reserved_memory": self._GetAttributeFromHypervisorNodeData( 
715              hv_info, ninfo.name, "memory_dom0"), 
716            "free_memory": mem_free, 
717            "total_disk": total_disk, 
718            "free_disk": free_disk, 
719            "total_spindles": total_spindles, 
720            "free_spindles": free_spindles, 
721            "total_cpus": self._GetAttributeFromHypervisorNodeData( 
722              hv_info, ninfo.name, "cpu_total"), 
723            "reserved_cpus": self._GetAttributeFromHypervisorNodeData( 
724              hv_info, ninfo.name, "cpu_dom0"), 
725            "i_pri_memory": i_p_mem, 
726            "i_pri_up_memory": i_p_up_mem, 
727            } 
728          pnr_dyn.update(node_results[ninfo.name]) 
729          node_results[ninfo.name] = pnr_dyn 
730   
731      return node_results 
732   
733    @staticmethod 
734    def _ComputeInstanceData(cfg, cluster_info, i_list): 
735      """Compute global instance data. 
736   
737      """ 
738      instance_data = {} 
739      for iinfo, beinfo in i_list: 
740        nic_data = [] 
741        for nic in iinfo.nics: 
742          filled_params = cluster_info.SimpleFillNIC(nic.nicparams) 
743          nic_dict = { 
744            "mac": nic.mac, 
745            "ip": nic.ip, 
746            "mode": filled_params[constants.NIC_MODE], 
747            "link": filled_params[constants.NIC_LINK], 
748            } 
749          if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED: 
750            nic_dict["bridge"] = filled_params[constants.NIC_LINK] 
751          nic_data.append(nic_dict) 
752        pir = { 
753          "tags": list(iinfo.GetTags()), 
754          "admin_state": iinfo.admin_state, 
755          "vcpus": beinfo[constants.BE_VCPUS], 
756          "memory": beinfo[constants.BE_MAXMEM], 
757          "spindle_use": beinfo[constants.BE_SPINDLE_USE], 
758          "os": iinfo.os, 
759          "nodes": [cfg.GetNodeName(iinfo.primary_node)] + 
760            cfg.GetNodeNames(iinfo.secondary_nodes), 
761          "nics": nic_data, 
762          "disks": [{constants.IDISK_SIZE: dsk.size, 
763                     constants.IDISK_MODE: dsk.mode, 
764                     constants.IDISK_SPINDLES: dsk.spindles} 
765                    for dsk in iinfo.disks], 
766          "disk_template": iinfo.disk_template, 
767          "disks_active": iinfo.disks_active, 
768          "hypervisor": iinfo.hypervisor, 
769          } 
770        pir["disk_space_total"] = gmi.ComputeDiskSize(iinfo.disk_template, 
771                                                      pir["disks"]) 
772        instance_data[iinfo.name] = pir 
773   
774      return instance_data 
775   
776    def _BuildInputData(self, req): 
777      """Build input data structures. 
778   
779      """ 
780      request = req.GetRequest(self.cfg) 
781      disk_template = None 
782      if "disk_template" in request: 
783        disk_template = request["disk_template"] 
784      self._ComputeClusterData(disk_template=disk_template) 
785   
786      request["type"] = req.MODE 
787      self.in_data["request"] = request 
788   
789      self.in_text = serializer.Dump(self.in_data) 
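
In other words, self.in_data ends up carrying the cluster snapshot computed by _ComputeClusterData plus a "request" entry: the dict returned by req.GetRequest() with an additional "type" key set to req.MODE (e.g. "allocate" for IAReqInstanceAlloc), and self.in_text is the serialized form of that structure handed to the external script.
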
790   
791    def Run(self, name, validate=True, call_fn=None): 
792      """Run an instance allocator and return the results. 
793   
794      """ 
795      if call_fn is None: 
796        call_fn = self.rpc.call_iallocator_runner 
797   
798      ial_params = self.cfg.GetDefaultIAllocatorParameters() 
799   
800      result = call_fn(self.cfg.GetMasterNode(), name, self.in_text, ial_params) 
801      result.Raise("Failure while running the iallocator script") 
802   
803      self.out_text = result.payload 
804      if validate: 
805        self._ValidateResult() 
806   
807    def _ValidateResult(self): 
808      """Process the allocator results. 
809   
810      This will process and, if successful, save the result in 
811      self.out_data and the other parameters. 
812   
813      """ 
814      try: 
815        rdict = serializer.Load(self.out_text) 
816      except Exception, err: 
817        raise errors.OpExecError("Can't parse iallocator results: %s" % str(err)) 
818   
819      if not isinstance(rdict, dict): 
820        raise errors.OpExecError("Can't parse iallocator results: not a dict") 
821   
822      # TODO: remove backwards compatibility in later versions 
823      if "nodes" in rdict and "result" not in rdict: 
824        rdict["result"] = rdict["nodes"] 
825        del rdict["nodes"] 
826   
827      for key in "success", "info", "result": 
828        if key not in rdict: 
829          raise errors.OpExecError("Can't parse iallocator results:" 
830                                   " missing key '%s'" % key) 
831        setattr(self, key, rdict[key]) 
832   
833      self.req.ValidateResult(self, self.result) 
834      self.out_data = rdict 
835   
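
Taken together, the intended flow is: build a request, hand it to IAllocator (whose constructor immediately computes the cluster snapshot), run the named allocator script via RPC on the master node, and read the validated fields back. A minimal sketch, assuming cfg and rpc_runner are the master daemon's config and RPC runner and that the "hail" allocator is installed:

  req = IAReqNodeEvac(instances=["inst1.example.com"],
                      evac_mode=constants.NODE_EVAC_PRI)
  ial = IAllocator(cfg, rpc_runner, req)   # builds in_data and in_text
  ial.Run("hail")                          # runs the script, validates output
  if not ial.success:
    raise errors.OpExecError("hail failed: %s" % ial.info)
  (moved, failed, jobs) = ial.result       # shape guaranteed by _NEVAC_RESULT
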