
Source Code for Module ganeti.masterd.iallocator

#
#

# Copyright (C) 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


"""Module implementing the iallocator code."""

from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import ht
from ganeti import outils
from ganeti import opcodes
import ganeti.rpc.node as rpc
from ganeti import serializer
from ganeti import utils

import ganeti.masterd.instance as gmi


_STRING_LIST = ht.TListOf(ht.TString)
_JOB_LIST = ht.TListOf(ht.TListOf(ht.TStrictDict(True, False, {
   # pylint: disable=E1101
   # Class '...' has no 'OP_ID' member
   "OP_ID": ht.TElemOf([opcodes.OpInstanceFailover.OP_ID,
                        opcodes.OpInstanceMigrate.OP_ID,
                        opcodes.OpInstanceReplaceDisks.OP_ID]),
   })))

_NEVAC_MOVED = \
  ht.TListOf(ht.TAnd(ht.TIsLength(3),
                     ht.TItems([ht.TNonEmptyString,
                                ht.TNonEmptyString,
                                ht.TListOf(ht.TNonEmptyString),
                                ])))
_NEVAC_FAILED = \
  ht.TListOf(ht.TAnd(ht.TIsLength(2),
                     ht.TItems([ht.TNonEmptyString,
                                ht.TMaybeString,
                                ])))
_NEVAC_RESULT = ht.TAnd(ht.TIsLength(3),
                        ht.TItems([_NEVAC_MOVED, _NEVAC_FAILED, _JOB_LIST]))

_INST_NAME = ("name", ht.TNonEmptyString)
_INST_UUID = ("inst_uuid", ht.TNonEmptyString)
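

# Illustrative sketch, not part of the original module: the shape of a node
# evacuation result accepted by _NEVAC_RESULT, i.e. a (moved, failed, jobs)
# triple. All instance, group and node names below are made up.
def _ExampleNodeEvacResult():
  """Builds a sample node evacuation result and checks its shape."""
  moved = [["inst1.example.com", "group2", ["node3.example.com"]]]
  failed = [["inst2.example.com", "disk template not mirrored"]]
  jobs = [[{"OP_ID": opcodes.OpInstanceMigrate.OP_ID}]]
  # Should evaluate to True for a well-formed result such as this one.
  return _NEVAC_RESULT([moved, failed, jobs])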


class _AutoReqParam(outils.AutoSlots):
  """Meta class for request definitions.

  """
  @classmethod
  def _GetSlots(mcs, attrs):
    """Extract the slots out of REQ_PARAMS.

    """
    params = attrs.setdefault("REQ_PARAMS", [])
    return [slot for (slot, _) in params]


class IARequestBase(outils.ValidatedSlots):
  """A generic IAllocator request object.

  """
  __metaclass__ = _AutoReqParam

  MODE = NotImplemented
  REQ_PARAMS = []
  REQ_RESULT = NotImplemented

  def __init__(self, **kwargs):
    """Constructor for IARequestBase.

    The constructor takes only keyword arguments and will set
    attributes on this object based on the passed arguments. As such,
    it means that you should not pass arguments which are not in the
    REQ_PARAMS attribute for this class.

    """
    outils.ValidatedSlots.__init__(self, **kwargs)

    self.Validate()

  def Validate(self):
    """Validates all parameters of the request.


    This method returns L{None} if the validation succeeds, or raises
    an exception otherwise.

    @rtype: NoneType
    @return: L{None}, if the validation succeeds

    @raise Exception: validation fails

    """
    assert self.MODE in constants.VALID_IALLOCATOR_MODES

    for (param, validator) in self.REQ_PARAMS:
      if not hasattr(self, param):
        raise errors.OpPrereqError("Request is missing '%s' parameter" % param,
                                   errors.ECODE_INVAL)

      value = getattr(self, param)
      if not validator(value):
        raise errors.OpPrereqError(("Request parameter '%s' has invalid"
                                    " type %s/value %s") %
                                   (param, type(value), value),
                                   errors.ECODE_INVAL)

  def GetRequest(self, cfg):
    """Gets the request data dict.

    @param cfg: The configuration instance

    """
    raise NotImplementedError

  def ValidateResult(self, ia, result):
    """Validates the result of a request.

    @param ia: The IAllocator instance
    @param result: The IAllocator run result
    @raises ResultValidationError: If validation fails

    """
    if ia.success and not self.REQ_RESULT(result):
      raise errors.ResultValidationError("iallocator returned invalid result,"
                                         " expected %s, got %s" %
                                         (self.REQ_RESULT, result))

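
# Illustrative sketch, not part of the original module: REQ_PARAMS drives both
# the slots generated by the _AutoReqParam metaclass and the per-parameter
# checks performed in IARequestBase.Validate. The request class and its value
# below are hypothetical.
def _ExampleRequestDeclaration():
  """Declares and instantiates a minimal request class."""
  class _IAReqExample(IARequestBase):
    MODE = constants.IALLOCATOR_MODE_ALLOC
    REQ_PARAMS = [("name", ht.TNonEmptyString)]
    REQ_RESULT = ht.TList

  # Keyword arguments become attributes; omitting "name" or passing a value
  # that fails ht.TNonEmptyString raises errors.OpPrereqError.
  return _IAReqExample(name="inst1.example.com")
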

class IAReqInstanceAlloc(IARequestBase):
  """An instance allocation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_ALLOC
  REQ_PARAMS = [
    _INST_NAME,
    ("memory", ht.TNonNegativeInt),
    ("spindle_use", ht.TNonNegativeInt),
    ("disks", ht.TListOf(ht.TDict)),
    ("disk_template", ht.TString),
    ("os", ht.TString),
    ("tags", _STRING_LIST),
    ("nics", ht.TListOf(ht.TDict)),
    ("vcpus", ht.TInt),
    ("hypervisor", ht.TString),
    ("node_whitelist", ht.TMaybeListOf(ht.TNonEmptyString)),
    ]
  REQ_RESULT = ht.TList

  def RequiredNodes(self):
    """Calculates the required nodes based on the disk_template.

    """
    if self.disk_template in constants.DTS_INT_MIRROR:
      return 2
    else:
      return 1

  def GetRequest(self, cfg):
    """Requests a new instance.

    The checks for the completeness of the opcode must have already been
    done.

    """
    disk_space = gmi.ComputeDiskSize(self.disk_template, self.disks)

    return {
      "name": self.name,
      "disk_template": self.disk_template,
      "tags": self.tags,
      "os": self.os,
      "vcpus": self.vcpus,
      "memory": self.memory,
      "spindle_use": self.spindle_use,
      "disks": self.disks,
      "disk_space_total": disk_space,
      "nics": self.nics,
      "required_nodes": self.RequiredNodes(),
      "hypervisor": self.hypervisor,
      }

  def ValidateResult(self, ia, result):
    """Validates a single instance allocation request.

    """
    IARequestBase.ValidateResult(self, ia, result)

    if ia.success and len(result) != self.RequiredNodes():
      raise errors.ResultValidationError("iallocator returned invalid number"
                                         " of nodes (%s), required %s" %
                                         (len(result), self.RequiredNodes()))

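
# Illustrative sketch, not part of the original module: building a single
# instance allocation request. All values are made up; GetRequest(cfg) later
# turns this into the dict handed to the iallocator script, adding
# "disk_space_total" and "required_nodes" (2 for a DRBD-based instance).
def _ExampleAllocationRequest():
  """Builds a sample allocation request for a DRBD-based instance."""
  return IAReqInstanceAlloc(name="inst1.example.com",
                            memory=1024,
                            spindle_use=1,
                            disks=[{constants.IDISK_SIZE: 10240}],
                            disk_template=constants.DT_DRBD8,
                            os="debian-image",
                            tags=[],
                            nics=[{}],
                            vcpus=2,
                            hypervisor=constants.HT_XEN_PVM,
                            node_whitelist=None)
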

class IAReqMultiInstanceAlloc(IARequestBase):
  """A multi-instance allocation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_MULTI_ALLOC
  REQ_PARAMS = [
    ("instances", ht.TListOf(ht.TInstanceOf(IAReqInstanceAlloc))),
    ]
  _MASUCCESS = \
    ht.TListOf(ht.TAnd(ht.TIsLength(2),
                       ht.TItems([ht.TNonEmptyString,
                                  ht.TListOf(ht.TNonEmptyString),
                                  ])))
  _MAFAILED = ht.TListOf(ht.TNonEmptyString)
  REQ_RESULT = ht.TAnd(ht.TList, ht.TIsLength(2),
                       ht.TItems([_MASUCCESS, _MAFAILED]))

  def GetRequest(self, cfg):
    return {
      "instances": [iareq.GetRequest(cfg) for iareq in self.instances],
      }

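
# Illustrative sketch, not part of the original module: the (successful,
# failed) pair that REQ_RESULT expects from a multi-allocation run, with
# made-up instance and node names.
def _ExampleMultiAllocResult():
  """Builds a sample multi-allocation result and checks its shape."""
  successful = [["inst1.example.com",
                 ["node1.example.com", "node2.example.com"]]]
  failed = ["inst2.example.com"]
  # Should evaluate to True for a well-formed result such as this one.
  return IAReqMultiInstanceAlloc.REQ_RESULT([successful, failed])
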

class IAReqRelocate(IARequestBase):
  """A relocation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_RELOC
  REQ_PARAMS = [
    _INST_UUID,
    ("relocate_from_node_uuids", _STRING_LIST),
    ]
  REQ_RESULT = ht.TList

  def GetRequest(self, cfg):
    """Request a relocation of an instance.

    The checks for the completeness of the opcode must have already been
    done.

    """
    instance = cfg.GetInstanceInfo(self.inst_uuid)
    if instance is None:
      raise errors.ProgrammerError("Unknown instance '%s' passed to"
                                   " IAllocator" % self.inst_uuid)

    if instance.disk_template not in constants.DTS_MIRRORED:
      raise errors.OpPrereqError("Can't relocate non-mirrored instances",
                                 errors.ECODE_INVAL)

    secondary_nodes = cfg.GetInstanceSecondaryNodes(instance.uuid)
    if (instance.disk_template in constants.DTS_INT_MIRROR and
        len(secondary_nodes) != 1):
      raise errors.OpPrereqError("Instance has not exactly one secondary node",
                                 errors.ECODE_STATE)

    inst_disks = cfg.GetInstanceDisks(instance.uuid)
    disk_sizes = [{constants.IDISK_SIZE: disk.size} for disk in inst_disks]
    disk_space = gmi.ComputeDiskSize(instance.disk_template, disk_sizes)

    return {
      "name": instance.name,
      "disk_space_total": disk_space,
      "required_nodes": 1,
      "relocate_from": cfg.GetNodeNames(self.relocate_from_node_uuids),
      }

  def ValidateResult(self, ia, result):
    """Validates the result of a relocation request.

    """
    IARequestBase.ValidateResult(self, ia, result)

    node2group = dict((name, ndata["group"])
                      for (name, ndata) in ia.in_data["nodes"].items())

    fn = compat.partial(self._NodesToGroups, node2group,
                        ia.in_data["nodegroups"])

    instance = ia.cfg.GetInstanceInfo(self.inst_uuid)
    request_groups = fn(ia.cfg.GetNodeNames(self.relocate_from_node_uuids) +
                        ia.cfg.GetNodeNames([instance.primary_node]))
    result_groups = fn(result + ia.cfg.GetNodeNames([instance.primary_node]))

    if ia.success and not set(result_groups).issubset(request_groups):
      raise errors.ResultValidationError("Groups of nodes returned by"
                                         " iallocator (%s) differ from original"
                                         " groups (%s)" %
                                         (utils.CommaJoin(result_groups),
                                          utils.CommaJoin(request_groups)))

  @staticmethod
  def _NodesToGroups(node2group, groups, nodes):
    """Returns a list of unique group names for a list of nodes.

    @type node2group: dict
    @param node2group: Map from node name to group UUID
    @type groups: dict
    @param groups: Group information
    @type nodes: list
    @param nodes: Node names

    """
    result = set()

    for node in nodes:
      try:
        group_uuid = node2group[node]
      except KeyError:
        # Ignore unknown node
        pass
      else:
        try:
          group = groups[group_uuid]
        except KeyError:
          # Can't find group, let's use UUID
          group_name = group_uuid
        else:
          group_name = group["name"]

        result.add(group_name)

    return sorted(result)

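
# Illustrative sketch, not part of the original module: how the group check in
# IAReqRelocate.ValidateResult maps node names to group names. The names and
# UUIDs below are made up.
def _ExampleNodesToGroups():
  """Resolves a node list to the sorted names of the groups they are in."""
  node2group = {"node1.example.com": "uuid-group-a",
                "node2.example.com": "uuid-group-a"}
  groups = {"uuid-group-a": {"name": "default"}}
  # Unknown nodes are ignored; unknown group UUIDs fall back to the UUID.
  return IAReqRelocate._NodesToGroups(node2group, groups,
                                      ["node1.example.com",
                                       "node2.example.com",
                                       "node9.example.com"])
  # -> ["default"]
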

class IAReqNodeEvac(IARequestBase):
  """A node evacuation request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_NODE_EVAC
  REQ_PARAMS = [
    ("instances", _STRING_LIST),
    ("evac_mode", ht.TEvacMode),
    ]
  REQ_RESULT = _NEVAC_RESULT

  def GetRequest(self, cfg):
    """Get data for node-evacuate requests.

    """
    return {
      "instances": self.instances,
      "evac_mode": self.evac_mode,
      }


class IAReqGroupChange(IARequestBase):
  """A group change request.

  """
  # pylint: disable=E1101
  MODE = constants.IALLOCATOR_MODE_CHG_GROUP
  REQ_PARAMS = [
    ("instances", _STRING_LIST),
    ("target_groups", _STRING_LIST),
    ]
  REQ_RESULT = _NEVAC_RESULT

  def GetRequest(self, cfg):
    """Get data for group change requests.

    """
    return {
      "instances": self.instances,
      "target_groups": self.target_groups,
      }


class IAllocator(object):
  """IAllocator framework.

  An IAllocator instance has four sets of attributes:
    - cfg that is needed to query the cluster
    - input data (all members of the _KEYS class attribute are required)
    - four buffer attributes (in|out_data|text), that represent the
      input (to the external script) in text and data structure format,
      and the output from it, again in two formats
    - the result variables from the script (success, info, nodes) for
      easy usage

  """
  # pylint: disable=R0902
  # lots of instance attributes

  def __init__(self, cfg, rpc_runner, req):
    self.cfg = cfg
    self.rpc = rpc_runner
    self.req = req
    # init buffer variables
    self.in_text = self.out_text = self.in_data = self.out_data = None
    # init result fields
    self.success = self.info = self.result = None

    self._BuildInputData(req)

  def _ComputeClusterDataNodeInfo(self, disk_templates, node_list,
                                  cluster_info, hypervisor_name):
    """Prepare and execute node info call.

    @type disk_templates: list of string
    @param disk_templates: the disk templates of the instances to be allocated
    @type node_list: list of strings
    @param node_list: list of nodes' UUIDs
    @type cluster_info: L{objects.Cluster}
    @param cluster_info: the cluster's information from the config
    @type hypervisor_name: string
    @param hypervisor_name: the hypervisor name
    @rtype: same as the result of the node info RPC call
    @return: the result of the node info RPC call

    """
    storage_units_raw = utils.storage.GetStorageUnits(self.cfg, disk_templates)
    storage_units = rpc.PrepareStorageUnitsForNodes(self.cfg, storage_units_raw,
                                                    node_list)
    hvspecs = [(hypervisor_name, cluster_info.hvparams[hypervisor_name])]
    return self.rpc.call_node_info(node_list, storage_units, hvspecs)

  def _ComputeClusterData(self, disk_template=None):
    """Compute the generic allocator input data.

    @type disk_template: string
    @param disk_template: the disk template of the instances to be allocated

    """
    cfg = self.cfg.GetDetachedConfig()
    cluster_info = cfg.GetClusterInfo()
    # cluster data
    data = {
      "version": constants.IALLOCATOR_VERSION,
      "cluster_name": cluster_info.cluster_name,
      "cluster_tags": list(cluster_info.GetTags()),
      "enabled_hypervisors": list(cluster_info.enabled_hypervisors),
      "ipolicy": cluster_info.ipolicy,
      }
    ginfo = cfg.GetAllNodeGroupsInfo()
    ninfo = cfg.GetAllNodesInfo()
    iinfo = cfg.GetAllInstancesInfo()
    i_list = [(inst, cluster_info.FillBE(inst)) for inst in iinfo.values()]

    # node data
    node_list = [n.uuid for n in ninfo.values() if n.vm_capable]

    if isinstance(self.req, IAReqInstanceAlloc):
      hypervisor_name = self.req.hypervisor
      node_whitelist = self.req.node_whitelist
    elif isinstance(self.req, IAReqRelocate):
      hypervisor_name = iinfo[self.req.inst_uuid].hypervisor
      node_whitelist = None
    else:
      hypervisor_name = cluster_info.primary_hypervisor
      node_whitelist = None

    if not disk_template:
      disk_template = cluster_info.enabled_disk_templates[0]

    node_data = self._ComputeClusterDataNodeInfo([disk_template], node_list,
                                                 cluster_info, hypervisor_name)

    node_iinfo = \
      self.rpc.call_all_instances_info(node_list,
                                       cluster_info.enabled_hypervisors,
                                       cluster_info.hvparams)

    data["nodegroups"] = self._ComputeNodeGroupData(cluster_info, ginfo)

    config_ndata = self._ComputeBasicNodeData(cfg, ninfo, node_whitelist)
    data["nodes"] = self._ComputeDynamicNodeData(
      ninfo, node_data, node_iinfo, i_list, config_ndata, disk_template)
    assert len(data["nodes"]) == len(ninfo), \
      "Incomplete node data computed"

    data["instances"] = self._ComputeInstanceData(cfg, cluster_info, i_list)

    self.in_data = data

  @staticmethod
  def _ComputeNodeGroupData(cluster, ginfo):
    """Compute node groups data.

    """
    ng = dict((guuid, {
      "name": gdata.name,
      "alloc_policy": gdata.alloc_policy,
      "networks": [net_uuid for net_uuid, _ in gdata.networks.items()],
      "ipolicy": gmi.CalculateGroupIPolicy(cluster, gdata),
      "tags": list(gdata.GetTags()),
      })
      for guuid, gdata in ginfo.items())

    return ng

  @staticmethod
  def _ComputeBasicNodeData(cfg, node_cfg, node_whitelist):
    """Compute global node data.

    @rtype: dict
    @returns: a dict of name: (node dict, node config)

    """
    # fill in static (config-based) values
    node_results = dict((ninfo.name, {
      "tags": list(ninfo.GetTags()),
      "primary_ip": ninfo.primary_ip,
      "secondary_ip": ninfo.secondary_ip,
      "offline": (ninfo.offline or
                  not (node_whitelist is None or
                       ninfo.name in node_whitelist)),
      "drained": ninfo.drained,
      "master_candidate": ninfo.master_candidate,
      "group": ninfo.group,
      "master_capable": ninfo.master_capable,
      "vm_capable": ninfo.vm_capable,
      "ndparams": cfg.GetNdParams(ninfo),
      })
      for ninfo in node_cfg.values())

    return node_results

  @staticmethod
  def _GetAttributeFromHypervisorNodeData(hv_info, node_name, attr):
    """Extract an attribute from the hypervisor's node information.

    This is a helper function to extract data from the hypervisor's information
    about the node, as part of the result of a node_info query.

    @type hv_info: dict of strings
    @param hv_info: dictionary of node information from the hypervisor
    @type node_name: string
    @param node_name: name of the node
    @type attr: string
    @param attr: key of the attribute in the hv_info dictionary
    @rtype: integer
    @return: the value of the attribute
    @raises errors.OpExecError: if key not in dictionary or value not
      integer

    """
    if attr not in hv_info:
      raise errors.OpExecError("Node '%s' didn't return attribute"
                               " '%s'" % (node_name, attr))
    value = hv_info[attr]
    if not isinstance(value, int):
      raise errors.OpExecError("Node '%s' returned invalid value"
                               " for '%s': %s" %
                               (node_name, attr, value))
    return value

  @staticmethod
  def _ComputeStorageDataFromSpaceInfoByTemplate(
      space_info, node_name, disk_template):
    """Extract storage data from node info.

    @type space_info: see result of the RPC call node info
    @param space_info: the storage reporting part of the result of the RPC call
      node info
    @type node_name: string
    @param node_name: the node's name
    @type disk_template: string
    @param disk_template: the disk template to report space for
    @rtype: 4-tuple of integers
    @return: tuple of storage info (total_disk, free_disk, total_spindles,
      free_spindles)

    """
    storage_type = constants.MAP_DISK_TEMPLATE_STORAGE_TYPE[disk_template]
    if storage_type not in constants.STS_REPORT:
      total_disk = total_spindles = 0
      free_disk = free_spindles = 0
    else:
      template_space_info = utils.storage.LookupSpaceInfoByDiskTemplate(
        space_info, disk_template)
      if not template_space_info:
        raise errors.OpExecError("Node '%s' didn't return space info for disk"
                                 " template '%s'" % (node_name, disk_template))
      total_disk = template_space_info["storage_size"]
      free_disk = template_space_info["storage_free"]

      total_spindles = 0
      free_spindles = 0
      if disk_template in constants.DTS_LVM:
        lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
          space_info, constants.ST_LVM_PV)
        if lvm_pv_info:
          total_spindles = lvm_pv_info["storage_size"]
          free_spindles = lvm_pv_info["storage_free"]
    return (total_disk, free_disk, total_spindles, free_spindles)

  @staticmethod
  def _ComputeStorageDataFromSpaceInfo(space_info, node_name, has_lvm):
    """Extract storage data from node info.

    @type space_info: see result of the RPC call node info
    @param space_info: the storage reporting part of the result of the RPC call
      node info
    @type node_name: string
    @param node_name: the node's name
    @type has_lvm: boolean
    @param has_lvm: whether or not LVM storage information is requested
    @rtype: 4-tuple of integers
    @return: tuple of storage info (total_disk, free_disk, total_spindles,
      free_spindles)

    """
    # TODO: replace this with proper storage reporting
    if has_lvm:
      lvm_vg_info = utils.storage.LookupSpaceInfoByStorageType(
        space_info, constants.ST_LVM_VG)
      if not lvm_vg_info:
        raise errors.OpExecError("Node '%s' didn't return LVM vg space info."
                                 % (node_name))
      total_disk = lvm_vg_info["storage_size"]
      free_disk = lvm_vg_info["storage_free"]
      lvm_pv_info = utils.storage.LookupSpaceInfoByStorageType(
        space_info, constants.ST_LVM_PV)
      if not lvm_pv_info:
        raise errors.OpExecError("Node '%s' didn't return LVM pv space info."
                                 % (node_name))
      total_spindles = lvm_pv_info["storage_size"]
      free_spindles = lvm_pv_info["storage_free"]
    else:
      # we didn't even ask the node for VG status, so use zeros
      total_disk = free_disk = 0
      total_spindles = free_spindles = 0
    return (total_disk, free_disk, total_spindles, free_spindles)

  @staticmethod
  def _ComputeInstanceMemory(instance_list, node_instances_info, node_uuid,
                             input_mem_free):
    """Compute memory used by primary instances.

    @rtype: tuple (int, int, int)
    @returns: A tuple of three integers: 1. the sum of memory used by primary
      instances on the node (including the ones that are currently down), 2.
      the sum of memory used by primary instances of the node that are up, 3.
      the amount of memory that is free on the node considering the current
      usage of the instances.

    """
    i_p_mem = i_p_up_mem = 0
    mem_free = input_mem_free
    for iinfo, beinfo in instance_list:
      if iinfo.primary_node == node_uuid:
        i_p_mem += beinfo[constants.BE_MAXMEM]
        if iinfo.name not in node_instances_info[node_uuid].payload:
          i_used_mem = 0
        else:
          i_used_mem = int(node_instances_info[node_uuid]
                           .payload[iinfo.name]["memory"])
        i_mem_diff = beinfo[constants.BE_MAXMEM] - i_used_mem
        mem_free -= max(0, i_mem_diff)

        if iinfo.admin_state == constants.ADMINST_UP:
          i_p_up_mem += beinfo[constants.BE_MAXMEM]
    return (i_p_mem, i_p_up_mem, mem_free)

  def _ComputeDynamicNodeData(self, node_cfg, node_data, node_iinfo, i_list,
                              node_results, disk_template):
    """Compute global node data.

    @param node_results: the basic node structures as filled from the config

    """
    #TODO(dynmem): compute the right data on MAX and MIN memory
    # make a copy of the current dict
    node_results = dict(node_results)
    for nuuid, nresult in node_data.items():
      ninfo = node_cfg[nuuid]
      assert ninfo.name in node_results, "Missing basic data for node %s" % \
             ninfo.name

      if not ninfo.offline:
        nresult.Raise("Can't get data for node %s" % ninfo.name)
        node_iinfo[nuuid].Raise("Can't get node instance info from node %s" %
                                ninfo.name)
        (_, space_info, (hv_info, )) = nresult.payload

        mem_free = self._GetAttributeFromHypervisorNodeData(hv_info, ninfo.name,
                                                            "memory_free")

        (i_p_mem, i_p_up_mem, mem_free) = self._ComputeInstanceMemory(
          i_list, node_iinfo, nuuid, mem_free)
        (total_disk, free_disk, total_spindles, free_spindles) = \
          self._ComputeStorageDataFromSpaceInfoByTemplate(
            space_info, ninfo.name, disk_template)

        # compute memory used by instances
        pnr_dyn = {
          "total_memory": self._GetAttributeFromHypervisorNodeData(
            hv_info, ninfo.name, "memory_total"),
          "reserved_memory": self._GetAttributeFromHypervisorNodeData(
            hv_info, ninfo.name, "memory_dom0"),
          "free_memory": mem_free,
          "total_disk": total_disk,
          "free_disk": free_disk,
          "total_spindles": total_spindles,
          "free_spindles": free_spindles,
          "total_cpus": self._GetAttributeFromHypervisorNodeData(
            hv_info, ninfo.name, "cpu_total"),
          "reserved_cpus": self._GetAttributeFromHypervisorNodeData(
            hv_info, ninfo.name, "cpu_dom0"),
          "i_pri_memory": i_p_mem,
          "i_pri_up_memory": i_p_up_mem,
          }
        pnr_dyn.update(node_results[ninfo.name])
        node_results[ninfo.name] = pnr_dyn

    return node_results

  @staticmethod
  def _ComputeInstanceData(cfg, cluster_info, i_list):
    """Compute global instance data.

    """
    instance_data = {}
    for iinfo, beinfo in i_list:
      nic_data = []
      for nic in iinfo.nics:
        filled_params = cluster_info.SimpleFillNIC(nic.nicparams)
        nic_dict = {
          "mac": nic.mac,
          "ip": nic.ip,
          "mode": filled_params[constants.NIC_MODE],
          "link": filled_params[constants.NIC_LINK],
          }
        if filled_params[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
          nic_dict["bridge"] = filled_params[constants.NIC_LINK]
        nic_data.append(nic_dict)
      inst_disks = cfg.GetInstanceDisks(iinfo.uuid)
      pir = {
        "tags": list(iinfo.GetTags()),
        "admin_state": iinfo.admin_state,
        "vcpus": beinfo[constants.BE_VCPUS],
        "memory": beinfo[constants.BE_MAXMEM],
        "spindle_use": beinfo[constants.BE_SPINDLE_USE],
        "os": iinfo.os,
        "nodes": [cfg.GetNodeName(iinfo.primary_node)] +
                 cfg.GetNodeNames(
                   cfg.GetInstanceSecondaryNodes(iinfo.uuid)),
        "nics": nic_data,
        "disks": [{constants.IDISK_SIZE: dsk.size,
                   constants.IDISK_MODE: dsk.mode,
                   constants.IDISK_SPINDLES: dsk.spindles}
                  for dsk in inst_disks],
        "disk_template": iinfo.disk_template,
        "disks_active": iinfo.disks_active,
        "hypervisor": iinfo.hypervisor,
        }
      pir["disk_space_total"] = gmi.ComputeDiskSize(iinfo.disk_template,
                                                    pir["disks"])
      instance_data[iinfo.name] = pir

    return instance_data

  def _BuildInputData(self, req):
    """Build input data structures.

    """
    request = req.GetRequest(self.cfg)
    disk_template = None
    if "disk_template" in request:
      disk_template = request["disk_template"]
    self._ComputeClusterData(disk_template=disk_template)

    request["type"] = req.MODE
    self.in_data["request"] = request

    self.in_text = serializer.Dump(self.in_data)

  def Run(self, name, validate=True, call_fn=None):
    """Run an instance allocator and return the results.

    """
    if call_fn is None:
      call_fn = self.rpc.call_iallocator_runner

    ial_params = self.cfg.GetDefaultIAllocatorParameters()

    result = call_fn(self.cfg.GetMasterNode(), name, self.in_text, ial_params)
    result.Raise("Failure while running the iallocator script")

    self.out_text = result.payload
    if validate:
      self._ValidateResult()

  def _ValidateResult(self):
    """Process the allocator results.

    This will process and, if successful, save the result in
    self.out_data and the other parameters.

    """
    try:
      rdict = serializer.Load(self.out_text)
    except Exception, err:
      raise errors.OpExecError("Can't parse iallocator results: %s" % str(err))

    if not isinstance(rdict, dict):
      raise errors.OpExecError("Can't parse iallocator results: not a dict")

    # TODO: remove backwards compatibility in later versions
    if "nodes" in rdict and "result" not in rdict:
      rdict["result"] = rdict["nodes"]
      del rdict["nodes"]

    for key in "success", "info", "result":
      if key not in rdict:
        raise errors.OpExecError("Can't parse iallocator results:"
                                 " missing key '%s'" % key)
      setattr(self, key, rdict[key])

    self.req.ValidateResult(self, self.result)
    self.out_data = rdict

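
# Illustrative sketch, not part of the original module: the typical flow used
# by logical units, assuming an already-initialised config object and RPC
# runner (both hypothetical parameters here). "hail" is the standard Ganeti
# iallocator script; its answer must be a JSON object with the keys "success",
# "info" and "result", which _ValidateResult copies onto the instance.
def _ExampleRunAllocator(cfg, rpc_runner):
  """Runs the default iallocator for a node evacuation request."""
  req = IAReqNodeEvac(instances=["inst1.example.com"],
                      evac_mode=constants.NODE_EVAC_PRI)
  ial = IAllocator(cfg, rpc_runner, req)  # gathers and serializes in_data
  ial.Run("hail")  # executes the script on the master node and validates
  if not ial.success:
    raise errors.OpExecError("iallocator failed: %s" % ial.info)
  return ial.result  # already checked against IAReqNodeEvac.REQ_RESULT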