Package ganeti :: Module config

Source Code for Module ganeti.config

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30   
  31  """Configuration management for Ganeti 
  32   
  33  This module provides the interface to the Ganeti cluster configuration. 
  34   
  35  The configuration data is stored on every node but is updated on the master 
  36  only. After each update, the master distributes the data to the other nodes. 
  37   
   38  Currently, the data storage format is JSON. YAML was slow and consumed too 
   39  much memory. 
  40   
  41  """ 
  42   
  43  # pylint: disable=R0904 
  44  # R0904: Too many public methods 
  45   
  46  import copy 
  47  import os 
  48  import random 
  49  import logging 
  50  import time 
  51  import threading 
  52  import itertools 
  53   
  54  from ganeti import errors 
  55  from ganeti import utils 
  56  from ganeti import constants 
  57  import ganeti.wconfd as wc 
  58  from ganeti import objects 
  59  from ganeti import serializer 
  60  from ganeti import uidpool 
  61  from ganeti import netutils 
  62  from ganeti import runtime 
  63  from ganeti import pathutils 
  64  from ganeti import network 
65 66 67 -def GetWConfdContext(ec_id, livelock):
68 """Prepare a context for communication with WConfd. 69 70 WConfd needs to know the identity of each caller to properly manage locks and 71 detect job death. This helper function prepares the identity object given a 72 job ID (optional) and a livelock file. 73 74 @type ec_id: int, or None 75 @param ec_id: the job ID or None, if the caller isn't a job 76 @type livelock: L{ganeti.utils.livelock.LiveLock} 77 @param livelock: a livelock object holding the lockfile needed for WConfd 78 @return: the WConfd context 79 80 """ 81 if ec_id is None: 82 return (threading.current_thread().getName(), 83 livelock.GetPath(), os.getpid()) 84 else: 85 return (ec_id, 86 livelock.GetPath(), os.getpid())
87
88 89 -def GetConfig(ec_id, livelock, **kwargs):
90 """A utility function for constructing instances of ConfigWriter. 91 92 It prepares a WConfd context and uses it to create a ConfigWriter instance. 93 94 @type ec_id: int, or None 95 @param ec_id: the job ID or None, if the caller isn't a job 96 @type livelock: L{ganeti.utils.livelock.LiveLock} 97 @param livelock: a livelock object holding the lockfile needed for WConfd 98 @type kwargs: dict 99 @param kwargs: Any additional arguments for the ConfigWriter constructor 100 @rtype: L{ConfigWriter} 101 @return: the ConfigWriter context 102 103 """ 104 kwargs['wconfdcontext'] = GetWConfdContext(ec_id, livelock) 105 106 # if the config is to be opened in the accept_foreign mode, we should 107 # also tell the RPC client not to check for the master node 108 accept_foreign = kwargs.get('accept_foreign', False) 109 kwargs['wconfd'] = wc.Client(allow_non_master=accept_foreign) 110 111 return ConfigWriter(**kwargs)
112
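A minimal usage sketch of the helper above, assuming a livelock created via L{ganeti.utils.livelock.LiveLock} with only a name argument (the lock name and function name below are invented):

from ganeti.utils.livelock import LiveLock

def open_config_for_job(job_id):
  # The livelock identifies this process to WConfd for lock management
  # and job-death detection; the name is only an example.
  lock = LiveLock("example-config-reader")
  # GetConfig prepares the WConfd context and RPC client internally.
  cfg = GetConfig(job_id, lock)
  return cfg.GetClusterName()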
113 114 -def _ConfigSync(shared=0):
115 """Configuration synchronization decorator. 116 117 """ 118 def wrap(fn): 119 def sync_function(*args, **kwargs): 120 with args[0].GetConfigManager(shared): 121 return fn(*args, **kwargs)
122 return sync_function 123 return wrap 124 125 # job id used for resource management at config upgrade time 126 _UPGRADE_CONFIG_JID = "jid-cfg-upgrade"
127 128 129 -def _ValidateConfig(data):
130 """Verifies that a configuration dict looks valid. 131 132 This only verifies the version of the configuration. 133 134 @raise errors.ConfigurationError: if the version differs from what 135 we expect 136 137 """ 138 if data['version'] != constants.CONFIG_VERSION: 139 raise errors.ConfigVersionMismatch(constants.CONFIG_VERSION, 140 data['version'])
141
142 143 -class TemporaryReservationManager(object):
144 """A temporary resource reservation manager. 145 146 This is used to reserve resources in a job, before using them, making sure 147 other jobs cannot get them in the meantime. 148 149 """
150 - def __init__(self):
151 self._ec_reserved = {}
152
153 - def Reserved(self, resource):
154 for holder_reserved in self._ec_reserved.values(): 155 if resource in holder_reserved: 156 return True 157 return False
158
159 - def Reserve(self, ec_id, resource):
160 if self.Reserved(resource): 161 raise errors.ReservationError("Duplicate reservation for resource '%s'" 162 % str(resource)) 163 if ec_id not in self._ec_reserved: 164 self._ec_reserved[ec_id] = set([resource]) 165 else: 166 self._ec_reserved[ec_id].add(resource)
167
168 - def DropECReservations(self, ec_id):
169 if ec_id in self._ec_reserved: 170 del self._ec_reserved[ec_id]
171
172 - def GetReserved(self):
173 all_reserved = set() 174 for holder_reserved in self._ec_reserved.values(): 175 all_reserved.update(holder_reserved) 176 return all_reserved
177
178 - def GetECReserved(self, ec_id):
179 """ Used when you want to retrieve all reservations for a specific 180 execution context. E.g when commiting reserved IPs for a specific 181 network. 182 183 """ 184 ec_reserved = set() 185 if ec_id in self._ec_reserved: 186 ec_reserved.update(self._ec_reserved[ec_id]) 187 return ec_reserved
188
189 - def Generate(self, existing, generate_one_fn, ec_id):
190 """Generate a new resource of this type 191 192 """ 193 assert callable(generate_one_fn) 194 195 all_elems = self.GetReserved() 196 all_elems.update(existing) 197 retries = 64 198 while retries > 0: 199 new_resource = generate_one_fn() 200 if new_resource is not None and new_resource not in all_elems: 201 break 202 else: 203 raise errors.ConfigurationError("Not able generate new resource" 204 " (last tried: %s)" % new_resource) 205 self.Reserve(ec_id, new_resource) 206 return new_resource
207
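A short sketch of the reservation pattern this class supports; the MAC generator and execution-context id below are invented for illustration:

def reserve_example_mac(tempres, existing_macs, ec_id):
  # Hypothetical generator; any callable returning one candidate works.
  gen_mac = lambda: "aa:00:00:%02x:%02x:%02x" % (
    random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
  # Generate() retries until the candidate is neither in existing_macs nor
  # already reserved, then records the reservation under ec_id.
  mac = tempres.Generate(existing_macs, gen_mac, ec_id)
  assert tempres.Reserved(mac)
  # Once the job is finished, all its reservations can be dropped at once.
  tempres.DropECReservations(ec_id)
  return mac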
208 209 -def _MatchNameComponentIgnoreCase(short_name, names):
210 """Wrapper around L{utils.text.MatchNameComponent}. 211 212 """ 213 return utils.MatchNameComponent(short_name, names, case_sensitive=False)
214
215 216 -def _CheckInstanceDiskIvNames(disks):
217 """Checks if instance's disks' C{iv_name} attributes are in order. 218 219 @type disks: list of L{objects.Disk} 220 @param disks: List of disks 221 @rtype: list of tuples; (int, string, string) 222 @return: List of wrongly named disks, each tuple contains disk index, 223 expected and actual name 224 225 """ 226 result = [] 227 228 for (idx, disk) in enumerate(disks): 229 exp_iv_name = "disk/%s" % idx 230 if disk.iv_name != exp_iv_name: 231 result.append((idx, exp_iv_name, disk.iv_name)) 232 233 return result
234
235 236 -class ConfigManager(object):
237 """Locks the configuration and exposes it to be read or modified. 238 239 """
240 - def __init__(self, config_writer, shared=False):
241 assert isinstance(config_writer, ConfigWriter), \ 242 "invalid argument: Not a ConfigWriter" 243 self._config_writer = config_writer 244 self._shared = shared
245
246 - def __enter__(self):
247 try: 248 self._config_writer._OpenConfig(self._shared) # pylint: disable=W0212 249 except Exception: 250 logging.debug("Opening configuration failed") 251 try: 252 self._config_writer._CloseConfig(False) # pylint: disable=W0212 253 except Exception: # pylint: disable=W0703 254 logging.debug("Closing configuration failed as well") 255 raise
256
257 - def __exit__(self, exc_type, exc_value, traceback):
258 # save the configuration, if this was a write operation that succeeded 259 if exc_type is not None: 260 logging.debug("Configuration operation failed," 261 " the changes will not be saved") 262 # pylint: disable=W0212 263 self._config_writer._CloseConfig(not self._shared and exc_type is None) 264 return False
265
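Roughly, a ConfigWriter method decorated with @_ConfigSync(shared=1) behaves like the hand-written sketch below (GetConfigManager is the ConfigWriter method the decorator above relies on; the function name here is invented):

def read_cluster_name(cfg_writer):
  # Open the configuration shared, read from it, then close it again;
  # changes are only written back for exclusive operations that succeed.
  with cfg_writer.GetConfigManager(True):
    return cfg_writer._ConfigData().cluster.cluster_name  # pylint: disable=W0212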
266 267 -def _UpdateIvNames(base_idx, disks):
268 """Update the C{iv_name} attribute of disks. 269 270 @type disks: list of L{objects.Disk} 271 272 """ 273 for (idx, disk) in enumerate(disks): 274 disk.iv_name = "disk/%s" % (base_idx + idx)
275
276 277 -class ConfigWriter(object):
278 """The interface to the cluster configuration. 279 280 WARNING: The class is no longer thread-safe! 281 Each thread must construct a separate instance. 282 283 @ivar _all_rms: a list of all temporary reservation managers 284 285 """
286 - def __init__(self, cfg_file=None, offline=False, _getents=runtime.GetEnts, 287 accept_foreign=False, wconfdcontext=None, wconfd=None):
288 self.write_count = 0 289 self._config_data = None 290 self._SetConfigData(None) 291 self._offline = offline 292 if cfg_file is None: 293 self._cfg_file = pathutils.CLUSTER_CONF_FILE 294 else: 295 self._cfg_file = cfg_file 296 self._getents = _getents 297 self._temporary_ids = TemporaryReservationManager() 298 self._all_rms = [self._temporary_ids] 299 # Note: in order to prevent errors when resolving our name later, 300 # we compute it here once and reuse it; it's 301 # better to raise an error before starting to modify the config 302 # file than after it was modified 303 self._my_hostname = netutils.Hostname.GetSysName() 304 self._cfg_id = None 305 self._wconfdcontext = wconfdcontext 306 self._wconfd = wconfd 307 self._accept_foreign = accept_foreign 308 self._lock_count = 0 309 self._lock_current_shared = None
310
311 - def _ConfigData(self):
312 return self._config_data
313
314 - def OutDate(self):
315 self._config_data = None
316
317 - def _SetConfigData(self, cfg):
318 self._config_data = cfg
319
320 - def _GetWConfdContext(self):
321 return self._wconfdcontext
322 323 # this method needs to be static, so that we can call it on the class 324 @staticmethod
325 - def IsCluster():
326 """Check if the cluster is configured. 327 328 """ 329 return os.path.exists(pathutils.CLUSTER_CONF_FILE)
330
331 - def _UnlockedGetNdParams(self, node):
332 nodegroup = self._UnlockedGetNodeGroup(node.group) 333 return self._ConfigData().cluster.FillND(node, nodegroup)
334 335 @_ConfigSync(shared=1)
336 - def GetNdParams(self, node):
337 """Get the node params populated with cluster defaults. 338 339 @type node: L{objects.Node} 340 @param node: The node we want to know the params for 341 @return: A dict with the filled in node params 342 343 """ 344 return self._UnlockedGetNdParams(node)
345 346 @_ConfigSync(shared=1)
347 - def GetNdGroupParams(self, nodegroup):
348 """Get the node groups params populated with cluster defaults. 349 350 @type nodegroup: L{objects.NodeGroup} 351 @param nodegroup: The node group we want to know the params for 352 @return: A dict with the filled in node group params 353 354 """ 355 return self._UnlockedGetNdGroupParams(nodegroup)
356
357 - def _UnlockedGetNdGroupParams(self, group):
358 """Get the ndparams of the group. 359 360 @type group: L{objects.NodeGroup} 361 @param group: The group we want to know the params for 362 @rtype: dict of str to int 363 @return: A dict with the filled in node group params 364 365 """ 366 return self._ConfigData().cluster.FillNDGroup(group)
367 368 @_ConfigSync(shared=1)
369 - def GetGroupSshPorts(self):
370 """Get a map of group UUIDs to SSH ports. 371 372 @rtype: dict of str to int 373 @return: a dict mapping the UUIDs to the SSH ports 374 375 """ 376 port_map = {} 377 for uuid, group in self._config_data.nodegroups.items(): 378 ndparams = self._UnlockedGetNdGroupParams(group) 379 port = ndparams.get(constants.ND_SSH_PORT) 380 port_map[uuid] = port 381 return port_map
382 383 @_ConfigSync(shared=1)
384 - def GetInstanceDiskParams(self, instance):
385 """Get the disk params populated with inherit chain. 386 387 @type instance: L{objects.Instance} 388 @param instance: The instance we want to know the params for 389 @return: A dict with the filled in disk params 390 391 """ 392 node = self._UnlockedGetNodeInfo(instance.primary_node) 393 nodegroup = self._UnlockedGetNodeGroup(node.group) 394 return self._UnlockedGetGroupDiskParams(nodegroup)
395 396 @_ConfigSync()
397 - def SetInstanceDiskTemplate(self, inst_uuid, disk_template):
398 """Set the instance's disk template to the given value. 399 400 @type inst_uuid: string 401 @param inst_uuid: The UUID of the instance object 402 @type disk_template: string 403 @param disk_template: The new disk template of the instance 404 405 """ 406 instance = self._UnlockedGetInstanceInfo(inst_uuid) 407 if instance is None: 408 raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) 409 410 # Update the disk template of the instance 411 instance.disk_template = disk_template
412
413 - def _UnlockedGetInstanceDisks(self, inst_uuid):
414 """Return the disks' info for the given instance 415 416 @type inst_uuid: string 417 @param inst_uuid: The UUID of the instance we want to know the disks for 418 419 @rtype: List of L{objects.Disk} 420 @return: A list with all the disks' info 421 422 """ 423 instance = self._UnlockedGetInstanceInfo(inst_uuid) 424 if instance is None: 425 raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) 426 427 return [self._UnlockedGetDiskInfo(disk_uuid) 428 for disk_uuid in instance.disks]
429 430 @_ConfigSync(shared=1)
431 - def GetInstanceDisks(self, inst_uuid):
432 """Return the disks' info for the given instance 433 434 This is a simple wrapper over L{_UnlockedGetInstanceDisks}. 435 436 """ 437 return self._UnlockedGetInstanceDisks(inst_uuid)
438
439 - def _UnlockedAddDisk(self, disk):
440 """Add a disk to the config. 441 442 @type disk: L{objects.Disk} 443 @param disk: The disk object 444 445 """ 446 if not isinstance(disk, objects.Disk): 447 raise errors.ProgrammerError("Invalid type passed to _UnlockedAddDisk") 448 449 logging.info("Adding disk %s to configuration", disk.uuid) 450 451 self._CheckUniqueUUID(disk, include_temporary=False) 452 disk.serial_no = 1 453 disk.ctime = disk.mtime = time.time() 454 disk.UpgradeConfig() 455 self._ConfigData().disks[disk.uuid] = disk 456 self._ConfigData().cluster.serial_no += 1
457
458 - def _UnlockedAttachInstanceDisk(self, inst_uuid, disk_uuid, idx=None):
459 """Attach a disk to an instance. 460 461 @type inst_uuid: string 462 @param inst_uuid: The UUID of the instance object 463 @type disk_uuid: string 464 @param disk_uuid: The UUID of the disk object 465 @type idx: int 466 @param idx: the index of the newly attached disk; if not 467 passed, the disk will be attached as the last one. 468 469 """ 470 instance = self._UnlockedGetInstanceInfo(inst_uuid) 471 if instance is None: 472 raise errors.ConfigurationError("Instance %s doesn't exist" 473 % inst_uuid) 474 if disk_uuid not in self._ConfigData().disks: 475 raise errors.ConfigurationError("Disk %s doesn't exist" % disk_uuid) 476 477 if idx is None: 478 idx = len(instance.disks) 479 else: 480 if idx < 0: 481 raise IndexError("Not accepting negative indices other than -1") 482 elif idx > len(instance.disks): 483 raise IndexError("Got disk index %s, but there are only %s" % 484 (idx, len(instance.disks))) 485 486 # Disk must not be attached anywhere else 487 for inst in self._ConfigData().instances.values(): 488 if disk_uuid in inst.disks: 489 raise errors.ReservationError("Disk %s already attached to instance %s" 490 % (disk_uuid, inst.name)) 491 492 instance.disks.insert(idx, disk_uuid) 493 instance_disks = self._UnlockedGetInstanceDisks(inst_uuid) 494 _UpdateIvNames(idx, instance_disks[idx:]) 495 instance.serial_no += 1 496 instance.mtime = time.time()
497 498 @_ConfigSync()
499 - def AddInstanceDisk(self, inst_uuid, disk, idx=None):
500 """Add a disk to the config and attach it to instance. 501 502 This is a simple wrapper over L{_UnlockedAddDisk} and 503 L{_UnlockedAttachInstanceDisk}. 504 505 """ 506 self._UnlockedAddDisk(disk) 507 self._UnlockedAttachInstanceDisk(inst_uuid, disk.uuid, idx)
508
509 - def _UnlockedDetachInstanceDisk(self, inst_uuid, disk_uuid):
510 """Detach a disk from an instance. 511 512 @type inst_uuid: string 513 @param inst_uuid: The UUID of the instance object 514 @type disk_uuid: string 515 @param disk_uuid: The UUID of the disk object 516 517 """ 518 instance = self._UnlockedGetInstanceInfo(inst_uuid) 519 if instance is None: 520 raise errors.ConfigurationError("Instance %s doesn't exist" 521 % inst_uuid) 522 if disk_uuid not in self._ConfigData().disks: 523 raise errors.ConfigurationError("Disk %s doesn't exist" % disk_uuid) 524 525 # Check if disk is attached to the instance 526 if disk_uuid not in instance.disks: 527 raise errors.ProgrammerError("Disk %s is not attached to an instance" 528 % disk_uuid) 529 530 idx = instance.disks.index(disk_uuid) 531 instance.disks.remove(disk_uuid) 532 instance_disks = self._UnlockedGetInstanceDisks(inst_uuid) 533 _UpdateIvNames(idx, instance_disks[idx:]) 534 instance.serial_no += 1 535 instance.mtime = time.time()
536
537 - def _UnlockedRemoveDisk(self, disk_uuid):
538 """Remove the disk from the configuration. 539 540 @type disk_uuid: string 541 @param disk_uuid: The UUID of the disk object 542 543 """ 544 if disk_uuid not in self._ConfigData().disks: 545 raise errors.ConfigurationError("Disk %s doesn't exist" % disk_uuid) 546 547 # Disk must not be attached anywhere 548 for inst in self._ConfigData().instances.values(): 549 if disk_uuid in inst.disks: 550 raise errors.ReservationError("Cannot remove disk %s. Disk is" 551 " attached to instance %s" 552 % (disk_uuid, inst.name)) 553 554 # Remove disk from config file 555 del self._ConfigData().disks[disk_uuid] 556 self._ConfigData().cluster.serial_no += 1
557 558 @_ConfigSync()
559 - def RemoveInstanceDisk(self, inst_uuid, disk_uuid):
560 """Detach a disk from an instance and remove it from the config. 561 562 This is a simple wrapper over L{_UnlockedDetachInstanceDisk} and 563 L{_UnlockedRemoveDisk}. 564 565 """ 566 self._UnlockedDetachInstanceDisk(inst_uuid, disk_uuid) 567 self._UnlockedRemoveDisk(disk_uuid)
568
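A sketch of the add/remove cycle provided by the two wrappers above, assuming a fully initialized ConfigWriter and a new L{objects.Disk} object:

def add_and_drop_disk(cfg, inst_uuid, disk):
  # Add the disk to the configuration and attach it to the instance as its
  # last disk (idx=None).
  cfg.AddInstanceDisk(inst_uuid, disk)
  # ... the disk UUID is now listed in the instance's disks ...
  # Detach it and remove it from the configuration again; this raises
  # ReservationError if the disk is still attached to another instance.
  cfg.RemoveInstanceDisk(inst_uuid, disk.uuid)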
569 - def _UnlockedGetDiskInfo(self, disk_uuid):
570 """Returns information about a disk. 571 572 It takes the information from the configuration file. 573 574 @param disk_uuid: UUID of the disk 575 576 @rtype: L{objects.Disk} 577 @return: the disk object 578 579 """ 580 if disk_uuid not in self._ConfigData().disks: 581 return None 582 583 return self._ConfigData().disks[disk_uuid]
584 585 @_ConfigSync(shared=1)
586 - def GetDiskInfo(self, disk_uuid):
587 """Returns information about a disk. 588 589 This is a simple wrapper over L{_UnlockedGetDiskInfo}. 590 591 """ 592 return self._UnlockedGetDiskInfo(disk_uuid)
593
594 - def _AllInstanceNodes(self, inst_uuid):
595 """Compute the set of all disk-related nodes for an instance. 596 597 This abstracts away some work from '_UnlockedGetInstanceNodes' 598 and '_UnlockedGetInstanceSecondaryNodes'. 599 600 @type inst_uuid: string 601 @param inst_uuid: The UUID of the instance we want to get nodes for 602 @rtype: set of strings 603 @return: A set of names for all the nodes of the instance 604 605 """ 606 instance = self._UnlockedGetInstanceInfo(inst_uuid) 607 if instance is None: 608 raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) 609 610 instance_disks = self._UnlockedGetInstanceDisks(inst_uuid) 611 all_nodes = [] 612 for disk in instance_disks: 613 all_nodes.extend(disk.all_nodes) 614 return (set(all_nodes), instance)
615
616 - def _UnlockedGetInstanceNodes(self, inst_uuid):
617 """Get all disk-related nodes for an instance. 618 619 For non-DRBD, this will be empty, for DRBD it will contain both 620 the primary and the secondaries. 621 622 @type inst_uuid: string 623 @param inst_uuid: The UUID of the instance we want to get nodes for 624 @rtype: list of strings 625 @return: A list of names for all the nodes of the instance 626 627 """ 628 (all_nodes, instance) = self._AllInstanceNodes(inst_uuid) 629 # ensure that primary node is always the first 630 all_nodes.discard(instance.primary_node) 631 return (instance.primary_node, ) + tuple(all_nodes)
632 633 @_ConfigSync(shared=1)
634 - def GetInstanceNodes(self, inst_uuid):
635 """Get all disk-related nodes for an instance. 636 637 This is just a wrapper over L{_UnlockedGetInstanceNodes} 638 639 """ 640 return self._UnlockedGetInstanceNodes(inst_uuid)
641
642 - def _UnlockedGetInstanceSecondaryNodes(self, inst_uuid):
643 """Get the list of secondary nodes. 644 645 @type inst_uuid: string 646 @param inst_uuid: The UUID of the instance we want to get nodes for 647 @rtype: list of strings 648 @return: A list of names for all the secondary nodes of the instance 649 650 """ 651 (all_nodes, instance) = self._AllInstanceNodes(inst_uuid) 652 all_nodes.discard(instance.primary_node) 653 return tuple(all_nodes)
654 655 @_ConfigSync(shared=1)
656 - def GetInstanceSecondaryNodes(self, inst_uuid):
657 """Get the list of secondary nodes. 658 659 This is a simple wrapper over L{_UnlockedGetInstanceSecondaryNodes}. 660 661 """ 662 return self._UnlockedGetInstanceSecondaryNodes(inst_uuid)
663
664 - def _UnlockedGetInstanceLVsByNode(self, inst_uuid, lvmap=None):
665 """Provide a mapping of node to LVs a given instance owns. 666 667 @type inst_uuid: string 668 @param inst_uuid: The UUID of the instance we want to 669 compute the LVsByNode for 670 @type lvmap: dict 671 @param lvmap: Optional dictionary to receive the 672 'node' : ['lv', ...] data. 673 @rtype: dict or None 674 @return: None if lvmap arg is given, otherwise, a dictionary of 675 the form { 'node_uuid' : ['volume1', 'volume2', ...], ... }; 676 volumeN is of the form "vg_name/lv_name", compatible with 677 GetVolumeList() 678 679 """ 680 def _MapLVsByNode(lvmap, devices, node_uuid): 681 """Recursive helper function.""" 682 if not node_uuid in lvmap: 683 lvmap[node_uuid] = [] 684 685 for dev in devices: 686 if dev.dev_type == constants.DT_PLAIN: 687 lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1]) 688 689 elif dev.dev_type in constants.DTS_DRBD: 690 if dev.children: 691 _MapLVsByNode(lvmap, dev.children, dev.logical_id[0]) 692 _MapLVsByNode(lvmap, dev.children, dev.logical_id[1]) 693 694 elif dev.children: 695 _MapLVsByNode(lvmap, dev.children, node_uuid)
696 697 instance = self._UnlockedGetInstanceInfo(inst_uuid) 698 if instance is None: 699 raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) 700 701 if lvmap is None: 702 lvmap = {} 703 ret = lvmap 704 else: 705 ret = None 706 707 _MapLVsByNode(lvmap, 708 self._UnlockedGetInstanceDisks(instance.uuid), 709 instance.primary_node) 710 return ret
711 712 @_ConfigSync(shared=1)
713 - def GetInstanceLVsByNode(self, inst_uuid, lvmap=None):
714 """Provide a mapping of node to LVs a given instance owns. 715 716 This is a simple wrapper over L{_UnlockedGetInstanceLVsByNode} 717 718 """ 719 return self._UnlockedGetInstanceLVsByNode(inst_uuid, lvmap=lvmap)
720 721 @_ConfigSync(shared=1)
722 - def GetGroupDiskParams(self, group):
723 """Get the disk params populated with inherit chain. 724 725 @type group: L{objects.NodeGroup} 726 @param group: The group we want to know the params for 727 @return: A dict with the filled in disk params 728 729 """ 730 return self._UnlockedGetGroupDiskParams(group)
731
732 - def _UnlockedGetGroupDiskParams(self, group):
733 """Get the disk params populated with inherit chain down to node-group. 734 735 @type group: L{objects.NodeGroup} 736 @param group: The group we want to know the params for 737 @return: A dict with the filled in disk params 738 739 """ 740 data = self._ConfigData().cluster.SimpleFillDP(group.diskparams) 741 assert isinstance(data, dict), "Not a dictionary: " + str(data) 742 return data
743 744 @_ConfigSync(shared=1)
745 - def GetPotentialMasterCandidates(self):
746 """Gets the list of node names of potential master candidates. 747 748 @rtype: list of str 749 @return: list of node names of potential master candidates 750 751 """ 752 # FIXME: Note that currently potential master candidates are nodes 753 # but this definition will be extended once RAPI-unmodifiable 754 # parameters are introduced. 755 nodes = self._UnlockedGetAllNodesInfo() 756 return [node_info.name for node_info in nodes.values()]
757
758 - def GenerateMAC(self, net_uuid, _ec_id):
759 """Generate a MAC for an instance. 760 761 This should check the current instances for duplicates. 762 763 """ 764 return self._wconfd.GenerateMAC(self._GetWConfdContext(), net_uuid)
765
766 - def ReserveMAC(self, mac, _ec_id):
767 """Reserve a MAC for an instance. 768 769 This only checks instances managed by this cluster, it does not 770 check for potential collisions elsewhere. 771 772 """ 773 self._wconfd.ReserveMAC(self._GetWConfdContext(), mac)
774
775 - def _UnlockedCommitTemporaryIps(self, _ec_id):
776 """Commit all reserved IP address to their respective pools 777 778 """ 779 if self._offline: 780 raise errors.ProgrammerError("Can't call CommitTemporaryIps" 781 " in offline mode") 782 ips = self._wconfd.ListReservedIps(self._GetWConfdContext()) 783 for action, address, net_uuid in ips: 784 self._UnlockedCommitIp(action, net_uuid, address)
785
786 - def _UnlockedCommitIp(self, action, net_uuid, address):
787 """Commit a reserved IP address to an IP pool. 788 789 The IP address is taken from the network's IP pool and marked as free. 790 791 """ 792 nobj = self._UnlockedGetNetwork(net_uuid) 793 if nobj is None: 794 raise errors.ProgrammerError("Network '%s' not found" % (net_uuid, )) 795 pool = network.AddressPool(nobj) 796 if action == constants.RESERVE_ACTION: 797 pool.Reserve(address) 798 elif action == constants.RELEASE_ACTION: 799 pool.Release(address)
800
801 - def ReleaseIp(self, net_uuid, address, _ec_id):
802 """Give a specific IP address back to an IP pool. 803 804 The IP address is returned to the IP pool and marked as reserved. 805 806 """ 807 if net_uuid: 808 if self._offline: 809 raise errors.ProgrammerError("Can't call ReleaseIp in offline mode") 810 self._wconfd.ReleaseIp(self._GetWConfdContext(), net_uuid, address)
811
812 - def GenerateIp(self, net_uuid, _ec_id):
813 """Find a free IPv4 address for an instance. 814 815 """ 816 if self._offline: 817 raise errors.ProgrammerError("Can't call GenerateIp in offline mode") 818 return self._wconfd.GenerateIp(self._GetWConfdContext(), net_uuid)
819
820 - def ReserveIp(self, net_uuid, address, _ec_id, check=True):
821 """Reserve a given IPv4 address for use by an instance. 822 823 """ 824 if self._offline: 825 raise errors.ProgrammerError("Can't call ReserveIp in offline mode") 826 return self._wconfd.ReserveIp(self._GetWConfdContext(), net_uuid, address, 827 check)
828
829 - def ReserveLV(self, lv_name, _ec_id):
830 """Reserve an VG/LV pair for an instance. 831 832 @type lv_name: string 833 @param lv_name: the logical volume name to reserve 834 835 """ 836 return self._wconfd.ReserveLV(self._GetWConfdContext(), lv_name)
837
838 - def GenerateDRBDSecret(self, _ec_id):
839 """Generate a DRBD secret. 840 841 This checks the current disks for duplicates. 842 843 """ 844 return self._wconfd.GenerateDRBDSecret(self._GetWConfdContext())
845 846 # FIXME: After _AllIDs is removed, move it to config_mock.py
847 - def _AllLVs(self):
848 """Compute the list of all LVs. 849 850 """ 851 lvnames = set() 852 for instance in self._ConfigData().instances.values(): 853 node_data = self._UnlockedGetInstanceLVsByNode(instance.uuid) 854 for lv_list in node_data.values(): 855 lvnames.update(lv_list) 856 return lvnames
857
858 - def _AllNICs(self):
859 """Compute the list of all NICs. 860 861 """ 862 nics = [] 863 for instance in self._ConfigData().instances.values(): 864 nics.extend(instance.nics) 865 return nics
866
867 - def _AllIDs(self, include_temporary):
868 """Compute the list of all UUIDs and names we have. 869 870 @type include_temporary: boolean 871 @param include_temporary: whether to include the _temporary_ids set 872 @rtype: set 873 @return: a set of IDs 874 875 """ 876 existing = set() 877 if include_temporary: 878 existing.update(self._temporary_ids.GetReserved()) 879 existing.update(self._AllLVs()) 880 existing.update(self._ConfigData().instances.keys()) 881 existing.update(self._ConfigData().nodes.keys()) 882 existing.update([i.uuid for i in self._AllUUIDObjects() if i.uuid]) 883 return existing
884
885 - def _GenerateUniqueID(self, ec_id):
886 """Generate an unique UUID. 887 888 This checks the current node, instances and disk names for 889 duplicates. 890 891 @rtype: string 892 @return: the unique id 893 894 """ 895 existing = self._AllIDs(include_temporary=False) 896 return self._temporary_ids.Generate(existing, utils.NewUUID, ec_id)
897 898 @_ConfigSync(shared=1)
899 - def GenerateUniqueID(self, ec_id):
900 """Generate an unique ID. 901 902 This is just a wrapper over the unlocked version. 903 904 @type ec_id: string 905 @param ec_id: unique id for the job to reserve the id to 906 907 """ 908 return self._GenerateUniqueID(ec_id)
909
910 - def _AllMACs(self):
911 """Return all MACs present in the config. 912 913 @rtype: list 914 @return: the list of all MACs 915 916 """ 917 result = [] 918 for instance in self._ConfigData().instances.values(): 919 for nic in instance.nics: 920 result.append(nic.mac) 921 922 return result
923
924 - def _AllDRBDSecrets(self):
925 """Return all DRBD secrets present in the config. 926 927 @rtype: list 928 @return: the list of all DRBD secrets 929 930 """ 931 def helper(disk, result): 932 """Recursively gather secrets from this disk.""" 933 if disk.dev_type == constants.DT_DRBD8: 934 result.append(disk.logical_id[5]) 935 if disk.children: 936 for child in disk.children: 937 helper(child, result)
938 939 result = [] 940 for disk in self._ConfigData().disks.values(): 941 helper(disk, result) 942 943 return result 944 945 @staticmethod
946 - def _VerifyDisks(data, result):
947 """Per-disk verification checks 948 949 Extends L{result} with diagnostic information about the disks. 950 951 @type data: see L{_ConfigData} 952 @param data: configuration data 953 954 @type result: list of strings 955 @param result: list containing diagnostic messages 956 957 """ 958 instance_disk_uuids = [d for insts in data.instances.values() 959 for d in insts.disks] 960 for disk_uuid in data.disks: 961 disk = data.disks[disk_uuid] 962 result.extend(["disk %s error: %s" % (disk.uuid, msg) 963 for msg in disk.Verify()]) 964 if disk.uuid != disk_uuid: 965 result.append("disk '%s' is indexed by wrong UUID '%s'" % 966 (disk.name, disk_uuid)) 967 if disk.uuid not in instance_disk_uuids: 968 result.append("disk '%s' is not attached to any instance" % 969 disk.uuid)
970
971 - def _UnlockedVerifyConfig(self):
972 """Verify function. 973 974 @rtype: list 975 @return: a list of error messages; a non-empty list signifies 976 configuration errors 977 978 """ 979 # pylint: disable=R0914 980 result = [] 981 seen_macs = [] 982 ports = {} 983 data = self._ConfigData() 984 cluster = data.cluster 985 986 # First call WConfd to perform its checks, if we're not offline 987 if not self._offline: 988 try: 989 self._wconfd.VerifyConfig() 990 except errors.ConfigVerifyError, err: 991 try: 992 for msg in err.args[1]: 993 result.append(msg) 994 except IndexError: 995 pass 996 997 def _helper(owner, attr, value, template): 998 try: 999 utils.ForceDictType(value, template) 1000 except errors.GenericError, err: 1001 result.append("%s has invalid %s: %s" % (owner, attr, err))
1002 1003 def _helper_nic(owner, params): 1004 try: 1005 objects.NIC.CheckParameterSyntax(params) 1006 except errors.ConfigurationError, err: 1007 result.append("%s has invalid nicparams: %s" % (owner, err)) 1008 1009 def _helper_ipolicy(owner, ipolicy, iscluster): 1010 try: 1011 objects.InstancePolicy.CheckParameterSyntax(ipolicy, iscluster) 1012 except errors.ConfigurationError, err: 1013 result.append("%s has invalid instance policy: %s" % (owner, err)) 1014 for key, value in ipolicy.items(): 1015 if key == constants.ISPECS_MINMAX: 1016 for k in range(len(value)): 1017 _helper_ispecs(owner, "ipolicy/%s[%s]" % (key, k), value[k]) 1018 elif key == constants.ISPECS_STD: 1019 _helper(owner, "ipolicy/" + key, value, 1020 constants.ISPECS_PARAMETER_TYPES) 1021 else: 1022 # FIXME: assuming list type 1023 if key in constants.IPOLICY_PARAMETERS: 1024 exp_type = float 1025 # if the value is int, it can be converted into float 1026 convertible_types = [int] 1027 else: 1028 exp_type = list 1029 convertible_types = [] 1030 # Try to convert from allowed types, if necessary. 1031 if any(isinstance(value, ct) for ct in convertible_types): 1032 try: 1033 value = exp_type(value) 1034 ipolicy[key] = value 1035 except ValueError: 1036 pass 1037 if not isinstance(value, exp_type): 1038 result.append("%s has invalid instance policy: for %s," 1039 " expecting %s, got %s" % 1040 (owner, key, exp_type.__name__, type(value))) 1041 1042 def _helper_ispecs(owner, parentkey, params): 1043 for (key, value) in params.items(): 1044 fullkey = "/".join([parentkey, key]) 1045 _helper(owner, fullkey, value, constants.ISPECS_PARAMETER_TYPES) 1046 1047 # check cluster parameters 1048 _helper("cluster", "beparams", cluster.SimpleFillBE({}), 1049 constants.BES_PARAMETER_TYPES) 1050 _helper("cluster", "nicparams", cluster.SimpleFillNIC({}), 1051 constants.NICS_PARAMETER_TYPES) 1052 _helper_nic("cluster", cluster.SimpleFillNIC({})) 1053 _helper("cluster", "ndparams", cluster.SimpleFillND({}), 1054 constants.NDS_PARAMETER_TYPES) 1055 _helper_ipolicy("cluster", cluster.ipolicy, True) 1056 1057 for disk_template in cluster.diskparams: 1058 if disk_template not in constants.DTS_HAVE_ACCESS: 1059 continue 1060 1061 access = cluster.diskparams[disk_template].get(constants.LDP_ACCESS, 1062 constants.DISK_KERNELSPACE) 1063 if access not in constants.DISK_VALID_ACCESS_MODES: 1064 result.append( 1065 "Invalid value of '%s:%s': '%s' (expected one of %s)" % ( 1066 disk_template, constants.LDP_ACCESS, access, 1067 utils.CommaJoin(constants.DISK_VALID_ACCESS_MODES) 1068 ) 1069 ) 1070 1071 self._VerifyDisks(data, result) 1072 1073 # per-instance checks 1074 for instance_uuid in data.instances: 1075 instance = data.instances[instance_uuid] 1076 if instance.uuid != instance_uuid: 1077 result.append("instance '%s' is indexed by wrong UUID '%s'" % 1078 (instance.name, instance_uuid)) 1079 if instance.primary_node not in data.nodes: 1080 result.append("instance '%s' has invalid primary node '%s'" % 1081 (instance.name, instance.primary_node)) 1082 for snode in self._UnlockedGetInstanceSecondaryNodes(instance.uuid): 1083 if snode not in data.nodes: 1084 result.append("instance '%s' has invalid secondary node '%s'" % 1085 (instance.name, snode)) 1086 for idx, nic in enumerate(instance.nics): 1087 if nic.mac in seen_macs: 1088 result.append("instance '%s' has NIC %d mac %s duplicate" % 1089 (instance.name, idx, nic.mac)) 1090 else: 1091 seen_macs.append(nic.mac) 1092 if nic.nicparams: 1093 filled = cluster.SimpleFillNIC(nic.nicparams) 1094 owner = 
"instance %s nic %d" % (instance.name, idx) 1095 _helper(owner, "nicparams", 1096 filled, constants.NICS_PARAMETER_TYPES) 1097 _helper_nic(owner, filled) 1098 1099 # disk template checks 1100 if not instance.disk_template in data.cluster.enabled_disk_templates: 1101 result.append("instance '%s' uses the disabled disk template '%s'." % 1102 (instance.name, instance.disk_template)) 1103 1104 # parameter checks 1105 if instance.beparams: 1106 _helper("instance %s" % instance.name, "beparams", 1107 cluster.FillBE(instance), constants.BES_PARAMETER_TYPES) 1108 1109 # check that disks exists 1110 for disk_uuid in instance.disks: 1111 if disk_uuid not in data.disks: 1112 result.append("Instance '%s' has invalid disk '%s'" % 1113 (instance.name, disk_uuid)) 1114 1115 instance_disks = self._UnlockedGetInstanceDisks(instance.uuid) 1116 # gather the drbd ports for duplicate checks 1117 for (idx, dsk) in enumerate(instance_disks): 1118 if dsk.dev_type in constants.DTS_DRBD: 1119 tcp_port = dsk.logical_id[2] 1120 if tcp_port not in ports: 1121 ports[tcp_port] = [] 1122 ports[tcp_port].append((instance.name, "drbd disk %s" % idx)) 1123 # gather network port reservation 1124 net_port = getattr(instance, "network_port", None) 1125 if net_port is not None: 1126 if net_port not in ports: 1127 ports[net_port] = [] 1128 ports[net_port].append((instance.name, "network port")) 1129 1130 wrong_names = _CheckInstanceDiskIvNames(instance_disks) 1131 if wrong_names: 1132 tmp = "; ".join(("name of disk %s should be '%s', but is '%s'" % 1133 (idx, exp_name, actual_name)) 1134 for (idx, exp_name, actual_name) in wrong_names) 1135 1136 result.append("Instance '%s' has wrongly named disks: %s" % 1137 (instance.name, tmp)) 1138 1139 # cluster-wide pool of free ports 1140 for free_port in cluster.tcpudp_port_pool: 1141 if free_port not in ports: 1142 ports[free_port] = [] 1143 ports[free_port].append(("cluster", "port marked as free")) 1144 1145 # compute tcp/udp duplicate ports 1146 keys = ports.keys() 1147 keys.sort() 1148 for pnum in keys: 1149 pdata = ports[pnum] 1150 if len(pdata) > 1: 1151 txt = utils.CommaJoin(["%s/%s" % val for val in pdata]) 1152 result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt)) 1153 1154 # highest used tcp port check 1155 if keys: 1156 if keys[-1] > cluster.highest_used_port: 1157 result.append("Highest used port mismatch, saved %s, computed %s" % 1158 (cluster.highest_used_port, keys[-1])) 1159 1160 if not data.nodes[cluster.master_node].master_candidate: 1161 result.append("Master node is not a master candidate") 1162 1163 # master candidate checks 1164 mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats() 1165 if mc_now < mc_max: 1166 result.append("Not enough master candidates: actual %d, target %d" % 1167 (mc_now, mc_max)) 1168 1169 # node checks 1170 for node_uuid, node in data.nodes.items(): 1171 if node.uuid != node_uuid: 1172 result.append("Node '%s' is indexed by wrong UUID '%s'" % 1173 (node.name, node_uuid)) 1174 if [node.master_candidate, node.drained, node.offline].count(True) > 1: 1175 result.append("Node %s state is invalid: master_candidate=%s," 1176 " drain=%s, offline=%s" % 1177 (node.name, node.master_candidate, node.drained, 1178 node.offline)) 1179 if node.group not in data.nodegroups: 1180 result.append("Node '%s' has invalid group '%s'" % 1181 (node.name, node.group)) 1182 else: 1183 _helper("node %s" % node.name, "ndparams", 1184 cluster.FillND(node, data.nodegroups[node.group]), 1185 constants.NDS_PARAMETER_TYPES) 1186 used_globals = 
constants.NDC_GLOBALS.intersection(node.ndparams) 1187 if used_globals: 1188 result.append("Node '%s' has some global parameters set: %s" % 1189 (node.name, utils.CommaJoin(used_globals))) 1190 1191 # nodegroups checks 1192 nodegroups_names = set() 1193 for nodegroup_uuid in data.nodegroups: 1194 nodegroup = data.nodegroups[nodegroup_uuid] 1195 if nodegroup.uuid != nodegroup_uuid: 1196 result.append("node group '%s' (uuid: '%s') indexed by wrong uuid '%s'" 1197 % (nodegroup.name, nodegroup.uuid, nodegroup_uuid)) 1198 if utils.UUID_RE.match(nodegroup.name.lower()): 1199 result.append("node group '%s' (uuid: '%s') has uuid-like name" % 1200 (nodegroup.name, nodegroup.uuid)) 1201 if nodegroup.name in nodegroups_names: 1202 result.append("duplicate node group name '%s'" % nodegroup.name) 1203 else: 1204 nodegroups_names.add(nodegroup.name) 1205 group_name = "group %s" % nodegroup.name 1206 _helper_ipolicy(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy), 1207 False) 1208 if nodegroup.ndparams: 1209 _helper(group_name, "ndparams", 1210 cluster.SimpleFillND(nodegroup.ndparams), 1211 constants.NDS_PARAMETER_TYPES) 1212 1213 # drbd minors check 1214 # FIXME: The check for DRBD map needs to be implemented in WConfd 1215 1216 # IP checks 1217 default_nicparams = cluster.nicparams[constants.PP_DEFAULT] 1218 ips = {} 1219 1220 def _AddIpAddress(ip, name): 1221 ips.setdefault(ip, []).append(name) 1222 1223 _AddIpAddress(cluster.master_ip, "cluster_ip") 1224 1225 for node in data.nodes.values(): 1226 _AddIpAddress(node.primary_ip, "node:%s/primary" % node.name) 1227 if node.secondary_ip != node.primary_ip: 1228 _AddIpAddress(node.secondary_ip, "node:%s/secondary" % node.name) 1229 1230 for instance in data.instances.values(): 1231 for idx, nic in enumerate(instance.nics): 1232 if nic.ip is None: 1233 continue 1234 1235 nicparams = objects.FillDict(default_nicparams, nic.nicparams) 1236 nic_mode = nicparams[constants.NIC_MODE] 1237 nic_link = nicparams[constants.NIC_LINK] 1238 1239 if nic_mode == constants.NIC_MODE_BRIDGED: 1240 link = "bridge:%s" % nic_link 1241 elif nic_mode == constants.NIC_MODE_ROUTED: 1242 link = "route:%s" % nic_link 1243 elif nic_mode == constants.NIC_MODE_OVS: 1244 link = "ovs:%s" % nic_link 1245 else: 1246 raise errors.ProgrammerError("NIC mode '%s' not handled" % nic_mode) 1247 1248 _AddIpAddress("%s/%s/%s" % (link, nic.ip, nic.network), 1249 "instance:%s/nic:%d" % (instance.name, idx)) 1250 1251 for ip, owners in ips.items(): 1252 if len(owners) > 1: 1253 result.append("IP address %s is used by multiple owners: %s" % 1254 (ip, utils.CommaJoin(owners))) 1255 1256 return result 1257
1258 - def _UnlockedVerifyConfigAndLog(self, feedback_fn=None):
1259 """Verify the configuration and log any errors. 1260 1261 The errors get logged as critical errors and also to the feedback function, 1262 if given. 1263 1264 @param feedback_fn: Callable feedback function 1265 @rtype: list 1266 @return: a list of error messages; a non-empty list signifies 1267 configuration errors 1268 1269 """ 1270 assert feedback_fn is None or callable(feedback_fn) 1271 1272 # Warn on config errors, but don't abort the save - the 1273 # configuration has already been modified, and we can't revert; 1274 # the best we can do is to warn the user and save as is, leaving 1275 # recovery to the user 1276 config_errors = self._UnlockedVerifyConfig() 1277 if config_errors: 1278 errmsg = ("Configuration data is not consistent: %s" % 1279 (utils.CommaJoin(config_errors))) 1280 logging.critical(errmsg) 1281 if feedback_fn: 1282 feedback_fn(errmsg) 1283 return config_errors
1284 1285 @_ConfigSync(shared=1)
1286 - def VerifyConfig(self):
1287 """Verify function. 1288 1289 This is just a wrapper over L{_UnlockedVerifyConfig}. 1290 1291 @rtype: list 1292 @return: a list of error messages; a non-empty list signifies 1293 configuration errors 1294 1295 """ 1296 return self._UnlockedVerifyConfig()
1297 1298 @_ConfigSync()
1299 - def AddTcpUdpPort(self, port):
1300 """Adds a new port to the available port pool. 1301 1302 @warning: this method does not "flush" the configuration (via 1303 L{_WriteConfig}); callers should do that themselves once the 1304 configuration is stable 1305 1306 """ 1307 if not isinstance(port, int): 1308 raise errors.ProgrammerError("Invalid type passed for port") 1309 1310 self._ConfigData().cluster.tcpudp_port_pool.add(port)
1311 1312 @_ConfigSync(shared=1)
1313 - def GetPortList(self):
1314 """Returns a copy of the current port list. 1315 1316 """ 1317 return self._ConfigData().cluster.tcpudp_port_pool.copy()
1318 1319 @_ConfigSync()
1320 - def AllocatePort(self):
1321 """Allocate a port. 1322 1323 The port will be taken from the available port pool or from the 1324 default port range (and in this case we increase 1325 highest_used_port). 1326 1327 """ 1328 # If there are TCP/IP ports configured, we use them first. 1329 if self._ConfigData().cluster.tcpudp_port_pool: 1330 port = self._ConfigData().cluster.tcpudp_port_pool.pop() 1331 else: 1332 port = self._ConfigData().cluster.highest_used_port + 1 1333 if port >= constants.LAST_DRBD_PORT: 1334 raise errors.ConfigurationError("The highest used port is greater" 1335 " than %s. Aborting." % 1336 constants.LAST_DRBD_PORT) 1337 self._ConfigData().cluster.highest_used_port = port 1338 return port
1339 1340 @_ConfigSync(shared=1)
1341 - def ComputeDRBDMap(self):
1342 """Compute the used DRBD minor/nodes. 1343 1344 This is just a wrapper over a call to WConfd. 1345 1346 @return: dictionary of node_uuid: dict of minor: instance_uuid; 1347 the returned dict will have all the nodes in it (even if with 1348 an empty list). 1349 1350 """ 1351 if self._offline: 1352 raise errors.ProgrammerError("Can't call ComputeDRBDMap in offline mode") 1353 else: 1354 return dict(map(lambda (k, v): (k, dict(v)), 1355 self._wconfd.ComputeDRBDMap()))
1356
1357 - def AllocateDRBDMinor(self, node_uuids, inst_uuid):
1358 """Allocate a drbd minor. 1359 1360 This is just a wrapper over a call to WConfd. 1361 1362 The free minor will be automatically computed from the existing 1363 devices. A node can be given multiple times in order to allocate 1364 multiple minors. The result is the list of minors, in the same 1365 order as the passed nodes. 1366 1367 @type inst_uuid: string 1368 @param inst_uuid: the instance for which we allocate minors 1369 1370 """ 1371 assert isinstance(inst_uuid, basestring), \ 1372 "Invalid argument '%s' passed to AllocateDRBDMinor" % inst_uuid 1373 1374 if self._offline: 1375 raise errors.ProgrammerError("Can't call AllocateDRBDMinor" 1376 " in offline mode") 1377 1378 result = self._wconfd.AllocateDRBDMinor(inst_uuid, node_uuids) 1379 logging.debug("Request to allocate drbd minors, input: %s, returning %s", 1380 node_uuids, result) 1381 return result
1382
1383 - def _UnlockedReleaseDRBDMinors(self, inst_uuid):
1384 """Release temporary drbd minors allocated for a given instance. 1385 1386 This is just a wrapper over a call to WConfd. 1387 1388 @type inst_uuid: string 1389 @param inst_uuid: the instance for which temporary minors should be 1390 released 1391 1392 """ 1393 assert isinstance(inst_uuid, basestring), \ 1394 "Invalid argument passed to ReleaseDRBDMinors" 1395 # in offline mode we allow the calls to release DRBD minors, 1396 # because then nothing can be allocated anyway; 1397 # this is useful for testing 1398 if not self._offline: 1399 self._wconfd.ReleaseDRBDMinors(inst_uuid)
1400 1401 @_ConfigSync()
1402 - def ReleaseDRBDMinors(self, inst_uuid):
1403 """Release temporary drbd minors allocated for a given instance. 1404 1405 This should be called on the error paths, on the success paths 1406 it's automatically called by the ConfigWriter add and update 1407 functions. 1408 1409 This function is just a wrapper over L{_UnlockedReleaseDRBDMinors}. 1410 1411 @type inst_uuid: string 1412 @param inst_uuid: the instance for which temporary minors should be 1413 released 1414 1415 """ 1416 self._UnlockedReleaseDRBDMinors(inst_uuid)
1417 1418 @_ConfigSync(shared=1)
1419 - def GetConfigVersion(self):
1420 """Get the configuration version. 1421 1422 @return: Config version 1423 1424 """ 1425 return self._ConfigData().version
1426 1427 @_ConfigSync(shared=1)
1428 - def GetClusterName(self):
1429 """Get cluster name. 1430 1431 @return: Cluster name 1432 1433 """ 1434 return self._ConfigData().cluster.cluster_name
1435 1436 @_ConfigSync(shared=1)
1437 - def GetMasterNode(self):
1438 """Get the UUID of the master node for this cluster. 1439 1440 @return: Master node UUID 1441 1442 """ 1443 return self._ConfigData().cluster.master_node
1444 1445 @_ConfigSync(shared=1)
1446 - def GetMasterNodeName(self):
1447 """Get the hostname of the master node for this cluster. 1448 1449 @return: Master node hostname 1450 1451 """ 1452 return self._UnlockedGetNodeName(self._ConfigData().cluster.master_node)
1453 1454 @_ConfigSync(shared=1)
1455 - def GetMasterNodeInfo(self):
1456 """Get the master node information for this cluster. 1457 1458 @rtype: objects.Node 1459 @return: Master node L{objects.Node} object 1460 1461 """ 1462 return self._UnlockedGetNodeInfo(self._ConfigData().cluster.master_node)
1463 1464 @_ConfigSync(shared=1)
1465 - def GetMasterIP(self):
1466 """Get the IP of the master node for this cluster. 1467 1468 @return: Master IP 1469 1470 """ 1471 return self._ConfigData().cluster.master_ip
1472 1473 @_ConfigSync(shared=1)
1474 - def GetMasterNetdev(self):
1475 """Get the master network device for this cluster. 1476 1477 """ 1478 return self._ConfigData().cluster.master_netdev
1479 1480 @_ConfigSync(shared=1)
1481 - def GetMasterNetmask(self):
1482 """Get the netmask of the master node for this cluster. 1483 1484 """ 1485 return self._ConfigData().cluster.master_netmask
1486 1487 @_ConfigSync(shared=1)
1488 - def GetUseExternalMipScript(self):
1489 """Get flag representing whether to use the external master IP setup script. 1490 1491 """ 1492 return self._ConfigData().cluster.use_external_mip_script
1493 1494 @_ConfigSync(shared=1)
1495 - def GetFileStorageDir(self):
1496 """Get the file storage dir for this cluster. 1497 1498 """ 1499 return self._ConfigData().cluster.file_storage_dir
1500 1501 @_ConfigSync(shared=1)
1502 - def GetSharedFileStorageDir(self):
1503 """Get the shared file storage dir for this cluster. 1504 1505 """ 1506 return self._ConfigData().cluster.shared_file_storage_dir
1507 1508 @_ConfigSync(shared=1)
1509 - def GetGlusterStorageDir(self):
1510 """Get the Gluster storage dir for this cluster. 1511 1512 """ 1513 return self._ConfigData().cluster.gluster_storage_dir
1514 1515 @_ConfigSync(shared=1)
1516 - def GetHypervisorType(self):
1517 """Get the hypervisor type for this cluster. 1518 1519 """ 1520 return self._ConfigData().cluster.enabled_hypervisors[0]
1521 1522 @_ConfigSync(shared=1)
1523 - def GetRsaHostKey(self):
1524 """Return the rsa hostkey from the config. 1525 1526 @rtype: string 1527 @return: the rsa hostkey 1528 1529 """ 1530 return self._ConfigData().cluster.rsahostkeypub
1531 1532 @_ConfigSync(shared=1)
1533 - def GetDsaHostKey(self):
1534 """Return the dsa hostkey from the config. 1535 1536 @rtype: string 1537 @return: the dsa hostkey 1538 1539 """ 1540 return self._ConfigData().cluster.dsahostkeypub
1541 1542 @_ConfigSync(shared=1)
1543 - def GetDefaultIAllocator(self):
1544 """Get the default instance allocator for this cluster. 1545 1546 """ 1547 return self._ConfigData().cluster.default_iallocator
1548 1549 @_ConfigSync(shared=1)
1550 - def GetDefaultIAllocatorParameters(self):
1551 """Get the default instance allocator parameters for this cluster. 1552 1553 @rtype: dict 1554 @return: dict of iallocator parameters 1555 1556 """ 1557 return self._ConfigData().cluster.default_iallocator_params
1558 1559 @_ConfigSync(shared=1)
1560 - def GetPrimaryIPFamily(self):
1561 """Get cluster primary ip family. 1562 1563 @return: primary ip family 1564 1565 """ 1566 return self._ConfigData().cluster.primary_ip_family
1567 1568 @_ConfigSync(shared=1)
1569 - def GetMasterNetworkParameters(self):
1570 """Get network parameters of the master node. 1571 1572 @rtype: L{object.MasterNetworkParameters} 1573 @return: network parameters of the master node 1574 1575 """ 1576 cluster = self._ConfigData().cluster 1577 result = objects.MasterNetworkParameters( 1578 uuid=cluster.master_node, ip=cluster.master_ip, 1579 netmask=cluster.master_netmask, netdev=cluster.master_netdev, 1580 ip_family=cluster.primary_ip_family) 1581 1582 return result
1583 1584 @_ConfigSync(shared=1)
1585 - def GetInstallImage(self):
1586 """Get the install image location 1587 1588 @rtype: string 1589 @return: location of the install image 1590 1591 """ 1592 return self._ConfigData().cluster.install_image
1593 1594 @_ConfigSync()
1595 - def SetInstallImage(self, install_image):
1596 """Set the install image location 1597 1598 @type install_image: string 1599 @param install_image: location of the install image 1600 1601 """ 1602 self._ConfigData().cluster.install_image = install_image
1603 1604 @_ConfigSync(shared=1)
1605 - def GetInstanceCommunicationNetwork(self):
1606 """Get cluster instance communication network 1607 1608 @rtype: string 1609 @return: instance communication network, which is the name of the 1610 network used for instance communication 1611 1612 """ 1613 return self._ConfigData().cluster.instance_communication_network
1614 1615 @_ConfigSync()
1616 - def SetInstanceCommunicationNetwork(self, network_name):
1617 """Set cluster instance communication network 1618 1619 @type network_name: string 1620 @param network_name: instance communication network, which is the name of 1621 the network used for instance communication 1622 1623 """ 1624 self._ConfigData().cluster.instance_communication_network = network_name
1625 1626 @_ConfigSync(shared=1)
1627 - def GetZeroingImage(self):
1628 """Get the zeroing image location 1629 1630 @rtype: string 1631 @return: the location of the zeroing image 1632 1633 """ 1634 return self._config_data.cluster.zeroing_image
1635 1636 @_ConfigSync(shared=1)
1637 - def GetCompressionTools(self):
1638 """Get cluster compression tools 1639 1640 @rtype: list of string 1641 @return: a list of tools that are cleared for use in this cluster for the 1642 purpose of compressing data 1643 1644 """ 1645 return self._ConfigData().cluster.compression_tools
1646 1647 @_ConfigSync()
1648 - def SetCompressionTools(self, tools):
1649 """Set cluster compression tools 1650 1651 @type tools: list of string 1652 @param tools: a list of tools that are cleared for use in this cluster for 1653 the purpose of compressing data 1654 1655 """ 1656 self._ConfigData().cluster.compression_tools = tools
1657 1658 @_ConfigSync()
1659 - def AddNodeGroup(self, group, ec_id, check_uuid=True):
1660 """Add a node group to the configuration. 1661 1662 This method calls group.UpgradeConfig() to fill any missing attributes 1663 according to their default values. 1664 1665 @type group: L{objects.NodeGroup} 1666 @param group: the NodeGroup object to add 1667 @type ec_id: string 1668 @param ec_id: unique id for the job to use when creating a missing UUID 1669 @type check_uuid: bool 1670 @param check_uuid: add an UUID to the group if it doesn't have one or, if 1671 it does, ensure that it does not exist in the 1672 configuration already 1673 1674 """ 1675 self._UnlockedAddNodeGroup(group, ec_id, check_uuid)
1676
1677 - def _UnlockedAddNodeGroup(self, group, ec_id, check_uuid):
1678 """Add a node group to the configuration. 1679 1680 """ 1681 logging.info("Adding node group %s to configuration", group.name) 1682 1683 # Some code might need to add a node group with a pre-populated UUID 1684 # generated with ConfigWriter.GenerateUniqueID(). We allow them to bypass 1685 # the "does this UUID" exist already check. 1686 if check_uuid: 1687 self._EnsureUUID(group, ec_id) 1688 1689 try: 1690 existing_uuid = self._UnlockedLookupNodeGroup(group.name) 1691 except errors.OpPrereqError: 1692 pass 1693 else: 1694 raise errors.OpPrereqError("Desired group name '%s' already exists as a" 1695 " node group (UUID: %s)" % 1696 (group.name, existing_uuid), 1697 errors.ECODE_EXISTS) 1698 1699 group.serial_no = 1 1700 group.ctime = group.mtime = time.time() 1701 group.UpgradeConfig() 1702 1703 self._ConfigData().nodegroups[group.uuid] = group 1704 self._ConfigData().cluster.serial_no += 1
1705 1706 @_ConfigSync()
1707 - def RemoveNodeGroup(self, group_uuid):
1708 """Remove a node group from the configuration. 1709 1710 @type group_uuid: string 1711 @param group_uuid: the UUID of the node group to remove 1712 1713 """ 1714 logging.info("Removing node group %s from configuration", group_uuid) 1715 1716 if group_uuid not in self._ConfigData().nodegroups: 1717 raise errors.ConfigurationError("Unknown node group '%s'" % group_uuid) 1718 1719 assert len(self._ConfigData().nodegroups) != 1, \ 1720 "Group '%s' is the only group, cannot be removed" % group_uuid 1721 1722 del self._ConfigData().nodegroups[group_uuid] 1723 self._ConfigData().cluster.serial_no += 1
1724
1725 - def _UnlockedLookupNodeGroup(self, target):
1726 """Lookup a node group's UUID. 1727 1728 @type target: string or None 1729 @param target: group name or UUID or None to look for the default 1730 @rtype: string 1731 @return: nodegroup UUID 1732 @raises errors.OpPrereqError: when the target group cannot be found 1733 1734 """ 1735 if target is None: 1736 if len(self._ConfigData().nodegroups) != 1: 1737 raise errors.OpPrereqError("More than one node group exists. Target" 1738 " group must be specified explicitly.") 1739 else: 1740 return self._ConfigData().nodegroups.keys()[0] 1741 if target in self._ConfigData().nodegroups: 1742 return target 1743 for nodegroup in self._ConfigData().nodegroups.values(): 1744 if nodegroup.name == target: 1745 return nodegroup.uuid 1746 raise errors.OpPrereqError("Node group '%s' not found" % target, 1747 errors.ECODE_NOENT)
1748 1749 @_ConfigSync(shared=1)
1750 - def LookupNodeGroup(self, target):
1751 """Lookup a node group's UUID. 1752 1753 This function is just a wrapper over L{_UnlockedLookupNodeGroup}. 1754 1755 @type target: string or None 1756 @param target: group name or UUID or None to look for the default 1757 @rtype: string 1758 @return: nodegroup UUID 1759 1760 """ 1761 return self._UnlockedLookupNodeGroup(target)
1762
1763 - def _UnlockedGetNodeGroup(self, uuid):
1764 """Lookup a node group. 1765 1766 @type uuid: string 1767 @param uuid: group UUID 1768 @rtype: L{objects.NodeGroup} or None 1769 @return: nodegroup object, or None if not found 1770 1771 """ 1772 if uuid not in self._ConfigData().nodegroups: 1773 return None 1774 1775 return self._ConfigData().nodegroups[uuid]
1776 1777 @_ConfigSync(shared=1)
1778 - def GetNodeGroup(self, uuid):
1779 """Lookup a node group. 1780 1781 @type uuid: string 1782 @param uuid: group UUID 1783 @rtype: L{objects.NodeGroup} or None 1784 @return: nodegroup object, or None if not found 1785 1786 """ 1787 return self._UnlockedGetNodeGroup(uuid)
1788
1789 - def _UnlockedGetAllNodeGroupsInfo(self):
1790 """Get the configuration of all node groups. 1791 1792 """ 1793 return dict(self._ConfigData().nodegroups)
1794 1795 @_ConfigSync(shared=1)
1796 - def GetAllNodeGroupsInfo(self):
1797 """Get the configuration of all node groups. 1798 1799 """ 1800 return self._UnlockedGetAllNodeGroupsInfo()
1801 1802 @_ConfigSync(shared=1)
1803 - def GetAllNodeGroupsInfoDict(self):
1804 """Get the configuration of all node groups expressed as a dictionary of 1805 dictionaries. 1806 1807 """ 1808 return dict(map(lambda (uuid, ng): (uuid, ng.ToDict()), 1809 self._UnlockedGetAllNodeGroupsInfo().items()))
1810 1811 @_ConfigSync(shared=1)
1812 - def GetNodeGroupList(self):
1813 """Get a list of node groups. 1814 1815 """ 1816 return self._ConfigData().nodegroups.keys()
1817 1818 @_ConfigSync(shared=1)
1819 - def GetNodeGroupMembersByNodes(self, nodes):
1820 """Get nodes which are member in the same nodegroups as the given nodes. 1821 1822 """ 1823 ngfn = lambda node_uuid: self._UnlockedGetNodeInfo(node_uuid).group 1824 return frozenset(member_uuid 1825 for node_uuid in nodes 1826 for member_uuid in 1827 self._UnlockedGetNodeGroup(ngfn(node_uuid)).members)
1828 1829 @_ConfigSync(shared=1)
1830 - def GetMultiNodeGroupInfo(self, group_uuids):
1831 """Get the configuration of multiple node groups. 1832 1833 @param group_uuids: List of node group UUIDs 1834 @rtype: list 1835 @return: List of tuples of (group_uuid, group_info) 1836 1837 """ 1838 return [(uuid, self._UnlockedGetNodeGroup(uuid)) for uuid in group_uuids]
1839 1840 @_ConfigSync()
1841 - def AddInstance(self, instance, ec_id):
1842 """Add an instance to the config. 1843 1844 This should be used after creating a new instance. 1845 1846 @type instance: L{objects.Instance} 1847 @param instance: the instance object 1848 1849 """ 1850 if not isinstance(instance, objects.Instance): 1851 raise errors.ProgrammerError("Invalid type passed to AddInstance") 1852 1853 all_macs = self._AllMACs() 1854 for nic in instance.nics: 1855 if nic.mac in all_macs: 1856 raise errors.ConfigurationError("Cannot add instance %s:" 1857 " MAC address '%s' already in use." % 1858 (instance.name, nic.mac)) 1859 1860 self._CheckUniqueUUID(instance, include_temporary=False) 1861 1862 instance.serial_no = 1 1863 instance.ctime = instance.mtime = time.time() 1864 self._ConfigData().instances[instance.uuid] = instance 1865 self._ConfigData().cluster.serial_no += 1 1866 self._UnlockedReleaseDRBDMinors(instance.uuid) 1867 # FIXME: After RemoveInstance is moved to WConfd, use its internal 1868 # function from TempRes module instead. 1869 self._UnlockedCommitTemporaryIps(ec_id)
1870
1871 - def _EnsureUUID(self, item, ec_id):
1872 """Ensures a given object has a valid UUID. 1873 1874 @param item: the instance or node to be checked 1875 @param ec_id: the execution context id for the uuid reservation 1876 1877 """ 1878 if not item.uuid: 1879 item.uuid = self._GenerateUniqueID(ec_id) 1880 else: 1881 self._CheckUniqueUUID(item, include_temporary=True)
1882
1883 - def _CheckUniqueUUID(self, item, include_temporary):
1884 """Checks that the UUID of the given object is unique. 1885 1886 @param item: the instance or node to be checked 1887 @param include_temporary: whether temporarily generated UUID's should be 1888 included in the check. If the UUID of the item to be checked is 1889 a temporarily generated one, this has to be C{False}. 1890 1891 """ 1892 if not item.uuid: 1893 raise errors.ConfigurationError("'%s' must have an UUID" % (item.name,)) 1894 if item.uuid in self._AllIDs(include_temporary=include_temporary): 1895 raise errors.ConfigurationError("Cannot add '%s': UUID %s already" 1896 " in use" % (item.name, item.uuid))
1897
1898 - def _SetInstanceStatus(self, inst_uuid, status, disks_active, 1899 admin_state_source):
1900 """Set the instance's status to a given value. 1901 1902 @rtype: L{objects.Instance} 1903 @return: the updated instance object 1904 1905 """ 1906 if inst_uuid not in self._ConfigData().instances: 1907 raise errors.ConfigurationError("Unknown instance '%s'" % 1908 inst_uuid) 1909 instance = self._ConfigData().instances[inst_uuid] 1910 1911 if status is None: 1912 status = instance.admin_state 1913 if disks_active is None: 1914 disks_active = instance.disks_active 1915 if admin_state_source is None: 1916 admin_state_source = instance.admin_state_source 1917 1918 assert status in constants.ADMINST_ALL, \ 1919 "Invalid status '%s' passed to SetInstanceStatus" % (status,) 1920 1921 if instance.admin_state != status or \ 1922 instance.disks_active != disks_active or \ 1923 instance.admin_state_source != admin_state_source: 1924 instance.admin_state = status 1925 instance.disks_active = disks_active 1926 instance.admin_state_source = admin_state_source 1927 instance.serial_no += 1 1928 instance.mtime = time.time() 1929 return instance
1930 1931 @_ConfigSync()
1932 - def MarkInstanceUp(self, inst_uuid):
1933 """Mark the instance status to up in the config. 1934 1935 This also sets the instance disks active flag. 1936 1937 @rtype: L{objects.Instance} 1938 @return: the updated instance object 1939 1940 """ 1941 return self._SetInstanceStatus(inst_uuid, constants.ADMINST_UP, True, 1942 constants.ADMIN_SOURCE)
1943 1944 @_ConfigSync()
1945 - def MarkInstanceOffline(self, inst_uuid):
1946 """Mark the instance status to down in the config. 1947 1948 This also clears the instance disks active flag. 1949 1950 @rtype: L{objects.Instance} 1951 @return: the updated instance object 1952 1953 """ 1954 return self._SetInstanceStatus(inst_uuid, constants.ADMINST_OFFLINE, False, 1955 constants.ADMIN_SOURCE)
1956 1957 @_ConfigSync()
1958 - def RemoveInstance(self, inst_uuid):
1959 """Remove the instance from the configuration. 1960 1961 """ 1962 if inst_uuid not in self._ConfigData().instances: 1963 raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) 1964 1965 # If a network port has been allocated to the instance, 1966 # return it to the pool of free ports. 1967 inst = self._ConfigData().instances[inst_uuid] 1968 network_port = getattr(inst, "network_port", None) 1969 if network_port is not None: 1970 self._ConfigData().cluster.tcpudp_port_pool.add(network_port) 1971 1972 instance = self._UnlockedGetInstanceInfo(inst_uuid) 1973 1974 # FIXME: After RemoveInstance is moved to WConfd, use its internal 1975 # function from TempRes module. 1976 for nic in instance.nics: 1977 if nic.network and nic.ip: 1978 # Return all IP addresses to the respective address pools 1979 self._UnlockedCommitIp(constants.RELEASE_ACTION, nic.network, nic.ip) 1980 1981 del self._ConfigData().instances[inst_uuid] 1982 self._ConfigData().cluster.serial_no += 1
1983 1984 @_ConfigSync()
1985 - def RenameInstance(self, inst_uuid, new_name):
1986 """Rename an instance. 1987 1988 This needs to be done in ConfigWriter and not by RemoveInstance 1989 combined with AddInstance as only we can guarantee an atomic 1990 rename. 1991 1992 """ 1993 if inst_uuid not in self._ConfigData().instances: 1994 raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) 1995 1996 inst = self._ConfigData().instances[inst_uuid] 1997 inst.name = new_name 1998 1999 instance_disks = self._UnlockedGetInstanceDisks(inst_uuid) 2000 for (_, disk) in enumerate(instance_disks): 2001 if disk.dev_type in [constants.DT_FILE, constants.DT_SHARED_FILE]: 2002 # rename the file paths in logical and physical id 2003 file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1])) 2004 disk.logical_id = (disk.logical_id[0], 2005 utils.PathJoin(file_storage_dir, inst.name, 2006 os.path.basename(disk.logical_id[1]))) 2007 2008 # Force update of ssconf files 2009 self._ConfigData().cluster.serial_no += 1
2010 2011 @_ConfigSync()
2012 - def MarkInstanceDown(self, inst_uuid):
2013 """Mark the status of an instance to down in the configuration. 2014 2015 This does not touch the instance disks active flag, as shut down instances 2016 can still have active disks. 2017 2018 @rtype: L{objects.Instance} 2019 @return: the updated instance object 2020 2021 """ 2022 return self._SetInstanceStatus(inst_uuid, constants.ADMINST_DOWN, None, 2023 constants.ADMIN_SOURCE)
2024 2025 @_ConfigSync()
2026 - def MarkInstanceUserDown(self, inst_uuid):
2027 """Mark the status of an instance to user down in the configuration. 2028 2029 This does not touch the instance disks active flag, as user shut 2030 down instances can still have active disks. 2031 2032 """ 2033 2034 self._SetInstanceStatus(inst_uuid, constants.ADMINST_DOWN, None, 2035 constants.USER_SOURCE)
2036 2037 @_ConfigSync()
2038 - def MarkInstanceDisksActive(self, inst_uuid):
2039 """Mark the status of instance disks active. 2040 2041 @rtype: L{objects.Instance} 2042 @return: the updated instance object 2043 2044 """ 2045 return self._SetInstanceStatus(inst_uuid, None, True, None)
2046 2047 @_ConfigSync()
2048 - def MarkInstanceDisksInactive(self, inst_uuid):
2049 """Mark the status of instance disks inactive. 2050 2051 @rtype: L{objects.Instance} 2052 @return: the updated instance object 2053 2054 """ 2055 return self._SetInstanceStatus(inst_uuid, None, False, None)
2056
2057 - def _UnlockedGetInstanceList(self):
2058 """Get the list of instances. 2059 2060 This function is for internal use, when the config lock is already held. 2061 2062 """ 2063 return self._ConfigData().instances.keys()
2064 2065 @_ConfigSync(shared=1)
2066 - def GetInstanceList(self):
2067 """Get the list of instances. 2068 2069 @return: array of instances, ex. ['instance2-uuid', 'instance1-uuid'] 2070 2071 """ 2072 return self._UnlockedGetInstanceList()
2073
2074 - def ExpandInstanceName(self, short_name):
2075 """Attempt to expand an incomplete instance name. 2076 2077 """ 2078 # Locking is done in L{ConfigWriter.GetAllInstancesInfo} 2079 all_insts = self.GetAllInstancesInfo().values() 2080 expanded_name = _MatchNameComponentIgnoreCase( 2081 short_name, [inst.name for inst in all_insts]) 2082 2083 if expanded_name is not None: 2084 # there has to be exactly one instance with that name 2085 inst = (filter(lambda n: n.name == expanded_name, all_insts)[0]) 2086 return (inst.uuid, inst.name) 2087 else: 2088 return (None, None)
2089
2090 - def _UnlockedGetInstanceInfo(self, inst_uuid):
2091 """Returns information about an instance. 2092 2093 This function is for internal use, when the config lock is already held. 2094 2095 """ 2096 if inst_uuid not in self._ConfigData().instances: 2097 return None 2098 2099 return self._ConfigData().instances[inst_uuid]
2100 2101 @_ConfigSync(shared=1)
2102 - def GetInstanceInfo(self, inst_uuid):
2103 """Returns information about an instance. 2104 2105 It takes the information from the configuration file. Other information of 2106 an instance are taken from the live systems. 2107 2108 @param inst_uuid: UUID of the instance 2109 2110 @rtype: L{objects.Instance} 2111 @return: the instance object 2112 2113 """ 2114 return self._UnlockedGetInstanceInfo(inst_uuid)
2115 2116 @_ConfigSync(shared=1)
2117 - def GetInstanceNodeGroups(self, inst_uuid, primary_only=False):
2118 """Returns set of node group UUIDs for instance's nodes. 2119 2120 @rtype: frozenset 2121 2122 """ 2123 instance = self._UnlockedGetInstanceInfo(inst_uuid) 2124 if not instance: 2125 raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) 2126 2127 if primary_only: 2128 nodes = [instance.primary_node] 2129 else: 2130 nodes = self._UnlockedGetInstanceNodes(instance.uuid) 2131 2132 return frozenset(self._UnlockedGetNodeInfo(node_uuid).group 2133 for node_uuid in nodes)
2134 2135 @_ConfigSync(shared=1)
2136 - def GetInstanceNetworks(self, inst_uuid):
2137 """Returns set of network UUIDs for instance's nics. 2138 2139 @rtype: frozenset 2140 2141 """ 2142 instance = self._UnlockedGetInstanceInfo(inst_uuid) 2143 if not instance: 2144 raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) 2145 2146 networks = set() 2147 for nic in instance.nics: 2148 if nic.network: 2149 networks.add(nic.network) 2150 2151 return frozenset(networks)
2152 2153 @_ConfigSync(shared=1)
2154 - def GetMultiInstanceInfo(self, inst_uuids):
2155 """Get the configuration of multiple instances. 2156 2157 @param inst_uuids: list of instance UUIDs 2158 @rtype: list 2159 @return: list of tuples (instance UUID, instance_info), where 2160 instance_info is what would GetInstanceInfo return for the 2161 node, while keeping the original order 2162 2163 """ 2164 return [(uuid, self._UnlockedGetInstanceInfo(uuid)) for uuid in inst_uuids]
2165 2166 @_ConfigSync(shared=1)
2167 - def GetMultiInstanceInfoByName(self, inst_names):
2168 """Get the configuration of multiple instances. 2169 2170 @param inst_names: list of instance names 2171 @rtype: list 2172 @return: list of tuples (instance, instance_info), where 2173 instance_info is what would GetInstanceInfo return for the 2174 node, while keeping the original order 2175 2176 """ 2177 result = [] 2178 for name in inst_names: 2179 instance = self._UnlockedGetInstanceInfoByName(name) 2180 if instance: 2181 result.append((instance.uuid, instance)) 2182 else: 2183 raise errors.ConfigurationError("Instance data of instance '%s'" 2184 " not found." % name) 2185 return result
2186 2187 @_ConfigSync(shared=1)
2188 - def GetAllInstancesInfo(self):
2189 """Get the configuration of all instances. 2190 2191 @rtype: dict 2192 @return: dict of (instance, instance_info), where instance_info is what 2193 would GetInstanceInfo return for the node 2194 2195 """ 2196 return self._UnlockedGetAllInstancesInfo()
2197
2198 - def _UnlockedGetAllInstancesInfo(self):
2199 my_dict = dict([(inst_uuid, self._UnlockedGetInstanceInfo(inst_uuid)) 2200 for inst_uuid in self._UnlockedGetInstanceList()]) 2201 return my_dict
2202 2203 @_ConfigSync(shared=1)
2204 - def GetInstancesInfoByFilter(self, filter_fn):
2205 """Get instance configuration with a filter. 2206 2207 @type filter_fn: callable 2208 @param filter_fn: Filter function receiving instance object as parameter, 2209 returning boolean. Important: this function is called while the 2210 configuration locks is held. It must not do any complex work or call 2211 functions potentially leading to a deadlock. Ideally it doesn't call any 2212 other functions and just compares instance attributes. 2213 2214 """ 2215 return dict((uuid, inst) 2216 for (uuid, inst) in self._ConfigData().instances.items() 2217 if filter_fn(inst))
2218 2219 @_ConfigSync(shared=1)
2220 - def GetInstanceInfoByName(self, inst_name):
2221 """Get the L{objects.Instance} object for a named instance. 2222 2223 @param inst_name: name of the instance to get information for 2224 @type inst_name: string 2225 @return: the corresponding L{objects.Instance} instance or None if no 2226 information is available 2227 2228 """ 2229 return self._UnlockedGetInstanceInfoByName(inst_name)
2230
2231 - def _UnlockedGetInstanceInfoByName(self, inst_name):
2232 for inst in self._UnlockedGetAllInstancesInfo().values(): 2233 if inst.name == inst_name: 2234 return inst 2235 return None
2236
2237 - def _UnlockedGetInstanceName(self, inst_uuid):
2238 inst_info = self._UnlockedGetInstanceInfo(inst_uuid) 2239 if inst_info is None: 2240 raise errors.OpExecError("Unknown instance: %s" % inst_uuid) 2241 return inst_info.name
2242 2243 @_ConfigSync(shared=1)
2244 - def GetInstanceName(self, inst_uuid):
2245 """Gets the instance name for the passed instance. 2246 2247 @param inst_uuid: instance UUID to get name for 2248 @type inst_uuid: string 2249 @rtype: string 2250 @return: instance name 2251 2252 """ 2253 return self._UnlockedGetInstanceName(inst_uuid)
2254 2255 @_ConfigSync(shared=1)
2256 - def GetInstanceNames(self, inst_uuids):
2257 """Gets the instance names for the passed list of nodes. 2258 2259 @param inst_uuids: list of instance UUIDs to get names for 2260 @type inst_uuids: list of strings 2261 @rtype: list of strings 2262 @return: list of instance names 2263 2264 """ 2265 return self._UnlockedGetInstanceNames(inst_uuids)
2266 2267 @_ConfigSync()
2268 - def SetInstancePrimaryNode(self, inst_uuid, target_node_uuid):
2269 """Sets the primary node of an existing instance 2270 2271 @param inst_uuid: instance UUID 2272 @type inst_uuid: string 2273 @param target_node_uuid: the new primary node UUID 2274 @type target_node_uuid: string 2275 2276 """ 2277 self._UnlockedGetInstanceInfo(inst_uuid).primary_node = target_node_uuid
2278
2279 - def _UnlockedGetInstanceNames(self, inst_uuids):
2280 return [self._UnlockedGetInstanceName(uuid) for uuid in inst_uuids]
2281
2282 - def _UnlockedAddNode(self, node, ec_id):
2283 """Add a node to the configuration. 2284 2285 @type node: L{objects.Node} 2286 @param node: a Node instance 2287 2288 """ 2289 logging.info("Adding node %s to configuration", node.name) 2290 2291 self._EnsureUUID(node, ec_id) 2292 2293 node.serial_no = 1 2294 node.ctime = node.mtime = time.time() 2295 self._UnlockedAddNodeToGroup(node.uuid, node.group) 2296 assert node.uuid in self._ConfigData().nodegroups[node.group].members 2297 self._ConfigData().nodes[node.uuid] = node 2298 self._ConfigData().cluster.serial_no += 1
2299 2300 @_ConfigSync()
2301 - def AddNode(self, node, ec_id):
2302 """Add a node to the configuration. 2303 2304 @type node: L{objects.Node} 2305 @param node: a Node instance 2306 2307 """ 2308 self._UnlockedAddNode(node, ec_id)
2309 2310 @_ConfigSync()
2311 - def RemoveNode(self, node_uuid):
2312 """Remove a node from the configuration. 2313 2314 """ 2315 logging.info("Removing node %s from configuration", node_uuid) 2316 2317 if node_uuid not in self._ConfigData().nodes: 2318 raise errors.ConfigurationError("Unknown node '%s'" % node_uuid) 2319 2320 self._UnlockedRemoveNodeFromGroup(self._ConfigData().nodes[node_uuid]) 2321 del self._ConfigData().nodes[node_uuid] 2322 self._ConfigData().cluster.serial_no += 1
2323
2324 - def ExpandNodeName(self, short_name):
2325 """Attempt to expand an incomplete node name into a node UUID. 2326 2327 """ 2328 # Locking is done in L{ConfigWriter.GetAllNodesInfo} 2329 all_nodes = self.GetAllNodesInfo().values() 2330 expanded_name = _MatchNameComponentIgnoreCase( 2331 short_name, [node.name for node in all_nodes]) 2332 2333 if expanded_name is not None: 2334 # there has to be exactly one node with that name 2335 node = (filter(lambda n: n.name == expanded_name, all_nodes)[0]) 2336 return (node.uuid, node.name) 2337 else: 2338 return (None, None)
2339
2340 - def _UnlockedGetNodeInfo(self, node_uuid):
2341 """Get the configuration of a node, as stored in the config. 2342 2343 This function is for internal use, when the config lock is already 2344 held. 2345 2346 @param node_uuid: the node UUID 2347 2348 @rtype: L{objects.Node} 2349 @return: the node object 2350 2351 """ 2352 if node_uuid not in self._ConfigData().nodes: 2353 return None 2354 2355 return self._ConfigData().nodes[node_uuid]
2356 2357 @_ConfigSync(shared=1)
2358 - def GetNodeInfo(self, node_uuid):
2359 """Get the configuration of a node, as stored in the config. 2360 2361 This is just a locked wrapper over L{_UnlockedGetNodeInfo}. 2362 2363 @param node_uuid: the node UUID 2364 2365 @rtype: L{objects.Node} 2366 @return: the node object 2367 2368 """ 2369 return self._UnlockedGetNodeInfo(node_uuid)
2370 2371 @_ConfigSync(shared=1)
2372 - def GetNodeInstances(self, node_uuid):
2373 """Get the instances of a node, as stored in the config. 2374 2375 @param node_uuid: the node UUID 2376 2377 @rtype: (list, list) 2378 @return: a tuple with two lists: the primary and the secondary instances 2379 2380 """ 2381 pri = [] 2382 sec = [] 2383 for inst in self._ConfigData().instances.values(): 2384 if inst.primary_node == node_uuid: 2385 pri.append(inst.uuid) 2386 if node_uuid in self._UnlockedGetInstanceSecondaryNodes(inst.uuid): 2387 sec.append(inst.uuid) 2388 return (pri, sec)
2389 2390 @_ConfigSync(shared=1)
2391 - def GetNodeGroupInstances(self, uuid, primary_only=False):
2392 """Get the instances of a node group. 2393 2394 @param uuid: Node group UUID 2395 @param primary_only: Whether to only consider primary nodes 2396 @rtype: frozenset 2397 @return: List of instance UUIDs in node group 2398 2399 """ 2400 if primary_only: 2401 nodes_fn = lambda inst: [inst.primary_node] 2402 else: 2403 nodes_fn = lambda inst: self._UnlockedGetInstanceNodes(inst.uuid) 2404 2405 return frozenset(inst.uuid 2406 for inst in self._ConfigData().instances.values() 2407 for node_uuid in nodes_fn(inst) 2408 if self._UnlockedGetNodeInfo(node_uuid).group == uuid)
2409
2410 - def _UnlockedGetHvparamsString(self, hvname):
2411 """Return the string representation of the list of hyervisor parameters of 2412 the given hypervisor. 2413 2414 @see: C{GetHvparams} 2415 2416 """ 2417 result = "" 2418 hvparams = self._ConfigData().cluster.hvparams[hvname] 2419 for key in hvparams: 2420 result += "%s=%s\n" % (key, hvparams[key]) 2421 return result
2422 2423 @_ConfigSync(shared=1)
2424 - def GetHvparamsString(self, hvname):
2425 """Return the hypervisor parameters of the given hypervisor. 2426 2427 @type hvname: string 2428 @param hvname: name of a hypervisor 2429 @rtype: string 2430 @return: string containing key-value-pairs, one pair on each line; 2431 format: KEY=VALUE 2432 2433 """ 2434 return self._UnlockedGetHvparamsString(hvname)
2435
2436 - def _UnlockedGetNodeList(self):
2437 """Return the list of nodes which are in the configuration. 2438 2439 This function is for internal use, when the config lock is already 2440 held. 2441 2442 @rtype: list 2443 2444 """ 2445 return self._ConfigData().nodes.keys()
2446 2447 @_ConfigSync(shared=1)
2448 - def GetNodeList(self):
2449 """Return the list of nodes which are in the configuration. 2450 2451 """ 2452 return self._UnlockedGetNodeList()
2453
2454 - def _UnlockedGetOnlineNodeList(self):
2455 """Return the list of nodes which are online. 2456 2457 """ 2458 all_nodes = [self._UnlockedGetNodeInfo(node) 2459 for node in self._UnlockedGetNodeList()] 2460 return [node.uuid for node in all_nodes if not node.offline]
2461 2462 @_ConfigSync(shared=1)
2463 - def GetOnlineNodeList(self):
2464 """Return the list of nodes which are online. 2465 2466 """ 2467 return self._UnlockedGetOnlineNodeList()
2468 2469 @_ConfigSync(shared=1)
2470 - def GetVmCapableNodeList(self):
2471 """Return the list of nodes which are not vm capable. 2472 2473 """ 2474 all_nodes = [self._UnlockedGetNodeInfo(node) 2475 for node in self._UnlockedGetNodeList()] 2476 return [node.uuid for node in all_nodes if node.vm_capable]
2477 2478 @_ConfigSync(shared=1)
2479 - def GetNonVmCapableNodeList(self):
2480 """Return the list of nodes' uuids which are not vm capable. 2481 2482 """ 2483 all_nodes = [self._UnlockedGetNodeInfo(node) 2484 for node in self._UnlockedGetNodeList()] 2485 return [node.uuid for node in all_nodes if not node.vm_capable]
2486 2487 @_ConfigSync(shared=1)
2488 - def GetNonVmCapableNodeNameList(self):
2489 """Return the list of nodes' names which are not vm capable. 2490 2491 """ 2492 all_nodes = [self._UnlockedGetNodeInfo(node) 2493 for node in self._UnlockedGetNodeList()] 2494 return [node.name for node in all_nodes if not node.vm_capable]
2495 2496 @_ConfigSync(shared=1)
2497 - def GetMultiNodeInfo(self, node_uuids):
2498 """Get the configuration of multiple nodes. 2499 2500 @param node_uuids: list of node UUIDs 2501 @rtype: list 2502 @return: list of tuples of (node, node_info), where node_info is 2503 what would GetNodeInfo return for the node, in the original 2504 order 2505 2506 """ 2507 return [(uuid, self._UnlockedGetNodeInfo(uuid)) for uuid in node_uuids]
2508
2509 - def _UnlockedGetAllNodesInfo(self):
2510 """Gets configuration of all nodes. 2511 2512 @note: See L{GetAllNodesInfo} 2513 2514 """ 2515 return dict([(node_uuid, self._UnlockedGetNodeInfo(node_uuid)) 2516 for node_uuid in self._UnlockedGetNodeList()])
2517 2518 @_ConfigSync(shared=1)
2519 - def GetAllNodesInfo(self):
2520 """Get the configuration of all nodes. 2521 2522 @rtype: dict 2523 @return: dict of (node, node_info), where node_info is what 2524 would GetNodeInfo return for the node 2525 2526 """ 2527 return self._UnlockedGetAllNodesInfo()
2528
2529 - def _UnlockedGetNodeInfoByName(self, node_name):
2530 for node in self._UnlockedGetAllNodesInfo().values(): 2531 if node.name == node_name: 2532 return node 2533 return None
2534 2535 @_ConfigSync(shared=1)
2536 - def GetNodeInfoByName(self, node_name):
2537 """Get the L{objects.Node} object for a named node. 2538 2539 @param node_name: name of the node to get information for 2540 @type node_name: string 2541 @return: the corresponding L{objects.Node} instance or None if no 2542 information is available 2543 2544 """ 2545 return self._UnlockedGetNodeInfoByName(node_name)
2546 2547 @_ConfigSync(shared=1)
2548 - def GetNodeGroupInfoByName(self, nodegroup_name):
2549 """Get the L{objects.NodeGroup} object for a named node group. 2550 2551 @param nodegroup_name: name of the node group to get information for 2552 @type nodegroup_name: string 2553 @return: the corresponding L{objects.NodeGroup} instance or None if no 2554 information is available 2555 2556 """ 2557 for nodegroup in self._UnlockedGetAllNodeGroupsInfo().values(): 2558 if nodegroup.name == nodegroup_name: 2559 return nodegroup 2560 return None
2561
2562 - def _UnlockedGetNodeName(self, node_spec):
2563 if isinstance(node_spec, objects.Node): 2564 return node_spec.name 2565 elif isinstance(node_spec, basestring): 2566 node_info = self._UnlockedGetNodeInfo(node_spec) 2567 if node_info is None: 2568 raise errors.OpExecError("Unknown node: %s" % node_spec) 2569 return node_info.name 2570 else: 2571 raise errors.ProgrammerError("Can't handle node spec '%s'" % node_spec)
2572 2573 @_ConfigSync(shared=1)
2574 - def GetNodeName(self, node_spec):
2575 """Gets the node name for the passed node. 2576 2577 @param node_spec: node to get names for 2578 @type node_spec: either node UUID or a L{objects.Node} object 2579 @rtype: string 2580 @return: node name 2581 2582 """ 2583 return self._UnlockedGetNodeName(node_spec)
2584
2585 - def _UnlockedGetNodeNames(self, node_specs):
2586 return [self._UnlockedGetNodeName(node_spec) for node_spec in node_specs]
2587 2588 @_ConfigSync(shared=1)
2589 - def GetNodeNames(self, node_specs):
2590 """Gets the node names for the passed list of nodes. 2591 2592 @param node_specs: list of nodes to get names for 2593 @type node_specs: list of either node UUIDs or L{objects.Node} objects 2594 @rtype: list of strings 2595 @return: list of node names 2596 2597 """ 2598 return self._UnlockedGetNodeNames(node_specs)
2599 2600 @_ConfigSync(shared=1)
2601 - def GetNodeGroupsFromNodes(self, node_uuids):
2602 """Returns groups for a list of nodes. 2603 2604 @type node_uuids: list of string 2605 @param node_uuids: List of node UUIDs 2606 @rtype: frozenset 2607 2608 """ 2609 return frozenset(self._UnlockedGetNodeInfo(uuid).group 2610 for uuid in node_uuids)
2611
2612 - def _UnlockedGetMasterCandidateUuids(self):
2613 """Get the list of UUIDs of master candidates. 2614 2615 @rtype: list of strings 2616 @return: list of UUIDs of all master candidates. 2617 2618 """ 2619 return [node.uuid for node in self._ConfigData().nodes.values() 2620 if node.master_candidate]
2621 2622 @_ConfigSync(shared=1)
2623 - def GetMasterCandidateUuids(self):
2624 """Get the list of UUIDs of master candidates. 2625 2626 @rtype: list of strings 2627 @return: list of UUIDs of all master candidates. 2628 2629 """ 2630 return self._UnlockedGetMasterCandidateUuids()
2631
2632 - def _UnlockedGetMasterCandidateStats(self, exceptions=None):
2633 """Get the number of current and maximum desired and possible candidates. 2634 2635 @type exceptions: list 2636 @param exceptions: if passed, list of nodes that should be ignored 2637 @rtype: tuple 2638 @return: tuple of (current, desired and possible, possible) 2639 2640 """ 2641 mc_now = mc_should = mc_max = 0 2642 for node in self._ConfigData().nodes.values(): 2643 if exceptions and node.uuid in exceptions: 2644 continue 2645 if not (node.offline or node.drained) and node.master_capable: 2646 mc_max += 1 2647 if node.master_candidate: 2648 mc_now += 1 2649 mc_should = min(mc_max, self._ConfigData().cluster.candidate_pool_size) 2650 return (mc_now, mc_should, mc_max)
2651 2652 @_ConfigSync(shared=1)
2653 - def GetMasterCandidateStats(self, exceptions=None):
2654 """Get the number of current and maximum possible candidates. 2655 2656 This is just a wrapper over L{_UnlockedGetMasterCandidateStats}. 2657 2658 @type exceptions: list 2659 @param exceptions: if passed, list of nodes that should be ignored 2660 @rtype: tuple 2661 @return: tuple of (current, max) 2662 2663 """ 2664 return self._UnlockedGetMasterCandidateStats(exceptions)
2665 2666 @_ConfigSync()
2667 - def MaintainCandidatePool(self, exception_node_uuids):
2668 """Try to grow the candidate pool to the desired size. 2669 2670 @type exception_node_uuids: list 2671 @param exception_node_uuids: if passed, list of nodes that should be ignored 2672 @rtype: list 2673 @return: list with the adjusted nodes (L{objects.Node} instances) 2674 2675 """ 2676 mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats( 2677 exception_node_uuids) 2678 mod_list = [] 2679 if mc_now < mc_max: 2680 node_list = self._ConfigData().nodes.keys() 2681 random.shuffle(node_list) 2682 for uuid in node_list: 2683 if mc_now >= mc_max: 2684 break 2685 node = self._ConfigData().nodes[uuid] 2686 if (node.master_candidate or node.offline or node.drained or 2687 node.uuid in exception_node_uuids or not node.master_capable): 2688 continue 2689 mod_list.append(node) 2690 node.master_candidate = True 2691 node.serial_no += 1 2692 mc_now += 1 2693 if mc_now != mc_max: 2694 # this should not happen 2695 logging.warning("Warning: MaintainCandidatePool didn't manage to" 2696 " fill the candidate pool (%d/%d)", mc_now, mc_max) 2697 if mod_list: 2698 self._ConfigData().cluster.serial_no += 1 2699 2700 return mod_list
2701
2702 - def _UnlockedAddNodeToGroup(self, node_uuid, nodegroup_uuid):
2703 """Add a given node to the specified group. 2704 2705 """ 2706 if nodegroup_uuid not in self._ConfigData().nodegroups: 2707 # This can happen if a node group gets deleted between its lookup and 2708 # when we're adding the first node to it, since we don't keep a lock in 2709 # the meantime. It's ok though, as we'll fail cleanly if the node group 2710 # is not found anymore. 2711 raise errors.OpExecError("Unknown node group: %s" % nodegroup_uuid) 2712 if node_uuid not in self._ConfigData().nodegroups[nodegroup_uuid].members: 2713 self._ConfigData().nodegroups[nodegroup_uuid].members.append(node_uuid)
2714
2715 - def _UnlockedRemoveNodeFromGroup(self, node):
2716 """Remove a given node from its group. 2717 2718 """ 2719 nodegroup = node.group 2720 if nodegroup not in self._ConfigData().nodegroups: 2721 logging.warning("Warning: node '%s' has unknown node group '%s'" 2722 " (while being removed from it)", node.uuid, nodegroup) 2723 nodegroup_obj = self._ConfigData().nodegroups[nodegroup] 2724 if node.uuid not in nodegroup_obj.members: 2725 logging.warning("Warning: node '%s' not a member of its node group '%s'" 2726 " (while being removed from it)", node.uuid, nodegroup) 2727 else: 2728 nodegroup_obj.members.remove(node.uuid)
2729 2730 @_ConfigSync()
2731 - def AssignGroupNodes(self, mods):
2732 """Changes the group of a number of nodes. 2733 2734 @type mods: list of tuples; (node name, new group UUID) 2735 @param mods: Node membership modifications 2736 2737 """ 2738 groups = self._ConfigData().nodegroups 2739 nodes = self._ConfigData().nodes 2740 2741 resmod = [] 2742 2743 # Try to resolve UUIDs first 2744 for (node_uuid, new_group_uuid) in mods: 2745 try: 2746 node = nodes[node_uuid] 2747 except KeyError: 2748 raise errors.ConfigurationError("Unable to find node '%s'" % node_uuid) 2749 2750 if node.group == new_group_uuid: 2751 # Node is being assigned to its current group 2752 logging.debug("Node '%s' was assigned to its current group (%s)", 2753 node_uuid, node.group) 2754 continue 2755 2756 # Try to find current group of node 2757 try: 2758 old_group = groups[node.group] 2759 except KeyError: 2760 raise errors.ConfigurationError("Unable to find old group '%s'" % 2761 node.group) 2762 2763 # Try to find new group for node 2764 try: 2765 new_group = groups[new_group_uuid] 2766 except KeyError: 2767 raise errors.ConfigurationError("Unable to find new group '%s'" % 2768 new_group_uuid) 2769 2770 assert node.uuid in old_group.members, \ 2771 ("Inconsistent configuration: node '%s' not listed in members for its" 2772 " old group '%s'" % (node.uuid, old_group.uuid)) 2773 assert node.uuid not in new_group.members, \ 2774 ("Inconsistent configuration: node '%s' already listed in members for" 2775 " its new group '%s'" % (node.uuid, new_group.uuid)) 2776 2777 resmod.append((node, old_group, new_group)) 2778 2779 # Apply changes 2780 for (node, old_group, new_group) in resmod: 2781 assert node.uuid != new_group.uuid and old_group.uuid != new_group.uuid, \ 2782 "Assigning to current group is not possible" 2783 2784 node.group = new_group.uuid 2785 2786 # Update members of involved groups 2787 if node.uuid in old_group.members: 2788 old_group.members.remove(node.uuid) 2789 if node.uuid not in new_group.members: 2790 new_group.members.append(node.uuid) 2791 2792 # Update timestamps and serials (only once per node/group object) 2793 now = time.time() 2794 for obj in frozenset(itertools.chain(*resmod)): # pylint: disable=W0142 2795 obj.serial_no += 1 2796 obj.mtime = now 2797 2798 # Force ssconf update 2799 self._ConfigData().cluster.serial_no += 1
2800
2801 - def _BumpSerialNo(self):
2802 """Bump up the serial number of the config. 2803 2804 """ 2805 self._ConfigData().serial_no += 1 2806 self._ConfigData().mtime = time.time()
2807
2808 - def _AllUUIDObjects(self):
2809 """Returns all objects with uuid attributes. 2810 2811 """ 2812 return (self._ConfigData().instances.values() + 2813 self._ConfigData().nodes.values() + 2814 self._ConfigData().nodegroups.values() + 2815 self._ConfigData().networks.values() + 2816 self._ConfigData().disks.values() + 2817 self._AllNICs() + 2818 [self._ConfigData().cluster])
2819
2820 - def GetConfigManager(self, shared=False):
2821 """Returns a ConfigManager, which is suitable to perform a synchronized 2822 block of configuration operations. 2823 2824 WARNING: This blocks all other configuration operations, so anything that 2825 runs inside the block should be very fast, preferably not using any IO. 2826 """ 2827 2828 return ConfigManager(self, shared)
2829
2830 - def _AddLockCount(self, count):
2831 self._lock_count += count 2832 return self._lock_count
2833
2834 - def _LockCount(self):
2835 return self._lock_count
2836
2837 - def _OpenConfig(self, shared):
2838 """Read the config data from WConfd or disk. 2839 2840 """ 2841 if self._AddLockCount(1) > 1: 2842 if self._lock_current_shared and not shared: 2843 self._AddLockCount(-1) 2844 raise errors.ConfigurationError("Can't request an exclusive" 2845 " configuration lock while holding" 2846 " shared") 2847 else: 2848 return # we already have the lock, do nothing 2849 else: 2850 self._lock_current_shared = shared 2851 # Read the configuration data. If offline, read the file directly. 2852 # If online, call WConfd. 2853 if self._offline: 2854 try: 2855 raw_data = utils.ReadFile(self._cfg_file) 2856 data_dict = serializer.Load(raw_data) 2857 # Make sure the configuration has the right version 2858 _ValidateConfig(data_dict) 2859 data = objects.ConfigData.FromDict(data_dict) 2860 except errors.ConfigVersionMismatch: 2861 raise 2862 except Exception, err: 2863 raise errors.ConfigurationError(err) 2864 2865 self._cfg_id = utils.GetFileID(path=self._cfg_file) 2866 2867 if (not hasattr(data, "cluster") or 2868 not hasattr(data.cluster, "rsahostkeypub")): 2869 raise errors.ConfigurationError("Incomplete configuration" 2870 " (missing cluster.rsahostkeypub)") 2871 2872 if not data.cluster.master_node in data.nodes: 2873 msg = ("The configuration denotes node %s as master, but does not" 2874 " contain information about this node" % 2875 data.cluster.master_node) 2876 raise errors.ConfigurationError(msg) 2877 2878 master_info = data.nodes[data.cluster.master_node] 2879 if master_info.name != self._my_hostname and not self._accept_foreign: 2880 msg = ("The configuration denotes node %s as master, while my" 2881 " hostname is %s; opening a foreign configuration is only" 2882 " possible in accept_foreign mode" % 2883 (master_info.name, self._my_hostname)) 2884 raise errors.ConfigurationError(msg) 2885 2886 self._SetConfigData(data) 2887 2888 # Upgrade configuration if needed 2889 self._UpgradeConfig(saveafter=True) 2890 else: 2891 if shared: 2892 if self._config_data is None: 2893 logging.debug("Requesting config, as I have no up-to-date copy") 2894 dict_data = self._wconfd.ReadConfig() 2895 else: 2896 logging.debug("My config copy is up to date.") 2897 dict_data = None 2898 else: 2899 # poll until we acquire the lock 2900 while True: 2901 dict_data = \ 2902 self._wconfd.LockConfig(self._GetWConfdContext(), bool(shared)) 2903 logging.debug("Received config from WConfd.LockConfig [shared=%s]", 2904 bool(shared)) 2905 if dict_data is not None: 2906 break 2907 time.sleep(random.random()) 2908 2909 try: 2910 if dict_data is not None: 2911 self._SetConfigData(objects.ConfigData.FromDict(dict_data)) 2912 self._UpgradeConfig() 2913 except Exception, err: 2914 raise errors.ConfigurationError(err)
2915
2916 - def _CloseConfig(self, save):
2917 """Release resources relating the config data. 2918 2919 """ 2920 if self._AddLockCount(-1) > 0: 2921 return # we still have the lock, do nothing 2922 try: 2923 if save: 2924 self._WriteConfig() 2925 except Exception, err: 2926 logging.critical("Can't write the configuration: %s", str(err)) 2927 raise 2928 finally: 2929 if not self._offline and not self._lock_current_shared: 2930 try: 2931 self._wconfd.UnlockConfig(self._GetWConfdContext()) 2932 except AttributeError: 2933 # If the configuration hasn't been initialized yet, just ignore it. 2934 pass 2935 logging.debug("Configuration in WConfd unlocked")
2936 2937 # TODO: To WConfd
2938 - def _UpgradeConfig(self, saveafter=False):
2939 """Run any upgrade steps. 2940 2941 This method performs both in-object upgrades and also update some data 2942 elements that need uniqueness across the whole configuration or interact 2943 with other objects. 2944 2945 @warning: if 'saveafter' is 'True', this function will call 2946 L{_WriteConfig()} so it needs to be called only from a 2947 "safe" place. 2948 2949 """ 2950 # Keep a copy of the persistent part of _config_data to check for changes 2951 # Serialization doesn't guarantee order in dictionaries 2952 oldconf = copy.deepcopy(self._ConfigData().ToDict()) 2953 2954 # In-object upgrades 2955 self._ConfigData().UpgradeConfig() 2956 2957 for item in self._AllUUIDObjects(): 2958 if item.uuid is None: 2959 item.uuid = self._GenerateUniqueID(_UPGRADE_CONFIG_JID) 2960 if not self._ConfigData().nodegroups: 2961 default_nodegroup_name = constants.INITIAL_NODE_GROUP_NAME 2962 default_nodegroup = objects.NodeGroup(name=default_nodegroup_name, 2963 members=[]) 2964 self._UnlockedAddNodeGroup(default_nodegroup, _UPGRADE_CONFIG_JID, True) 2965 for node in self._ConfigData().nodes.values(): 2966 if not node.group: 2967 node.group = self._UnlockedLookupNodeGroup(None) 2968 # This is technically *not* an upgrade, but needs to be done both when 2969 # nodegroups are being added, and upon normally loading the config, 2970 # because the members list of a node group is discarded upon 2971 # serializing/deserializing the object. 2972 self._UnlockedAddNodeToGroup(node.uuid, node.group) 2973 2974 modified = (oldconf != self._ConfigData().ToDict()) 2975 if modified and saveafter: 2976 self._WriteConfig() 2977 self._UnlockedDropECReservations(_UPGRADE_CONFIG_JID) 2978 else: 2979 if self._offline: 2980 self._UnlockedVerifyConfigAndLog()
2981
2982 - def _WriteConfig(self, destination=None):
2983 """Write the configuration data to persistent storage. 2984 2985 """ 2986 if destination is None: 2987 destination = self._cfg_file 2988 2989 # Save the configuration data. If offline, write the file directly. 2990 # If online, call WConfd. 2991 if self._offline: 2992 self._BumpSerialNo() 2993 txt = serializer.DumpJson( 2994 self._ConfigData().ToDict(_with_private=True), 2995 private_encoder=serializer.EncodeWithPrivateFields 2996 ) 2997 2998 getents = self._getents() 2999 try: 3000 fd = utils.SafeWriteFile(destination, self._cfg_id, data=txt, 3001 close=False, gid=getents.confd_gid, mode=0640) 3002 except errors.LockError: 3003 raise errors.ConfigurationError("The configuration file has been" 3004 " modified since the last write, cannot" 3005 " update") 3006 try: 3007 self._cfg_id = utils.GetFileID(fd=fd) 3008 finally: 3009 os.close(fd) 3010 else: 3011 try: 3012 self._wconfd.WriteConfig(self._GetWConfdContext(), 3013 self._ConfigData().ToDict()) 3014 except errors.LockError: 3015 raise errors.ConfigurationError("The configuration file has been" 3016 " modified since the last write, cannot" 3017 " update") 3018 3019 self.write_count += 1
3020
3021 - def _GetAllHvparamsStrings(self, hypervisors):
3022 """Get the hvparams of all given hypervisors from the config. 3023 3024 @type hypervisors: list of string 3025 @param hypervisors: list of hypervisor names 3026 @rtype: dict of strings 3027 @returns: dictionary mapping the hypervisor name to a string representation 3028 of the hypervisor's hvparams 3029 3030 """ 3031 hvparams = {} 3032 for hv in hypervisors: 3033 hvparams[hv] = self._UnlockedGetHvparamsString(hv) 3034 return hvparams
3035 3036 @staticmethod
3037 - def _ExtendByAllHvparamsStrings(ssconf_values, all_hvparams):
3038 """Extends the ssconf_values dictionary by hvparams. 3039 3040 @type ssconf_values: dict of strings 3041 @param ssconf_values: dictionary mapping ssconf_keys to strings 3042 representing the content of ssconf files 3043 @type all_hvparams: dict of strings 3044 @param all_hvparams: dictionary mapping hypervisor names to a string 3045 representation of their hvparams 3046 @rtype: same as ssconf_values 3047 @returns: the ssconf_values dictionary extended by hvparams 3048 3049 """ 3050 for hv in all_hvparams: 3051 ssconf_key = constants.SS_HVPARAMS_PREF + hv 3052 ssconf_values[ssconf_key] = all_hvparams[hv] 3053 return ssconf_values
3054
3055 - def _UnlockedGetSshPortMap(self, node_infos):
3056 node_ports = dict([(node.name, 3057 self._UnlockedGetNdParams(node).get( 3058 constants.ND_SSH_PORT)) 3059 for node in node_infos]) 3060 return node_ports
3061
3062 - def _UnlockedGetSsconfValues(self):
3063 """Return the values needed by ssconf. 3064 3065 @rtype: dict 3066 @return: a dictionary with keys the ssconf names and values their 3067 associated value 3068 3069 """ 3070 fn = "\n".join 3071 instance_names = utils.NiceSort( 3072 [inst.name for inst in 3073 self._UnlockedGetAllInstancesInfo().values()]) 3074 node_infos = self._UnlockedGetAllNodesInfo().values() 3075 node_names = [node.name for node in node_infos] 3076 node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip) 3077 for ninfo in node_infos] 3078 node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip) 3079 for ninfo in node_infos] 3080 node_vm_capable = ["%s=%s" % (ninfo.name, str(ninfo.vm_capable)) 3081 for ninfo in node_infos] 3082 3083 instance_data = fn(instance_names) 3084 off_data = fn(node.name for node in node_infos if node.offline) 3085 on_data = fn(node.name for node in node_infos if not node.offline) 3086 mc_data = fn(node.name for node in node_infos if node.master_candidate) 3087 mc_ips_data = fn(node.primary_ip for node in node_infos 3088 if node.master_candidate) 3089 node_data = fn(node_names) 3090 node_pri_ips_data = fn(node_pri_ips) 3091 node_snd_ips_data = fn(node_snd_ips) 3092 node_vm_capable_data = fn(node_vm_capable) 3093 3094 cluster = self._ConfigData().cluster 3095 cluster_tags = fn(cluster.GetTags()) 3096 3097 master_candidates_certs = fn("%s=%s" % (mc_uuid, mc_cert) 3098 for mc_uuid, mc_cert 3099 in cluster.candidate_certs.items()) 3100 3101 hypervisor_list = fn(cluster.enabled_hypervisors) 3102 all_hvparams = self._GetAllHvparamsStrings(constants.HYPER_TYPES) 3103 3104 uid_pool = uidpool.FormatUidPool(cluster.uid_pool, separator="\n") 3105 3106 nodegroups = ["%s %s" % (nodegroup.uuid, nodegroup.name) for nodegroup in 3107 self._ConfigData().nodegroups.values()] 3108 nodegroups_data = fn(utils.NiceSort(nodegroups)) 3109 networks = ["%s %s" % (net.uuid, net.name) for net in 3110 self._ConfigData().networks.values()] 3111 networks_data = fn(utils.NiceSort(networks)) 3112 3113 ssh_ports = fn("%s=%s" % (node_name, port) 3114 for node_name, port 3115 in self._UnlockedGetSshPortMap(node_infos).items()) 3116 3117 ssconf_values = { 3118 constants.SS_CLUSTER_NAME: cluster.cluster_name, 3119 constants.SS_CLUSTER_TAGS: cluster_tags, 3120 constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir, 3121 constants.SS_SHARED_FILE_STORAGE_DIR: cluster.shared_file_storage_dir, 3122 constants.SS_GLUSTER_STORAGE_DIR: cluster.gluster_storage_dir, 3123 constants.SS_MASTER_CANDIDATES: mc_data, 3124 constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data, 3125 constants.SS_MASTER_CANDIDATES_CERTS: master_candidates_certs, 3126 constants.SS_MASTER_IP: cluster.master_ip, 3127 constants.SS_MASTER_NETDEV: cluster.master_netdev, 3128 constants.SS_MASTER_NETMASK: str(cluster.master_netmask), 3129 constants.SS_MASTER_NODE: self._UnlockedGetNodeName(cluster.master_node), 3130 constants.SS_NODE_LIST: node_data, 3131 constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data, 3132 constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data, 3133 constants.SS_NODE_VM_CAPABLE: node_vm_capable_data, 3134 constants.SS_OFFLINE_NODES: off_data, 3135 constants.SS_ONLINE_NODES: on_data, 3136 constants.SS_PRIMARY_IP_FAMILY: str(cluster.primary_ip_family), 3137 constants.SS_INSTANCE_LIST: instance_data, 3138 constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION, 3139 constants.SS_HYPERVISOR_LIST: hypervisor_list, 3140 constants.SS_MAINTAIN_NODE_HEALTH: str(cluster.maintain_node_health), 3141 constants.SS_UID_POOL: uid_pool, 3142 constants.SS_NODEGROUPS: 
nodegroups_data, 3143 constants.SS_NETWORKS: networks_data, 3144 constants.SS_ENABLED_USER_SHUTDOWN: str(cluster.enabled_user_shutdown), 3145 constants.SS_SSH_PORTS: ssh_ports, 3146 } 3147 ssconf_values = self._ExtendByAllHvparamsStrings(ssconf_values, 3148 all_hvparams) 3149 bad_values = [(k, v) for k, v in ssconf_values.items() 3150 if not isinstance(v, (str, basestring))] 3151 if bad_values: 3152 err = utils.CommaJoin("%s=%s" % (k, v) for k, v in bad_values) 3153 raise errors.ConfigurationError("Some ssconf key(s) have non-string" 3154 " values: %s" % err) 3155 return ssconf_values
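Every ssconf value is written verbatim into a small file distributed to the nodes, so the method refuses to return anything that is not already a string. The final validation step on its own (simplified to str; the original also accepts basestring under Python 2):

def check_all_strings(values):
    bad = [(k, v) for k, v in values.items() if not isinstance(v, str)]
    if bad:
        raise ValueError("non-string ssconf values: %s"
                         % ", ".join("%s=%s" % (k, v) for k, v in bad))
    return values

check_all_strings({"master_netmask": "32"})    # fine
# check_all_strings({"master_netmask": 32})    # would raise ValueError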
3156 3157 @_ConfigSync(shared=1)
3158 - def GetSsconfValues(self):
3159 """Wrapper using lock around _UnlockedGetSsconf(). 3160 3161 """ 3162 return self._UnlockedGetSsconfValues()
3163 3164 @_ConfigSync(shared=1)
3165 - def GetVGName(self):
3166 """Return the volume group name. 3167 3168 """ 3169 return self._ConfigData().cluster.volume_group_name
3170 3171 @_ConfigSync()
3172 - def SetVGName(self, vg_name):
3173 """Set the volume group name. 3174 3175 """ 3176 self._ConfigData().cluster.volume_group_name = vg_name 3177 self._ConfigData().cluster.serial_no += 1
3178 3179 @_ConfigSync(shared=1)
3180 - def GetDRBDHelper(self):
3181 """Return DRBD usermode helper. 3182 3183 """ 3184 return self._ConfigData().cluster.drbd_usermode_helper
3185 3186 @_ConfigSync()
3187 - def SetDRBDHelper(self, drbd_helper):
3188 """Set DRBD usermode helper. 3189 3190 """ 3191 self._ConfigData().cluster.drbd_usermode_helper = drbd_helper 3192 self._ConfigData().cluster.serial_no += 1
3193 3194 @_ConfigSync(shared=1)
3195 - def GetMACPrefix(self):
3196 """Return the mac prefix. 3197 3198 """ 3199 return self._ConfigData().cluster.mac_prefix
3200 3201 @_ConfigSync(shared=1)
3202 - def GetClusterInfo(self):
3203 """Returns information about the cluster 3204 3205 @rtype: L{objects.Cluster} 3206 @return: the cluster object 3207 3208 """ 3209 return self._ConfigData().cluster
3210 3211 @_ConfigSync(shared=1)
3212 - def HasAnyDiskOfType(self, dev_type):
3213 """Check if in there is at disk of the given type in the configuration. 3214 3215 """ 3216 return self._ConfigData().HasAnyDiskOfType(dev_type)
3217 3218 @_ConfigSync(shared=1)
3219 - def GetDetachedConfig(self):
3220 """Returns a detached version of a ConfigManager, which represents 3221 a read-only snapshot of the configuration at this particular time. 3222 3223 """ 3224 return DetachedConfig(self._ConfigData())
3225 3226 @_ConfigSync()
3227 - def Update(self, target, feedback_fn, ec_id=None):
3228 """Notify function to be called after updates. 3229 3230 This function must be called when an object (as returned by 3231 GetInstanceInfo, GetNodeInfo, GetCluster) has been updated and the 3232 caller wants the modifications saved to the backing store. Note 3233 that all modified objects will be saved, but the target argument 3234 is the one the caller wants to ensure that it's saved. 3235 3236 @param target: an instance of either L{objects.Cluster}, 3237 L{objects.Node} or L{objects.Instance} which is existing in 3238 the cluster 3239 @param feedback_fn: Callable feedback function 3240 3241 """ 3242 if self._ConfigData() is None: 3243 raise errors.ProgrammerError("Configuration file not read," 3244 " cannot save.") 3245 3246 def check_serial(target, current): 3247 if current is None: 3248 raise errors.ConfigurationError("Configuration object unknown") 3249 elif current.serial_no != target.serial_no: 3250 raise errors.ConfigurationError("Configuration object updated since" 3251 " it has been read: %d != %d", 3252 current.serial_no, target.serial_no)
3253 3254 def replace_in(target, tdict): 3255 check_serial(target, tdict.get(target.uuid)) 3256 tdict[target.uuid] = target 3257 3258 update_serial = False 3259 if isinstance(target, objects.Cluster): 3260 check_serial(target, self._ConfigData().cluster) 3261 self._ConfigData().cluster = target 3262 elif isinstance(target, objects.Node): 3263 replace_in(target, self._ConfigData().nodes) 3264 update_serial = True 3265 elif isinstance(target, objects.Instance): 3266 replace_in(target, self._ConfigData().instances) 3267 elif isinstance(target, objects.NodeGroup): 3268 replace_in(target, self._ConfigData().nodegroups) 3269 elif isinstance(target, objects.Network): 3270 replace_in(target, self._ConfigData().networks) 3271 elif isinstance(target, objects.Disk): 3272 replace_in(target, self._ConfigData().disks) 3273 else: 3274 raise errors.ProgrammerError("Invalid object type (%s) passed to" 3275 " ConfigWriter.Update" % type(target)) 3276 target.serial_no += 1 3277 target.mtime = now = time.time() 3278 3279 if update_serial: 3280 # for node updates, we need to increase the cluster serial too 3281 self._ConfigData().cluster.serial_no += 1 3282 self._ConfigData().cluster.mtime = now 3283 3284 if isinstance(target, objects.Instance): 3285 self._UnlockedReleaseDRBDMinors(target.uuid) 3286 3287 if ec_id is not None: 3288 # Commit all ips reserved by OpInstanceSetParams and OpGroupSetParams 3289 # FIXME: After RemoveInstance is moved to WConfd, use its internal 3290 # functions from TempRes module. 3291 self._UnlockedCommitTemporaryIps(ec_id) 3292 3293 # Just verify the configuration with our feedback function. 3294 # It will get written automatically by the decorator. 3295 self._UnlockedVerifyConfigAndLog(feedback_fn=feedback_fn) 3296
3297 - def _UnlockedDropECReservations(self, _ec_id):
3298 """Drop per-execution-context reservations 3299 3300 """ 3301 # FIXME: Remove the following two lines after all reservations are moved to 3302 # wconfd. 3303 for rm in self._all_rms: 3304 rm.DropECReservations(_ec_id) 3305 if not self._offline: 3306 self._wconfd.DropAllReservations(self._GetWConfdContext())
3307
3308 - def DropECReservations(self, ec_id):
3309 self._UnlockedDropECReservations(ec_id)
3310 3311 @_ConfigSync(shared=1)
3312 - def GetAllNetworksInfo(self):
3313 """Get configuration info of all the networks. 3314 3315 """ 3316 return dict(self._ConfigData().networks)
3317
3318 - def _UnlockedGetNetworkList(self):
3319 """Get the list of networks. 3320 3321 This function is for internal use, when the config lock is already held. 3322 3323 """ 3324 return self._ConfigData().networks.keys()
3325 3326 @_ConfigSync(shared=1)
3327 - def GetNetworkList(self):
3328 """Get the list of networks. 3329 3330 @return: array of networks, ex. ["main", "vlan100", "200] 3331 3332 """ 3333 return self._UnlockedGetNetworkList()
3334 3335 @_ConfigSync(shared=1)
3336 - def GetNetworkNames(self):
3337 """Get a list of network names 3338 3339 """ 3340 names = [net.name 3341 for net in self._ConfigData().networks.values()] 3342 return names
3343
3344 - def _UnlockedGetNetwork(self, uuid):
3345 """Returns information about a network. 3346 3347 This function is for internal use, when the config lock is already held. 3348 3349 """ 3350 if uuid not in self._ConfigData().networks: 3351 return None 3352 3353 return self._ConfigData().networks[uuid]
3354 3355 @_ConfigSync(shared=1)
3356 - def GetNetwork(self, uuid):
3357 """Returns information about a network. 3358 3359 It takes the information from the configuration file. 3360 3361 @param uuid: UUID of the network 3362 3363 @rtype: L{objects.Network} 3364 @return: the network object 3365 3366 """ 3367 return self._UnlockedGetNetwork(uuid)
3368 3369 @_ConfigSync()
3370 - def AddNetwork(self, net, ec_id, check_uuid=True):
3371 """Add a network to the configuration. 3372 3373 @type net: L{objects.Network} 3374 @param net: the Network object to add 3375 @type ec_id: string 3376 @param ec_id: unique id for the job to use when creating a missing UUID 3377 3378 """ 3379 self._UnlockedAddNetwork(net, ec_id, check_uuid)
3380
3381 - def _UnlockedAddNetwork(self, net, ec_id, check_uuid):
3382 """Add a network to the configuration. 3383 3384 """ 3385 logging.info("Adding network %s to configuration", net.name) 3386 3387 if check_uuid: 3388 self._EnsureUUID(net, ec_id) 3389 3390 net.serial_no = 1 3391 net.ctime = net.mtime = time.time() 3392 self._ConfigData().networks[net.uuid] = net 3393 self._ConfigData().cluster.serial_no += 1
3394
3395 - def _UnlockedLookupNetwork(self, target):
3396 """Lookup a network's UUID. 3397 3398 @type target: string 3399 @param target: network name or UUID 3400 @rtype: string 3401 @return: network UUID 3402 @raises errors.OpPrereqError: when the target network cannot be found 3403 3404 """ 3405 if target is None: 3406 return None 3407 if target in self._ConfigData().networks: 3408 return target 3409 for net in self._ConfigData().networks.values(): 3410 if net.name == target: 3411 return net.uuid 3412 raise errors.OpPrereqError("Network '%s' not found" % target, 3413 errors.ECODE_NOENT)
3414 3415 @_ConfigSync(shared=1)
3416 - def LookupNetwork(self, target):
3417 """Lookup a network's UUID. 3418 3419 This function is just a wrapper over L{_UnlockedLookupNetwork}. 3420 3421 @type target: string 3422 @param target: network name or UUID 3423 @rtype: string 3424 @return: network UUID 3425 3426 """ 3427 return self._UnlockedLookupNetwork(target)
3428 3429 @_ConfigSync()
3430 - def RemoveNetwork(self, network_uuid):
3431 """Remove a network from the configuration. 3432 3433 @type network_uuid: string 3434 @param network_uuid: the UUID of the network to remove 3435 3436 """ 3437 logging.info("Removing network %s from configuration", network_uuid) 3438 3439 if network_uuid not in self._ConfigData().networks: 3440 raise errors.ConfigurationError("Unknown network '%s'" % network_uuid) 3441 3442 del self._ConfigData().networks[network_uuid] 3443 self._ConfigData().cluster.serial_no += 1
3444
3445 - def _UnlockedGetGroupNetParams(self, net_uuid, node_uuid):
3446 """Get the netparams (mode, link) of a network. 3447 3448 Get a network's netparams for a given node. 3449 3450 @type net_uuid: string 3451 @param net_uuid: network uuid 3452 @type node_uuid: string 3453 @param node_uuid: node UUID 3454 @rtype: dict or None 3455 @return: netparams 3456 3457 """ 3458 node_info = self._UnlockedGetNodeInfo(node_uuid) 3459 nodegroup_info = self._UnlockedGetNodeGroup(node_info.group) 3460 netparams = nodegroup_info.networks.get(net_uuid, None) 3461 3462 return netparams
3463 3464 @_ConfigSync(shared=1)
3465 - def GetGroupNetParams(self, net_uuid, node_uuid):
3466 """Locking wrapper of _UnlockedGetGroupNetParams() 3467 3468 """ 3469 return self._UnlockedGetGroupNetParams(net_uuid, node_uuid)
3470 3471 @_ConfigSync(shared=1)
3472 - def CheckIPInNodeGroup(self, ip, node_uuid):
3473 """Check IP uniqueness in nodegroup. 3474 3475 Check networks that are connected in the node's node group 3476 if ip is contained in any of them. Used when creating/adding 3477 a NIC to ensure uniqueness among nodegroups. 3478 3479 @type ip: string 3480 @param ip: ip address 3481 @type node_uuid: string 3482 @param node_uuid: node UUID 3483 @rtype: (string, dict) or (None, None) 3484 @return: (network name, netparams) 3485 3486 """ 3487 if ip is None: 3488 return (None, None) 3489 node_info = self._UnlockedGetNodeInfo(node_uuid) 3490 nodegroup_info = self._UnlockedGetNodeGroup(node_info.group) 3491 for net_uuid in nodegroup_info.networks.keys(): 3492 net_info = self._UnlockedGetNetwork(net_uuid) 3493 pool = network.AddressPool(net_info) 3494 if pool.Contains(ip): 3495 return (net_info.name, nodegroup_info.networks[net_uuid]) 3496 3497 return (None, None)
3498 3499 @_ConfigSync(shared=1)
3500 - def GetCandidateCerts(self):
3501 """Returns the candidate certificate map. 3502 3503 """ 3504 return self._ConfigData().cluster.candidate_certs
3505 3506 @_ConfigSync()
3507 - def SetCandidateCerts(self, certs):
3508 """Replaces the master candidate cert list with the new values. 3509 3510 @type certs: dict of string to string 3511 @param certs: map of node UUIDs to SSL client certificate digests. 3512 3513 """ 3514 self._ConfigData().cluster.candidate_certs = certs
3515 3516 @_ConfigSync()
3517 - def AddNodeToCandidateCerts(self, node_uuid, cert_digest, 3518 info_fn=logging.info, warn_fn=logging.warn):
3519 """Adds an entry to the candidate certificate map. 3520 3521 @type node_uuid: string 3522 @param node_uuid: the node's UUID 3523 @type cert_digest: string 3524 @param cert_digest: the digest of the node's client SSL certificate 3525 @type info_fn: function 3526 @param info_fn: logging function for information messages 3527 @type warn_fn: function 3528 @param warn_fn: logging function for warning messages 3529 3530 """ 3531 cluster = self._ConfigData().cluster 3532 if node_uuid in cluster.candidate_certs: 3533 old_cert_digest = cluster.candidate_certs[node_uuid] 3534 if old_cert_digest == cert_digest: 3535 if info_fn is not None: 3536 info_fn("Certificate digest for node %s already in config." 3537 "Not doing anything." % node_uuid) 3538 return 3539 else: 3540 if warn_fn is not None: 3541 warn_fn("Overriding differing certificate digest for node %s" 3542 % node_uuid) 3543 cluster.candidate_certs[node_uuid] = cert_digest
3544 3545 @_ConfigSync()
3546 - def RemoveNodeFromCandidateCerts(self, node_uuid, 3547 warn_fn=logging.warn):
3548 """Removes the entry of the given node in the certificate map. 3549 3550 @type node_uuid: string 3551 @param node_uuid: the node's UUID 3552 @type warn_fn: function 3553 @param warn_fn: logging function for warning messages 3554 3555 """ 3556 cluster = self._ConfigData().cluster 3557 if node_uuid not in cluster.candidate_certs: 3558 if warn_fn is not None: 3559 warn_fn("Cannot remove certifcate for node %s, because it's not" 3560 " in the candidate map." % node_uuid) 3561 return 3562 del cluster.candidate_certs[node_uuid]
3563
3564 - def FlushConfig(self):
3565 """Force the distribution of configuration to master candidates. 3566 3567 It is not necessary to hold a lock for this operation, it is handled 3568 internally by WConfd. 3569 3570 """ 3571 if not self._offline: 3572 self._wconfd.FlushConfig()
3573
3574 3575 -class DetachedConfig(ConfigWriter):
3576 - def __init__(self, config_data):
3577 super(DetachedConfig, self).__init__(offline=True) 3578 self._SetConfigData(config_data)
3579 3580 @staticmethod
3581 - def _WriteCallError():
3582 raise errors.ProgrammerError("DetachedConfig supports only read-only" 3583 " operations")
3584
3585 - def _OpenConfig(self, shared):
3586 if not shared: 3587 DetachedConfig._WriteCallError()
3588
3589 - def _CloseConfig(self, save):
3590 if save: 3591 DetachedConfig._WriteCallError()
3592
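DetachedConfig gives read-only access to an already loaded configuration: shared (read) operations work as usual, while anything that would take the exclusive config lock should end in ProgrammerError via _WriteCallError(). A sketch, assuming `config_data` is an objects.ConfigData instance obtained elsewhere:

  from ganeti import errors

  ro_cfg = DetachedConfig(config_data)
  net_names = ro_cfg.GetNetworkNames()   # shared access is allowed
  try:
    ro_cfg.SetCandidateCerts({})         # any writing operation...
  except errors.ProgrammerError:
    pass                                 # ...is rejected by this class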