
Source Code for Package ganeti.config

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30   
  31  """Configuration management for Ganeti 
  32   
  33  This module provides the interface to the Ganeti cluster configuration. 
  34   
  35  The configuration data is stored on every node but is updated on the master 
  36  only. After each update, the master distributes the data to the other nodes. 
  37   
  38  Currently, the data storage format is JSON. YAML was slow and consumed too 
  39  much memory. 
  40   
  41  """ 
  42   
  43  # pylint: disable=R0904 
  44  # R0904: Too many public methods 
  45   
  46  import copy 
  47  import os 
  48  import random 
  49  import logging 
  50  import time 
  51  import threading 
  52  import itertools 
  53   
  54  from ganeti.config.temporary_reservations import TemporaryReservationManager 
  55  from ganeti.config.utils import ConfigSync, ConfigManager 
  56  from ganeti.config.verify import (VerifyType, VerifyNic, VerifyIpolicy, 
  57                                    ValidateConfig) 
  58   
  59  from ganeti import errors 
  60  from ganeti import utils 
  61  from ganeti import constants 
  62  import ganeti.wconfd as wc 
  63  from ganeti import objects 
  64  from ganeti import serializer 
  65  from ganeti import uidpool 
  66  from ganeti import netutils 
  67  from ganeti import runtime 
  68  from ganeti import pathutils 
  69  from ganeti import network 
70 71 72 -def GetWConfdContext(ec_id, livelock):
73 """Prepare a context for communication with WConfd. 74 75 WConfd needs to know the identity of each caller to properly manage locks and 76 detect job death. This helper function prepares the identity object given a 77 job ID (optional) and a livelock file. 78 79 @type ec_id: int, or None 80 @param ec_id: the job ID or None, if the caller isn't a job 81 @type livelock: L{ganeti.utils.livelock.LiveLock} 82 @param livelock: a livelock object holding the lockfile needed for WConfd 83 @return: the WConfd context 84 85 """ 86 if ec_id is None: 87 return (threading.current_thread().getName(), 88 livelock.GetPath(), os.getpid()) 89 else: 90 return (ec_id, 91 livelock.GetPath(), os.getpid())
92
93 94 -def GetConfig(ec_id, livelock, **kwargs):
95 """A utility function for constructing instances of ConfigWriter. 96 97 It prepares a WConfd context and uses it to create a ConfigWriter instance. 98 99 @type ec_id: int, or None 100 @param ec_id: the job ID or None, if the caller isn't a job 101 @type livelock: L{ganeti.utils.livelock.LiveLock} 102 @param livelock: a livelock object holding the lockfile needed for WConfd 103 @type kwargs: dict 104 @param kwargs: Any additional arguments for the ConfigWriter constructor 105 @rtype: L{ConfigWriter} 106 @return: the ConfigWriter context 107 108 """ 109 kwargs['wconfdcontext'] = GetWConfdContext(ec_id, livelock) 110 111 # if the config is to be opened in the accept_foreign mode, we should 112 # also tell the RPC client not to check for the master node 113 accept_foreign = kwargs.get('accept_foreign', False) 114 kwargs['wconfd'] = wc.Client(allow_non_master=accept_foreign) 115 116 return ConfigWriter(**kwargs)
117 118 119 # job id used for resource management at config upgrade time 120 _UPGRADE_CONFIG_JID = "jid-cfg-upgrade"
121 122 123 -def _MatchNameComponentIgnoreCase(short_name, names):
124 """Wrapper around L{utils.text.MatchNameComponent}. 125 126 """ 127 return utils.MatchNameComponent(short_name, names, case_sensitive=False)
128
129 130 -def _CheckInstanceDiskIvNames(disks):
131 """Checks if instance's disks' C{iv_name} attributes are in order. 132 133 @type disks: list of L{objects.Disk} 134 @param disks: List of disks 135 @rtype: list of tuples; (int, string, string) 136 @return: List of wrongly named disks, each tuple contains disk index, 137 expected and actual name 138 139 """ 140 result = [] 141 142 for (idx, disk) in enumerate(disks): 143 exp_iv_name = "disk/%s" % idx 144 if disk.iv_name != exp_iv_name: 145 result.append((idx, exp_iv_name, disk.iv_name)) 146 147 return result
148
149 150 -class ConfigWriter(object):
151 """The interface to the cluster configuration. 152 153 WARNING: The class is no longer thread-safe! 154 Each thread must construct a separate instance. 155 156 @ivar _all_rms: a list of all temporary reservation managers 157 158 Currently the class fulfills 3 main functions: 159 1. lock the configuration for access (monitor) 160 2. reload and write the config if necessary (bridge) 161 3. provide convenient access methods to config data (facade) 162 163 """
164 - def __init__(self, cfg_file=None, offline=False, _getents=runtime.GetEnts, 165 accept_foreign=False, wconfdcontext=None, wconfd=None):
166 self.write_count = 0 167 self._config_data = None 168 self._SetConfigData(None) 169 self._offline = offline 170 if cfg_file is None: 171 self._cfg_file = pathutils.CLUSTER_CONF_FILE 172 else: 173 self._cfg_file = cfg_file 174 self._getents = _getents 175 self._temporary_ids = TemporaryReservationManager() 176 self._all_rms = [self._temporary_ids] 177 # Note: in order to prevent errors when resolving our name later, 178 # we compute it here once and reuse it; it's 179 # better to raise an error before starting to modify the config 180 # file than after it was modified 181 self._my_hostname = netutils.Hostname.GetSysName() 182 self._cfg_id = None 183 self._wconfdcontext = wconfdcontext 184 self._wconfd = wconfd 185 self._accept_foreign = accept_foreign 186 self._lock_count = 0 187 self._lock_current_shared = None 188 self._lock_forced = False
189
190 - def _ConfigData(self):
191 return self._config_data
192
193 - def OutDate(self):
194 self._config_data = None
195
196 - def _SetConfigData(self, cfg):
197 self._config_data = cfg
198
199 - def _GetWConfdContext(self):
200 return self._wconfdcontext
201 202 # this method needs to be static, so that we can call it on the class 203 @staticmethod
204 - def IsCluster():
205 """Check if the cluster is configured. 206 207 """ 208 return os.path.exists(pathutils.CLUSTER_CONF_FILE)
209
210 - def _UnlockedGetNdParams(self, node):
211 nodegroup = self._UnlockedGetNodeGroup(node.group) 212 return self._ConfigData().cluster.FillND(node, nodegroup)
213 214 @ConfigSync(shared=1)
215 - def GetNdParams(self, node):
216 """Get the node params populated with cluster defaults. 217 218 @type node: L{objects.Node} 219 @param node: The node we want to know the params for 220 @return: A dict with the filled in node params 221 222 """ 223 return self._UnlockedGetNdParams(node)
224
225 - def _UnlockedGetFilledHvStateParams(self, node):
226 cfg = self._ConfigData() 227 cluster_hv_state = cfg.cluster.hv_state_static 228 def_hv = self._UnlockedGetHypervisorType() 229 cluster_fv = constants.HVST_DEFAULTS if def_hv not in cluster_hv_state \ 230 else cluster_hv_state[def_hv] 231 group_hv_state = self._UnlockedGetNodeGroup(node.group).hv_state_static 232 group_fv = cluster_fv if def_hv not in group_hv_state else \ 233 objects.FillDict(cluster_fv, group_hv_state[def_hv]) 234 node_fv = group_fv if def_hv not in node.hv_state_static else \ 235 objects.FillDict(group_fv, node.hv_state_static[def_hv]) 236 return {def_hv: node_fv}
237 238 @ConfigSync(shared=1)
239 - def GetFilledHvStateParams(self, node):
240 """Get the node params populated with cluster defaults. 241 242 @type node: L{objects.Node} 243 @param node: The node we want to know the params for 244 @return: A dict with the filled in node hv_state params for the default hv 245 246 """ 247 return self._UnlockedGetFilledHvStateParams(node)
248 249 @ConfigSync(shared=1)
250 - def GetNdGroupParams(self, nodegroup):
251 """Get the node groups params populated with cluster defaults. 252 253 @type nodegroup: L{objects.NodeGroup} 254 @param nodegroup: The node group we want to know the params for 255 @return: A dict with the filled in node group params 256 257 """ 258 return self._UnlockedGetNdGroupParams(nodegroup)
259
260 - def _UnlockedGetNdGroupParams(self, group):
261 """Get the ndparams of the group. 262 263 @type group: L{objects.NodeGroup} 264 @param group: The group we want to know the params for 265 @rtype: dict of str to int 266 @return: A dict with the filled in node group params 267 268 """ 269 return self._ConfigData().cluster.FillNDGroup(group)
270 271 @ConfigSync(shared=1)
272 - def GetGroupSshPorts(self):
273 """Get a map of group UUIDs to SSH ports. 274 275 @rtype: dict of str to int 276 @return: a dict mapping the UUIDs to the SSH ports 277 278 """ 279 port_map = {} 280 for uuid, group in self._config_data.nodegroups.items(): 281 ndparams = self._UnlockedGetNdGroupParams(group) 282 port = ndparams.get(constants.ND_SSH_PORT) 283 port_map[uuid] = port 284 return port_map
285 286 @ConfigSync(shared=1)
287 - def GetInstanceDiskParams(self, instance):
288 """Get the disk params populated with inherit chain. 289 290 @type instance: L{objects.Instance} 291 @param instance: The instance we want to know the params for 292 @return: A dict with the filled in disk params 293 294 """ 295 node = self._UnlockedGetNodeInfo(instance.primary_node) 296 nodegroup = self._UnlockedGetNodeGroup(node.group) 297 return self._UnlockedGetGroupDiskParams(nodegroup)
298
299 - def _UnlockedGetInstanceDisks(self, inst_uuid):
300 """Return the disks' info for the given instance 301 302 @type inst_uuid: string 303 @param inst_uuid: The UUID of the instance we want to know the disks for 304 305 @rtype: List of L{objects.Disk} 306 @return: A list with all the disks' info 307 308 """ 309 instance = self._UnlockedGetInstanceInfo(inst_uuid) 310 if instance is None: 311 raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) 312 313 return [self._UnlockedGetDiskInfo(disk_uuid) 314 for disk_uuid in instance.disks]
315 316 @ConfigSync(shared=1)
317 - def GetInstanceDisks(self, inst_uuid):
318 """Return the disks' info for the given instance 319 320 This is a simple wrapper over L{_UnlockedGetInstanceDisks}. 321 322 """ 323 return self._UnlockedGetInstanceDisks(inst_uuid)
324
325 - def AddInstanceDisk(self, inst_uuid, disk, idx=None, replace=False):
326 """Add a disk to the config and attach it to instance.""" 327 if not isinstance(disk, objects.Disk): 328 raise errors.ProgrammerError("Invalid type passed to AddInstanceDisk") 329 330 disk.UpgradeConfig() 331 utils.SimpleRetry(True, self._wconfd.AddInstanceDisk, 0.1, 30, 332 args=[inst_uuid, disk.ToDict(), idx, replace]) 333 self.OutDate()
334
335 - def AttachInstanceDisk(self, inst_uuid, disk_uuid, idx=None):
336 """Attach an existing disk to an instance.""" 337 utils.SimpleRetry(True, self._wconfd.AttachInstanceDisk, 0.1, 30, 338 args=[inst_uuid, disk_uuid, idx]) 339 self.OutDate()
340
341 - def _UnlockedRemoveDisk(self, disk_uuid):
342 """Remove the disk from the configuration. 343 344 @type disk_uuid: string 345 @param disk_uuid: The UUID of the disk object 346 347 """ 348 if disk_uuid not in self._ConfigData().disks: 349 raise errors.ConfigurationError("Disk %s doesn't exist" % disk_uuid) 350 351 # Disk must not be attached anywhere 352 for inst in self._ConfigData().instances.values(): 353 if disk_uuid in inst.disks: 354 raise errors.ReservationError("Cannot remove disk %s. Disk is" 355 " attached to instance %s" 356 % (disk_uuid, inst.name)) 357 358 # Remove disk from config file 359 del self._ConfigData().disks[disk_uuid] 360 self._ConfigData().cluster.serial_no += 1
361
362 - def RemoveInstanceDisk(self, inst_uuid, disk_uuid):
363 """Detach a disk from an instance and remove it from the config.""" 364 utils.SimpleRetry(True, self._wconfd.RemoveInstanceDisk, 0.1, 30, 365 args=[inst_uuid, disk_uuid]) 366 self.OutDate()
367
368 - def DetachInstanceDisk(self, inst_uuid, disk_uuid):
369 """Detach a disk from an instance.""" 370 utils.SimpleRetry(True, self._wconfd.DetachInstanceDisk, 0.1, 30, 371 args=[inst_uuid, disk_uuid]) 372 self.OutDate()
373
374 - def _UnlockedGetDiskInfo(self, disk_uuid):
375 """Returns information about a disk. 376 377 It takes the information from the configuration file. 378 379 @param disk_uuid: UUID of the disk 380 381 @rtype: L{objects.Disk} 382 @return: the disk object 383 384 """ 385 if disk_uuid not in self._ConfigData().disks: 386 return None 387 388 return self._ConfigData().disks[disk_uuid]
389 390 @ConfigSync(shared=1)
391 - def GetDiskInfo(self, disk_uuid):
392 """Returns information about a disk. 393 394 This is a simple wrapper over L{_UnlockedGetDiskInfo}. 395 396 """ 397 return self._UnlockedGetDiskInfo(disk_uuid)
398
399 - def _UnlockedGetDiskInfoByName(self, disk_name):
400 """Return information about a named disk. 401 402 Return disk information from the configuration file, searching with the 403 name of the disk. 404 405 @param disk_name: Name of the disk 406 407 @rtype: L{objects.Disk} 408 @return: the disk object 409 410 """ 411 disk = None 412 count = 0 413 for d in self._ConfigData().disks.itervalues(): 414 if d.name == disk_name: 415 count += 1 416 disk = d 417 418 if count > 1: 419 raise errors.ConfigurationError("There are %s disks with this name: %s" 420 % (count, disk_name)) 421 422 return disk
423 424 @ConfigSync(shared=1)
425 - def GetDiskInfoByName(self, disk_name):
426 """Return information about a named disk. 427 428 This is a simple wrapper over L{_UnlockedGetDiskInfoByName}. 429 430 """ 431 return self._UnlockedGetDiskInfoByName(disk_name)
432
433 - def _UnlockedGetDiskList(self):
434 """Get the list of disks. 435 436 @return: array of disks, ex. ['disk2-uuid', 'disk1-uuid'] 437 438 """ 439 return self._ConfigData().disks.keys()
440 441 @ConfigSync(shared=1)
442 - def GetAllDisksInfo(self):
443 """Get the configuration of all disks. 444 445 This is a simple wrapper over L{_UnlockedGetAllDisksInfo}. 446 447 """ 448 return self._UnlockedGetAllDisksInfo()
449
450 - def _UnlockedGetAllDisksInfo(self):
451 """Get the configuration of all disks. 452 453 @rtype: dict 454 @return: dict of (disk, disk_info), where disk_info is what 455 would GetDiskInfo return for the node 456 457 """ 458 my_dict = dict([(disk_uuid, self._UnlockedGetDiskInfo(disk_uuid)) 459 for disk_uuid in self._UnlockedGetDiskList()]) 460 return my_dict
461
462 - def _AllInstanceNodes(self, inst_uuid):
463 """Compute the set of all disk-related nodes for an instance. 464 465 This abstracts away some work from '_UnlockedGetInstanceNodes' 466 and '_UnlockedGetInstanceSecondaryNodes'. 467 468 @type inst_uuid: string 469 @param inst_uuid: The UUID of the instance we want to get nodes for 470 @rtype: set of strings 471 @return: A set of names for all the nodes of the instance 472 473 """ 474 instance = self._UnlockedGetInstanceInfo(inst_uuid) 475 if instance is None: 476 raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) 477 478 instance_disks = self._UnlockedGetInstanceDisks(inst_uuid) 479 all_nodes = [] 480 for disk in instance_disks: 481 all_nodes.extend(disk.all_nodes) 482 return (set(all_nodes), instance)
483
484 - def _UnlockedGetInstanceNodes(self, inst_uuid):
485 """Get all disk-related nodes for an instance. 486 487 For non-DRBD instances, this will contain only the instance's primary node, 488 whereas for DRBD instances, it will contain both the primary and the 489 secondaries. 490 491 @type inst_uuid: string 492 @param inst_uuid: The UUID of the instance we want to get nodes for 493 @rtype: list of strings 494 @return: A list of names for all the nodes of the instance 495 496 """ 497 (all_nodes, instance) = self._AllInstanceNodes(inst_uuid) 498 # ensure that primary node is always the first 499 all_nodes.discard(instance.primary_node) 500 return (instance.primary_node, ) + tuple(all_nodes)
501 502 @ConfigSync(shared=1)
503 - def GetInstanceNodes(self, inst_uuid):
504 """Get all disk-related nodes for an instance. 505 506 This is just a wrapper over L{_UnlockedGetInstanceNodes} 507 508 """ 509 return self._UnlockedGetInstanceNodes(inst_uuid)
510
511 - def _UnlockedGetInstanceSecondaryNodes(self, inst_uuid):
512 """Get the list of secondary nodes. 513 514 @type inst_uuid: string 515 @param inst_uuid: The UUID of the instance we want to get nodes for 516 @rtype: list of strings 517 @return: A tuple of names for all the secondary nodes of the instance 518 519 """ 520 (all_nodes, instance) = self._AllInstanceNodes(inst_uuid) 521 all_nodes.discard(instance.primary_node) 522 return tuple(all_nodes)
523 524 @ConfigSync(shared=1)
525 - def GetInstanceSecondaryNodes(self, inst_uuid):
526 """Get the list of secondary nodes. 527 528 This is a simple wrapper over L{_UnlockedGetInstanceSecondaryNodes}. 529 530 """ 531 return self._UnlockedGetInstanceSecondaryNodes(inst_uuid)
532
533 - def _UnlockedGetInstanceLVsByNode(self, inst_uuid, lvmap=None):
534 """Provide a mapping of node to LVs a given instance owns. 535 536 @type inst_uuid: string 537 @param inst_uuid: The UUID of the instance we want to 538 compute the LVsByNode for 539 @type lvmap: dict 540 @param lvmap: Optional dictionary to receive the 541 'node' : ['lv', ...] data. 542 @rtype: dict or None 543 @return: None if lvmap arg is given, otherwise, a dictionary of 544 the form { 'node_uuid' : ['volume1', 'volume2', ...], ... }; 545 volumeN is of the form "vg_name/lv_name", compatible with 546 GetVolumeList() 547 548 """ 549 def _MapLVsByNode(lvmap, devices, node_uuid): 550 """Recursive helper function.""" 551 if not node_uuid in lvmap: 552 lvmap[node_uuid] = [] 553 554 for dev in devices: 555 if dev.dev_type == constants.DT_PLAIN: 556 if not dev.forthcoming: 557 lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1]) 558 559 elif dev.dev_type in constants.DTS_DRBD: 560 if dev.children: 561 _MapLVsByNode(lvmap, dev.children, dev.logical_id[0]) 562 _MapLVsByNode(lvmap, dev.children, dev.logical_id[1]) 563 564 elif dev.children: 565 _MapLVsByNode(lvmap, dev.children, node_uuid)
566 567 instance = self._UnlockedGetInstanceInfo(inst_uuid) 568 if instance is None: 569 raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) 570 571 if lvmap is None: 572 lvmap = {} 573 ret = lvmap 574 else: 575 ret = None 576 577 _MapLVsByNode(lvmap, 578 self._UnlockedGetInstanceDisks(instance.uuid), 579 instance.primary_node) 580 return ret
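A sketch of the mapping returned for a DRBD-based instance (node UUIDs and LV names are made up):

    {
      "primary-node-uuid":   ["xenvg/disk0_data", "xenvg/disk0_meta"],
      "secondary-node-uuid": ["xenvg/disk0_data", "xenvg/disk0_meta"],
    }

Plain (DT_PLAIN) disks appear only under the node that holds them, while DRBD disks contribute their underlying LVs to both the primary and the secondary node.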
581 582 @ConfigSync(shared=1)
583 - def GetInstanceLVsByNode(self, inst_uuid, lvmap=None):
584 """Provide a mapping of node to LVs a given instance owns. 585 586 This is a simple wrapper over L{_UnlockedGetInstanceLVsByNode} 587 588 """ 589 return self._UnlockedGetInstanceLVsByNode(inst_uuid, lvmap=lvmap)
590 591 @ConfigSync(shared=1)
592 - def GetGroupDiskParams(self, group):
593 """Get the disk params populated with inherit chain. 594 595 @type group: L{objects.NodeGroup} 596 @param group: The group we want to know the params for 597 @return: A dict with the filled in disk params 598 599 """ 600 return self._UnlockedGetGroupDiskParams(group)
601
602 - def _UnlockedGetGroupDiskParams(self, group):
603 """Get the disk params populated with inherit chain down to node-group. 604 605 @type group: L{objects.NodeGroup} 606 @param group: The group we want to know the params for 607 @return: A dict with the filled in disk params 608 609 """ 610 data = self._ConfigData().cluster.SimpleFillDP(group.diskparams) 611 assert isinstance(data, dict), "Not a dictionary: " + str(data) 612 return data
613 614 @ConfigSync(shared=1)
615 - def GetPotentialMasterCandidates(self):
616 """Gets the list of node names of potential master candidates. 617 618 @rtype: list of str 619 @return: list of node names of potential master candidates 620 621 """ 622 # FIXME: Note that currently potential master candidates are nodes 623 # but this definition will be extended once RAPI-unmodifiable 624 # parameters are introduced. 625 nodes = self._UnlockedGetAllNodesInfo() 626 return [node_info.name for node_info in nodes.values()]
627
628 - def GenerateMAC(self, net_uuid, _ec_id):
629 """Generate a MAC for an instance. 630 631 This should check the current instances for duplicates. 632 633 """ 634 return self._wconfd.GenerateMAC(self._GetWConfdContext(), net_uuid)
635
636 - def ReserveMAC(self, mac, _ec_id):
637 """Reserve a MAC for an instance. 638 639 This only checks instances managed by this cluster, it does not 640 check for potential collisions elsewhere. 641 642 """ 643 self._wconfd.ReserveMAC(self._GetWConfdContext(), mac)
644 645 @ConfigSync(shared=1)
646 - def CommitTemporaryIps(self, _ec_id):
647 """Tell WConfD to commit all temporary ids""" 648 self._wconfd.CommitTemporaryIps(self._GetWConfdContext())
649
650 - def ReleaseIp(self, net_uuid, address, _ec_id):
651 """Give a specific IP address back to an IP pool. 652 653 The IP address is returned to the IP pool and marked as reserved. 654 655 """ 656 if net_uuid: 657 if self._offline: 658 raise errors.ProgrammerError("Can't call ReleaseIp in offline mode") 659 self._wconfd.ReleaseIp(self._GetWConfdContext(), net_uuid, address)
660
661 - def GenerateIp(self, net_uuid, _ec_id):
662 """Find a free IPv4 address for an instance. 663 664 """ 665 if self._offline: 666 raise errors.ProgrammerError("Can't call GenerateIp in offline mode") 667 return self._wconfd.GenerateIp(self._GetWConfdContext(), net_uuid)
668
669 - def ReserveIp(self, net_uuid, address, _ec_id, check=True):
670 """Reserve a given IPv4 address for use by an instance. 671 672 """ 673 if self._offline: 674 raise errors.ProgrammerError("Can't call ReserveIp in offline mode") 675 return self._wconfd.ReserveIp(self._GetWConfdContext(), net_uuid, address, 676 check)
677
678 - def ReserveLV(self, lv_name, _ec_id):
679 """Reserve an VG/LV pair for an instance. 680 681 @type lv_name: string 682 @param lv_name: the logical volume name to reserve 683 684 """ 685 return self._wconfd.ReserveLV(self._GetWConfdContext(), lv_name)
686
687 - def GenerateDRBDSecret(self, _ec_id):
688 """Generate a DRBD secret. 689 690 This checks the current disks for duplicates. 691 692 """ 693 return self._wconfd.GenerateDRBDSecret(self._GetWConfdContext())
694 695 # FIXME: After _AllIDs is removed, move it to config_mock.py
696 - def _AllLVs(self):
697 """Compute the list of all LVs. 698 699 """ 700 lvnames = set() 701 for instance in self._ConfigData().instances.values(): 702 node_data = self._UnlockedGetInstanceLVsByNode(instance.uuid) 703 for lv_list in node_data.values(): 704 lvnames.update(lv_list) 705 return lvnames
706
707 - def _AllNICs(self):
708 """Compute the list of all NICs. 709 710 """ 711 nics = [] 712 for instance in self._ConfigData().instances.values(): 713 nics.extend(instance.nics) 714 return nics
715
716 - def _AllIDs(self, include_temporary):
717 """Compute the list of all UUIDs and names we have. 718 719 @type include_temporary: boolean 720 @param include_temporary: whether to include the _temporary_ids set 721 @rtype: set 722 @return: a set of IDs 723 724 """ 725 existing = set() 726 if include_temporary: 727 existing.update(self._temporary_ids.GetReserved()) 728 existing.update(self._AllLVs()) 729 existing.update(self._ConfigData().instances.keys()) 730 existing.update(self._ConfigData().nodes.keys()) 731 existing.update([i.uuid for i in self._AllUUIDObjects() if i.uuid]) 732 return existing
733
734 - def _GenerateUniqueID(self, ec_id):
735 """Generate an unique UUID. 736 737 This checks the current node, instances and disk names for 738 duplicates. 739 740 @rtype: string 741 @return: the unique id 742 743 """ 744 existing = self._AllIDs(include_temporary=False) 745 return self._temporary_ids.Generate(existing, utils.NewUUID, ec_id)
746 747 @ConfigSync(shared=1)
748 - def GenerateUniqueID(self, ec_id):
749 """Generate an unique ID. 750 751 This is just a wrapper over the unlocked version. 752 753 @type ec_id: string 754 @param ec_id: unique id for the job to reserve the id to 755 756 """ 757 return self._GenerateUniqueID(ec_id)
758
759 - def _AllMACs(self):
760 """Return all MACs present in the config. 761 762 @rtype: list 763 @return: the list of all MACs 764 765 """ 766 result = [] 767 for instance in self._ConfigData().instances.values(): 768 for nic in instance.nics: 769 result.append(nic.mac) 770 771 return result
772
773 - def _AllDRBDSecrets(self):
774 """Return all DRBD secrets present in the config. 775 776 @rtype: list 777 @return: the list of all DRBD secrets 778 779 """ 780 def helper(disk, result): 781 """Recursively gather secrets from this disk.""" 782 if disk.dev_type == constants.DT_DRBD8: 783 result.append(disk.logical_id[5]) 784 if disk.children: 785 for child in disk.children: 786 helper(child, result)
787 788 result = [] 789 for disk in self._ConfigData().disks.values(): 790 helper(disk, result) 791 792 return result 793 794 @staticmethod
795 - def _VerifyDisks(data, result):
796 """Per-disk verification checks 797 798 Extends L{result} with diagnostic information about the disks. 799 800 @type data: see L{_ConfigData} 801 @param data: configuration data 802 803 @type result: list of strings 804 @param result: list containing diagnostic messages 805 806 """ 807 for disk_uuid in data.disks: 808 disk = data.disks[disk_uuid] 809 result.extend(["disk %s error: %s" % (disk.uuid, msg) 810 for msg in disk.Verify()]) 811 if disk.uuid != disk_uuid: 812 result.append("disk '%s' is indexed by wrong UUID '%s'" % 813 (disk.name, disk_uuid))
814
815 - def _UnlockedVerifyConfig(self):
816 """Verify function. 817 818 @rtype: list 819 @return: a list of error messages; a non-empty list signifies 820 configuration errors 821 822 """ 823 # pylint: disable=R0914 824 result = [] 825 seen_macs = [] 826 ports = {} 827 data = self._ConfigData() 828 cluster = data.cluster 829 830 # First call WConfd to perform its checks, if we're not offline 831 if not self._offline: 832 try: 833 self._wconfd.VerifyConfig() 834 except errors.ConfigVerifyError, err: 835 try: 836 for msg in err.args[1]: 837 result.append(msg) 838 except IndexError: 839 pass 840 841 # check cluster parameters 842 VerifyType("cluster", "beparams", cluster.SimpleFillBE({}), 843 constants.BES_PARAMETER_TYPES, result.append) 844 VerifyType("cluster", "nicparams", cluster.SimpleFillNIC({}), 845 constants.NICS_PARAMETER_TYPES, result.append) 846 VerifyNic("cluster", cluster.SimpleFillNIC({}), result.append) 847 VerifyType("cluster", "ndparams", cluster.SimpleFillND({}), 848 constants.NDS_PARAMETER_TYPES, result.append) 849 VerifyIpolicy("cluster", cluster.ipolicy, True, result.append) 850 851 for disk_template in cluster.diskparams: 852 if disk_template not in constants.DTS_HAVE_ACCESS: 853 continue 854 855 access = cluster.diskparams[disk_template].get(constants.LDP_ACCESS, 856 constants.DISK_KERNELSPACE) 857 if access not in constants.DISK_VALID_ACCESS_MODES: 858 result.append( 859 "Invalid value of '%s:%s': '%s' (expected one of %s)" % ( 860 disk_template, constants.LDP_ACCESS, access, 861 utils.CommaJoin(constants.DISK_VALID_ACCESS_MODES) 862 ) 863 ) 864 865 self._VerifyDisks(data, result) 866 867 # per-instance checks 868 for instance_uuid in data.instances: 869 instance = data.instances[instance_uuid] 870 if instance.uuid != instance_uuid: 871 result.append("instance '%s' is indexed by wrong UUID '%s'" % 872 (instance.name, instance_uuid)) 873 if instance.primary_node not in data.nodes: 874 result.append("instance '%s' has invalid primary node '%s'" % 875 (instance.name, instance.primary_node)) 876 for snode in self._UnlockedGetInstanceSecondaryNodes(instance.uuid): 877 if snode not in data.nodes: 878 result.append("instance '%s' has invalid secondary node '%s'" % 879 (instance.name, snode)) 880 for idx, nic in enumerate(instance.nics): 881 if nic.mac in seen_macs: 882 result.append("instance '%s' has NIC %d mac %s duplicate" % 883 (instance.name, idx, nic.mac)) 884 else: 885 seen_macs.append(nic.mac) 886 if nic.nicparams: 887 filled = cluster.SimpleFillNIC(nic.nicparams) 888 owner = "instance %s nic %d" % (instance.name, idx) 889 VerifyType(owner, "nicparams", 890 filled, constants.NICS_PARAMETER_TYPES, result.append) 891 VerifyNic(owner, filled, result.append) 892 893 # parameter checks 894 if instance.beparams: 895 VerifyType("instance %s" % instance.name, "beparams", 896 cluster.FillBE(instance), constants.BES_PARAMETER_TYPES, 897 result.append) 898 899 # check that disks exists 900 for disk_uuid in instance.disks: 901 if disk_uuid not in data.disks: 902 result.append("Instance '%s' has invalid disk '%s'" % 903 (instance.name, disk_uuid)) 904 905 instance_disks = self._UnlockedGetInstanceDisks(instance.uuid) 906 # gather the drbd ports for duplicate checks 907 for (idx, dsk) in enumerate(instance_disks): 908 if dsk.dev_type in constants.DTS_DRBD: 909 tcp_port = dsk.logical_id[2] 910 if tcp_port not in ports: 911 ports[tcp_port] = [] 912 ports[tcp_port].append((instance.name, "drbd disk %s" % idx)) 913 # gather network port reservation 914 net_port = getattr(instance, "network_port", None) 915 if net_port is not 
None: 916 if net_port not in ports: 917 ports[net_port] = [] 918 ports[net_port].append((instance.name, "network port")) 919 920 wrong_names = _CheckInstanceDiskIvNames(instance_disks) 921 if wrong_names: 922 tmp = "; ".join(("name of disk %s should be '%s', but is '%s'" % 923 (idx, exp_name, actual_name)) 924 for (idx, exp_name, actual_name) in wrong_names) 925 926 result.append("Instance '%s' has wrongly named disks: %s" % 927 (instance.name, tmp)) 928 929 # cluster-wide pool of free ports 930 for free_port in cluster.tcpudp_port_pool: 931 if free_port not in ports: 932 ports[free_port] = [] 933 ports[free_port].append(("cluster", "port marked as free")) 934 935 # compute tcp/udp duplicate ports 936 keys = ports.keys() 937 keys.sort() 938 for pnum in keys: 939 pdata = ports[pnum] 940 if len(pdata) > 1: 941 txt = utils.CommaJoin(["%s/%s" % val for val in pdata]) 942 result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt)) 943 944 # highest used tcp port check 945 if keys: 946 if keys[-1] > cluster.highest_used_port: 947 result.append("Highest used port mismatch, saved %s, computed %s" % 948 (cluster.highest_used_port, keys[-1])) 949 950 if not data.nodes[cluster.master_node].master_candidate: 951 result.append("Master node is not a master candidate") 952 953 # master candidate checks 954 mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats() 955 if mc_now < mc_max: 956 result.append("Not enough master candidates: actual %d, target %d" % 957 (mc_now, mc_max)) 958 959 # node checks 960 for node_uuid, node in data.nodes.items(): 961 if node.uuid != node_uuid: 962 result.append("Node '%s' is indexed by wrong UUID '%s'" % 963 (node.name, node_uuid)) 964 if [node.master_candidate, node.drained, node.offline].count(True) > 1: 965 result.append("Node %s state is invalid: master_candidate=%s," 966 " drain=%s, offline=%s" % 967 (node.name, node.master_candidate, node.drained, 968 node.offline)) 969 if node.group not in data.nodegroups: 970 result.append("Node '%s' has invalid group '%s'" % 971 (node.name, node.group)) 972 else: 973 VerifyType("node %s" % node.name, "ndparams", 974 cluster.FillND(node, data.nodegroups[node.group]), 975 constants.NDS_PARAMETER_TYPES, result.append) 976 used_globals = constants.NDC_GLOBALS.intersection(node.ndparams) 977 if used_globals: 978 result.append("Node '%s' has some global parameters set: %s" % 979 (node.name, utils.CommaJoin(used_globals))) 980 981 # nodegroups checks 982 nodegroups_names = set() 983 for nodegroup_uuid in data.nodegroups: 984 nodegroup = data.nodegroups[nodegroup_uuid] 985 if nodegroup.uuid != nodegroup_uuid: 986 result.append("node group '%s' (uuid: '%s') indexed by wrong uuid '%s'" 987 % (nodegroup.name, nodegroup.uuid, nodegroup_uuid)) 988 if utils.UUID_RE.match(nodegroup.name.lower()): 989 result.append("node group '%s' (uuid: '%s') has uuid-like name" % 990 (nodegroup.name, nodegroup.uuid)) 991 if nodegroup.name in nodegroups_names: 992 result.append("duplicate node group name '%s'" % nodegroup.name) 993 else: 994 nodegroups_names.add(nodegroup.name) 995 group_name = "group %s" % nodegroup.name 996 VerifyIpolicy(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy), 997 False, result.append) 998 if nodegroup.ndparams: 999 VerifyType(group_name, "ndparams", 1000 cluster.SimpleFillND(nodegroup.ndparams), 1001 constants.NDS_PARAMETER_TYPES, result.append) 1002 1003 # drbd minors check 1004 # FIXME: The check for DRBD map needs to be implemented in WConfd 1005 1006 # IP checks 1007 default_nicparams = 
cluster.nicparams[constants.PP_DEFAULT] 1008 ips = {} 1009 1010 def _AddIpAddress(ip, name): 1011 ips.setdefault(ip, []).append(name)
1012 1013 _AddIpAddress(cluster.master_ip, "cluster_ip") 1014 1015 for node in data.nodes.values(): 1016 _AddIpAddress(node.primary_ip, "node:%s/primary" % node.name) 1017 if node.secondary_ip != node.primary_ip: 1018 _AddIpAddress(node.secondary_ip, "node:%s/secondary" % node.name) 1019 1020 for instance in data.instances.values(): 1021 for idx, nic in enumerate(instance.nics): 1022 if nic.ip is None: 1023 continue 1024 1025 nicparams = objects.FillDict(default_nicparams, nic.nicparams) 1026 nic_mode = nicparams[constants.NIC_MODE] 1027 nic_link = nicparams[constants.NIC_LINK] 1028 1029 if nic_mode == constants.NIC_MODE_BRIDGED: 1030 link = "bridge:%s" % nic_link 1031 elif nic_mode == constants.NIC_MODE_ROUTED: 1032 link = "route:%s" % nic_link 1033 elif nic_mode == constants.NIC_MODE_OVS: 1034 link = "ovs:%s" % nic_link 1035 else: 1036 raise errors.ProgrammerError("NIC mode '%s' not handled" % nic_mode) 1037 1038 _AddIpAddress("%s/%s/%s" % (link, nic.ip, nic.network), 1039 "instance:%s/nic:%d" % (instance.name, idx)) 1040 1041 for ip, owners in ips.items(): 1042 if len(owners) > 1: 1043 result.append("IP address %s is used by multiple owners: %s" % 1044 (ip, utils.CommaJoin(owners))) 1045 1046 return result 1047 1048 @ConfigSync(shared=1)
1049 - def VerifyConfigAndLog(self, feedback_fn=None):
1050 """A simple wrapper around L{_UnlockedVerifyConfigAndLog}""" 1051 return self._UnlockedVerifyConfigAndLog(feedback_fn=feedback_fn)
1052
1053 - def _UnlockedVerifyConfigAndLog(self, feedback_fn=None):
1054 """Verify the configuration and log any errors. 1055 1056 The errors get logged as critical errors and also to the feedback function, 1057 if given. 1058 1059 @param feedback_fn: Callable feedback function 1060 @rtype: list 1061 @return: a list of error messages; a non-empty list signifies 1062 configuration errors 1063 1064 """ 1065 assert feedback_fn is None or callable(feedback_fn) 1066 1067 # Warn on config errors, but don't abort the save - the 1068 # configuration has already been modified, and we can't revert; 1069 # the best we can do is to warn the user and save as is, leaving 1070 # recovery to the user 1071 config_errors = self._UnlockedVerifyConfig() 1072 if config_errors: 1073 errmsg = ("Configuration data is not consistent: %s" % 1074 (utils.CommaJoin(config_errors))) 1075 logging.critical(errmsg) 1076 if feedback_fn: 1077 feedback_fn(errmsg) 1078 return config_errors
1079 1080 @ConfigSync(shared=1)
1081 - def VerifyConfig(self):
1082 """Verify function. 1083 1084 This is just a wrapper over L{_UnlockedVerifyConfig}. 1085 1086 @rtype: list 1087 @return: a list of error messages; a non-empty list signifies 1088 configuration errors 1089 1090 """ 1091 return self._UnlockedVerifyConfig()
1092
1093 - def AddTcpUdpPort(self, port):
1094 """Adds a new port to the available port pool.""" 1095 utils.SimpleRetry(True, self._wconfd.AddTcpUdpPort, 0.1, 30, args=[port]) 1096 self.OutDate()
1097 1098 @ConfigSync(shared=1)
1099 - def GetPortList(self):
1100 """Returns a copy of the current port list. 1101 1102 """ 1103 return self._ConfigData().cluster.tcpudp_port_pool.copy()
1104
1105 - def AllocatePort(self):
1106 """Allocate a port.""" 1107 def WithRetry(): 1108 port = self._wconfd.AllocatePort() 1109 self.OutDate() 1110 1111 if port is None: 1112 raise utils.RetryAgain() 1113 else: 1114 return port
1115 return utils.Retry(WithRetry, 0.1, 30) 1116 1117 @ConfigSync(shared=1)
1118 - def ComputeDRBDMap(self):
1119 """Compute the used DRBD minor/nodes. 1120 1121 This is just a wrapper over a call to WConfd. 1122 1123 @return: dictionary of node_uuid: dict of minor: instance_uuid; 1124 the returned dict will have all the nodes in it (even if with 1125 an empty list). 1126 1127 """ 1128 if self._offline: 1129 raise errors.ProgrammerError("Can't call ComputeDRBDMap in offline mode") 1130 else: 1131 return dict(map(lambda (k, v): (k, dict(v)), 1132 self._wconfd.ComputeDRBDMap()))
1133
1134 - def AllocateDRBDMinor(self, node_uuids, disk_uuid):
1135 """Allocate a drbd minor. 1136 1137 This is just a wrapper over a call to WConfd. 1138 1139 The free minor will be automatically computed from the existing 1140 devices. A node can not be given multiple times. 1141 The result is the list of minors, in the same 1142 order as the passed nodes. 1143 1144 @type node_uuids: list of strings 1145 @param node_uuids: the nodes in which we allocate minors 1146 @type disk_uuid: string 1147 @param disk_uuid: the disk for which we allocate minors 1148 @rtype: list of ints 1149 @return: A list of minors in the same order as the passed nodes 1150 1151 """ 1152 assert isinstance(disk_uuid, basestring), \ 1153 "Invalid argument '%s' passed to AllocateDRBDMinor" % disk_uuid 1154 1155 if self._offline: 1156 raise errors.ProgrammerError("Can't call AllocateDRBDMinor" 1157 " in offline mode") 1158 1159 result = self._wconfd.AllocateDRBDMinor(disk_uuid, node_uuids) 1160 logging.debug("Request to allocate drbd minors, input: %s, returning %s", 1161 node_uuids, result) 1162 return result
1163
1164 - def ReleaseDRBDMinors(self, disk_uuid):
1165 """Release temporary drbd minors allocated for a given disk. 1166 1167 This is just a wrapper over a call to WConfd. 1168 1169 @type disk_uuid: string 1170 @param disk_uuid: the disk for which temporary minors should be released 1171 1172 """ 1173 assert isinstance(disk_uuid, basestring), \ 1174 "Invalid argument passed to ReleaseDRBDMinors" 1175 # in offline mode we allow the calls to release DRBD minors, 1176 # because then nothing can be allocated anyway; 1177 # this is useful for testing 1178 if not self._offline: 1179 self._wconfd.ReleaseDRBDMinors(disk_uuid)
1180 1181 @ConfigSync(shared=1)
1182 - def GetInstanceDiskTemplate(self, inst_uuid):
1183 """Return the disk template of an instance. 1184 1185 This corresponds to the currently attached disks. If no disks are attached, 1186 it is L{constants.DT_DISKLESS}, if homogeneous disk types are attached, 1187 that type is returned, if that isn't the case, L{constants.DT_MIXED} is 1188 returned. 1189 1190 @type inst_uuid: str 1191 @param inst_uuid: The uuid of the instance. 1192 """ 1193 return utils.GetDiskTemplate(self._UnlockedGetInstanceDisks(inst_uuid))
1194 1195 @ConfigSync(shared=1)
1196 - def GetConfigVersion(self):
1197 """Get the configuration version. 1198 1199 @return: Config version 1200 1201 """ 1202 return self._ConfigData().version
1203 1204 @ConfigSync(shared=1)
1205 - def GetClusterName(self):
1206 """Get cluster name. 1207 1208 @return: Cluster name 1209 1210 """ 1211 return self._ConfigData().cluster.cluster_name
1212 1213 @ConfigSync(shared=1)
1214 - def GetMasterNode(self):
1215 """Get the UUID of the master node for this cluster. 1216 1217 @return: Master node UUID 1218 1219 """ 1220 return self._ConfigData().cluster.master_node
1221 1222 @ConfigSync(shared=1)
1223 - def GetMasterNodeName(self):
1224 """Get the hostname of the master node for this cluster. 1225 1226 @return: Master node hostname 1227 1228 """ 1229 return self._UnlockedGetNodeName(self._ConfigData().cluster.master_node)
1230 1231 @ConfigSync(shared=1)
1232 - def GetMasterNodeInfo(self):
1233 """Get the master node information for this cluster. 1234 1235 @rtype: objects.Node 1236 @return: Master node L{objects.Node} object 1237 1238 """ 1239 return self._UnlockedGetNodeInfo(self._ConfigData().cluster.master_node)
1240 1241 @ConfigSync(shared=1)
1242 - def GetMasterIP(self):
1243 """Get the IP of the master node for this cluster. 1244 1245 @return: Master IP 1246 1247 """ 1248 return self._ConfigData().cluster.master_ip
1249 1250 @ConfigSync(shared=1)
1251 - def GetMasterNetdev(self):
1252 """Get the master network device for this cluster. 1253 1254 """ 1255 return self._ConfigData().cluster.master_netdev
1256 1257 @ConfigSync(shared=1)
1258 - def GetMasterNetmask(self):
1259 """Get the netmask of the master node for this cluster. 1260 1261 """ 1262 return self._ConfigData().cluster.master_netmask
1263 1264 @ConfigSync(shared=1)
1265 - def GetUseExternalMipScript(self):
1266 """Get flag representing whether to use the external master IP setup script. 1267 1268 """ 1269 return self._ConfigData().cluster.use_external_mip_script
1270 1271 @ConfigSync(shared=1)
1272 - def GetFileStorageDir(self):
1273 """Get the file storage dir for this cluster. 1274 1275 """ 1276 return self._ConfigData().cluster.file_storage_dir
1277 1278 @ConfigSync(shared=1)
1279 - def GetSharedFileStorageDir(self):
1280 """Get the shared file storage dir for this cluster. 1281 1282 """ 1283 return self._ConfigData().cluster.shared_file_storage_dir
1284 1285 @ConfigSync(shared=1)
1286 - def GetGlusterStorageDir(self):
1287 """Get the Gluster storage dir for this cluster. 1288 1289 """ 1290 return self._ConfigData().cluster.gluster_storage_dir
1291
1292 - def _UnlockedGetHypervisorType(self):
1293 """Get the hypervisor type for this cluster. 1294 1295 """ 1296 return self._ConfigData().cluster.enabled_hypervisors[0]
1297 1298 @ConfigSync(shared=1)
1299 - def GetHypervisorType(self):
1300 """Get the hypervisor type for this cluster. 1301 1302 """ 1303 return self._UnlockedGetHypervisorType()
1304 1305 @ConfigSync(shared=1)
1306 - def GetRsaHostKey(self):
1307 """Return the rsa hostkey from the config. 1308 1309 @rtype: string 1310 @return: the rsa hostkey 1311 1312 """ 1313 return self._ConfigData().cluster.rsahostkeypub
1314 1315 @ConfigSync(shared=1)
1316 - def GetDsaHostKey(self):
1317 """Return the dsa hostkey from the config. 1318 1319 @rtype: string 1320 @return: the dsa hostkey 1321 1322 """ 1323 return self._ConfigData().cluster.dsahostkeypub
1324 1325 @ConfigSync(shared=1)
1326 - def GetDefaultIAllocator(self):
1327 """Get the default instance allocator for this cluster. 1328 1329 """ 1330 return self._ConfigData().cluster.default_iallocator
1331 1332 @ConfigSync(shared=1)
1333 - def GetDefaultIAllocatorParameters(self):
1334 """Get the default instance allocator parameters for this cluster. 1335 1336 @rtype: dict 1337 @return: dict of iallocator parameters 1338 1339 """ 1340 return self._ConfigData().cluster.default_iallocator_params
1341 1342 @ConfigSync(shared=1)
1343 - def GetPrimaryIPFamily(self):
1344 """Get cluster primary ip family. 1345 1346 @return: primary ip family 1347 1348 """ 1349 return self._ConfigData().cluster.primary_ip_family
1350 1351 @ConfigSync(shared=1)
1352 - def GetMasterNetworkParameters(self):
1353 """Get network parameters of the master node. 1354 1355 @rtype: L{object.MasterNetworkParameters} 1356 @return: network parameters of the master node 1357 1358 """ 1359 cluster = self._ConfigData().cluster 1360 result = objects.MasterNetworkParameters( 1361 uuid=cluster.master_node, ip=cluster.master_ip, 1362 netmask=cluster.master_netmask, netdev=cluster.master_netdev, 1363 ip_family=cluster.primary_ip_family) 1364 1365 return result
1366 1367 @ConfigSync(shared=1)
1368 - def GetInstallImage(self):
1369 """Get the install image location 1370 1371 @rtype: string 1372 @return: location of the install image 1373 1374 """ 1375 return self._ConfigData().cluster.install_image
1376 1377 @ConfigSync()
1378 - def SetInstallImage(self, install_image):
1379 """Set the install image location 1380 1381 @type install_image: string 1382 @param install_image: location of the install image 1383 1384 """ 1385 self._ConfigData().cluster.install_image = install_image
1386 1387 @ConfigSync(shared=1)
1388 - def GetInstanceCommunicationNetwork(self):
1389 """Get cluster instance communication network 1390 1391 @rtype: string 1392 @return: instance communication network, which is the name of the 1393 network used for instance communication 1394 1395 """ 1396 return self._ConfigData().cluster.instance_communication_network
1397 1398 @ConfigSync()
1399 - def SetInstanceCommunicationNetwork(self, network_name):
1400 """Set cluster instance communication network 1401 1402 @type network_name: string 1403 @param network_name: instance communication network, which is the name of 1404 the network used for instance communication 1405 1406 """ 1407 self._ConfigData().cluster.instance_communication_network = network_name
1408 1409 @ConfigSync(shared=1)
1410 - def GetZeroingImage(self):
1411 """Get the zeroing image location 1412 1413 @rtype: string 1414 @return: the location of the zeroing image 1415 1416 """ 1417 return self._config_data.cluster.zeroing_image
1418 1419 @ConfigSync(shared=1)
1420 - def GetCompressionTools(self):
1421 """Get cluster compression tools 1422 1423 @rtype: list of string 1424 @return: a list of tools that are cleared for use in this cluster for the 1425 purpose of compressing data 1426 1427 """ 1428 return self._ConfigData().cluster.compression_tools
1429 1430 @ConfigSync()
1431 - def SetCompressionTools(self, tools):
1432 """Set cluster compression tools 1433 1434 @type tools: list of string 1435 @param tools: a list of tools that are cleared for use in this cluster for 1436 the purpose of compressing data 1437 1438 """ 1439 self._ConfigData().cluster.compression_tools = tools
1440 1441 @ConfigSync()
1442 - def AddNodeGroup(self, group, ec_id, check_uuid=True):
1443 """Add a node group to the configuration. 1444 1445 This method calls group.UpgradeConfig() to fill any missing attributes 1446 according to their default values. 1447 1448 @type group: L{objects.NodeGroup} 1449 @param group: the NodeGroup object to add 1450 @type ec_id: string 1451 @param ec_id: unique id for the job to use when creating a missing UUID 1452 @type check_uuid: bool 1453 @param check_uuid: add an UUID to the group if it doesn't have one or, if 1454 it does, ensure that it does not exist in the 1455 configuration already 1456 1457 """ 1458 self._UnlockedAddNodeGroup(group, ec_id, check_uuid)
1459
1460 - def _UnlockedAddNodeGroup(self, group, ec_id, check_uuid):
1461 """Add a node group to the configuration. 1462 1463 """ 1464 logging.info("Adding node group %s to configuration", group.name) 1465 1466 # Some code might need to add a node group with a pre-populated UUID 1467 # generated with ConfigWriter.GenerateUniqueID(). We allow them to bypass 1468 # the "does this UUID" exist already check. 1469 if check_uuid: 1470 self._EnsureUUID(group, ec_id) 1471 1472 try: 1473 existing_uuid = self._UnlockedLookupNodeGroup(group.name) 1474 except errors.OpPrereqError: 1475 pass 1476 else: 1477 raise errors.OpPrereqError("Desired group name '%s' already exists as a" 1478 " node group (UUID: %s)" % 1479 (group.name, existing_uuid), 1480 errors.ECODE_EXISTS) 1481 1482 group.serial_no = 1 1483 group.ctime = group.mtime = time.time() 1484 group.UpgradeConfig() 1485 1486 self._ConfigData().nodegroups[group.uuid] = group 1487 self._ConfigData().cluster.serial_no += 1
1488 1489 @ConfigSync()
1490 - def RemoveNodeGroup(self, group_uuid):
1491 """Remove a node group from the configuration. 1492 1493 @type group_uuid: string 1494 @param group_uuid: the UUID of the node group to remove 1495 1496 """ 1497 logging.info("Removing node group %s from configuration", group_uuid) 1498 1499 if group_uuid not in self._ConfigData().nodegroups: 1500 raise errors.ConfigurationError("Unknown node group '%s'" % group_uuid) 1501 1502 assert len(self._ConfigData().nodegroups) != 1, \ 1503 "Group '%s' is the only group, cannot be removed" % group_uuid 1504 1505 del self._ConfigData().nodegroups[group_uuid] 1506 self._ConfigData().cluster.serial_no += 1
1507
1508 - def _UnlockedLookupNodeGroup(self, target):
1509 """Lookup a node group's UUID. 1510 1511 @type target: string or None 1512 @param target: group name or UUID or None to look for the default 1513 @rtype: string 1514 @return: nodegroup UUID 1515 @raises errors.OpPrereqError: when the target group cannot be found 1516 1517 """ 1518 if target is None: 1519 if len(self._ConfigData().nodegroups) != 1: 1520 raise errors.OpPrereqError("More than one node group exists. Target" 1521 " group must be specified explicitly.") 1522 else: 1523 return self._ConfigData().nodegroups.keys()[0] 1524 if target in self._ConfigData().nodegroups: 1525 return target 1526 for nodegroup in self._ConfigData().nodegroups.values(): 1527 if nodegroup.name == target: 1528 return nodegroup.uuid 1529 raise errors.OpPrereqError("Node group '%s' not found" % target, 1530 errors.ECODE_NOENT)
1531 1532 @ConfigSync(shared=1)
1533 - def LookupNodeGroup(self, target):
1534 """Lookup a node group's UUID. 1535 1536 This function is just a wrapper over L{_UnlockedLookupNodeGroup}. 1537 1538 @type target: string or None 1539 @param target: group name or UUID or None to look for the default 1540 @rtype: string 1541 @return: nodegroup UUID 1542 1543 """ 1544 return self._UnlockedLookupNodeGroup(target)
1545
1546 - def _UnlockedGetNodeGroup(self, uuid):
1547 """Lookup a node group. 1548 1549 @type uuid: string 1550 @param uuid: group UUID 1551 @rtype: L{objects.NodeGroup} or None 1552 @return: nodegroup object, or None if not found 1553 1554 """ 1555 if uuid not in self._ConfigData().nodegroups: 1556 return None 1557 1558 return self._ConfigData().nodegroups[uuid]
1559 1560 @ConfigSync(shared=1)
1561 - def GetNodeGroup(self, uuid):
1562 """Lookup a node group. 1563 1564 @type uuid: string 1565 @param uuid: group UUID 1566 @rtype: L{objects.NodeGroup} or None 1567 @return: nodegroup object, or None if not found 1568 1569 """ 1570 return self._UnlockedGetNodeGroup(uuid)
1571
1572 - def _UnlockedGetAllNodeGroupsInfo(self):
1573 """Get the configuration of all node groups. 1574 1575 """ 1576 return dict(self._ConfigData().nodegroups)
1577 1578 @ConfigSync(shared=1)
1579 - def GetAllNodeGroupsInfo(self):
1580 """Get the configuration of all node groups. 1581 1582 """ 1583 return self._UnlockedGetAllNodeGroupsInfo()
1584 1585 @ConfigSync(shared=1)
1586 - def GetAllNodeGroupsInfoDict(self):
1587 """Get the configuration of all node groups expressed as a dictionary of 1588 dictionaries. 1589 1590 """ 1591 return dict(map(lambda (uuid, ng): (uuid, ng.ToDict()), 1592 self._UnlockedGetAllNodeGroupsInfo().items()))
1593 1594 @ConfigSync(shared=1)
1595 - def GetNodeGroupList(self):
1596 """Get a list of node groups. 1597 1598 """ 1599 return self._ConfigData().nodegroups.keys()
1600 1601 @ConfigSync(shared=1)
1602 - def GetNodeGroupMembersByNodes(self, nodes):
1603 """Get nodes which are member in the same nodegroups as the given nodes. 1604 1605 """ 1606 ngfn = lambda node_uuid: self._UnlockedGetNodeInfo(node_uuid).group 1607 return frozenset(member_uuid 1608 for node_uuid in nodes 1609 for member_uuid in 1610 self._UnlockedGetNodeGroup(ngfn(node_uuid)).members)
1611 1612 @ConfigSync(shared=1)
1613 - def GetMultiNodeGroupInfo(self, group_uuids):
1614 """Get the configuration of multiple node groups. 1615 1616 @param group_uuids: List of node group UUIDs 1617 @rtype: list 1618 @return: List of tuples of (group_uuid, group_info) 1619 1620 """ 1621 return [(uuid, self._UnlockedGetNodeGroup(uuid)) for uuid in group_uuids]
1622
1623 - def AddInstance(self, instance, _ec_id, replace=False):
1624 """Add an instance to the config. 1625 1626 This should be used after creating a new instance. 1627 1628 @type instance: L{objects.Instance} 1629 @param instance: the instance object 1630 @type replace: bool 1631 @param replace: if true, expect the instance to be present and 1632 replace rather than add. 1633 1634 """ 1635 if not isinstance(instance, objects.Instance): 1636 raise errors.ProgrammerError("Invalid type passed to AddInstance") 1637 1638 instance.serial_no = 1 1639 1640 utils.SimpleRetry(True, self._wconfd.AddInstance, 0.1, 30, 1641 args=[instance.ToDict(), 1642 self._GetWConfdContext(), 1643 replace]) 1644 self.OutDate()
1645
1646 - def _EnsureUUID(self, item, ec_id):
1647 """Ensures a given object has a valid UUID. 1648 1649 @param item: the instance or node to be checked 1650 @param ec_id: the execution context id for the uuid reservation 1651 1652 """ 1653 if not item.uuid: 1654 item.uuid = self._GenerateUniqueID(ec_id) 1655 else: 1656 self._CheckUniqueUUID(item, include_temporary=True)
1657
1658 - def _CheckUniqueUUID(self, item, include_temporary):
1659 """Checks that the UUID of the given object is unique. 1660 1661 @param item: the instance or node to be checked 1662 @param include_temporary: whether temporarily generated UUID's should be 1663 included in the check. If the UUID of the item to be checked is 1664 a temporarily generated one, this has to be C{False}. 1665 1666 """ 1667 if not item.uuid: 1668 raise errors.ConfigurationError("'%s' must have an UUID" % (item.name,)) 1669 if item.uuid in self._AllIDs(include_temporary=include_temporary): 1670 raise errors.ConfigurationError("Cannot add '%s': UUID %s already" 1671 " in use" % (item.name, item.uuid))
1672
1673 - def _CheckUUIDpresent(self, item):
1674 """Checks that an object with the given UUID exists. 1675 1676 @param item: the instance or other UUID possessing object to verify that 1677 its UUID is present 1678 1679 """ 1680 if not item.uuid: 1681 raise errors.ConfigurationError("'%s' must have an UUID" % (item.name,)) 1682 if item.uuid not in self._AllIDs(include_temporary=False): 1683 raise errors.ConfigurationError("Cannot replace '%s': UUID %s not present" 1684 % (item.name, item.uuid))
1685
1686 - def _SetInstanceStatus(self, inst_uuid, status, disks_active, 1687 admin_state_source):
1688 """Set the instance's status to a given value. 1689 1690 @rtype: L{objects.Instance} 1691 @return: the updated instance object 1692 1693 """ 1694 def WithRetry(): 1695 result = self._wconfd.SetInstanceStatus(inst_uuid, status, 1696 disks_active, admin_state_source) 1697 self.OutDate() 1698 1699 if result is None: 1700 raise utils.RetryAgain() 1701 else: 1702 return result
1703 return objects.Instance.FromDict(utils.Retry(WithRetry, 0.1, 30)) 1704
1705 - def MarkInstanceUp(self, inst_uuid):
1706 """Mark the instance status to up in the config. 1707 1708 This also sets the instance disks active flag. 1709 1710 @rtype: L{objects.Instance} 1711 @return: the updated instance object 1712 1713 """ 1714 return self._SetInstanceStatus(inst_uuid, constants.ADMINST_UP, True, 1715 constants.ADMIN_SOURCE)
1716
1717 - def MarkInstanceOffline(self, inst_uuid):
1718 """Mark the instance status to down in the config. 1719 1720 This also clears the instance disks active flag. 1721 1722 @rtype: L{objects.Instance} 1723 @return: the updated instance object 1724 1725 """ 1726 return self._SetInstanceStatus(inst_uuid, constants.ADMINST_OFFLINE, False, 1727 constants.ADMIN_SOURCE)
1728
1729 - def RemoveInstance(self, inst_uuid):
1730 """Remove the instance from the configuration. 1731 1732 """ 1733 utils.SimpleRetry(True, self._wconfd.RemoveInstance, 0.1, 30, 1734 args=[inst_uuid]) 1735 self.OutDate()
1736 1737 @ConfigSync()
1738 - def RenameInstance(self, inst_uuid, new_name):
1739 """Rename an instance. 1740 1741 This needs to be done in ConfigWriter and not by RemoveInstance 1742 combined with AddInstance as only we can guarantee an atomic 1743 rename. 1744 1745 """ 1746 if inst_uuid not in self._ConfigData().instances: 1747 raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) 1748 1749 inst = self._ConfigData().instances[inst_uuid] 1750 inst.name = new_name 1751 1752 instance_disks = self._UnlockedGetInstanceDisks(inst_uuid) 1753 for (_, disk) in enumerate(instance_disks): 1754 if disk.dev_type in [constants.DT_FILE, constants.DT_SHARED_FILE]: 1755 # rename the file paths in logical and physical id 1756 file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1])) 1757 disk.logical_id = (disk.logical_id[0], 1758 utils.PathJoin(file_storage_dir, inst.name, 1759 os.path.basename(disk.logical_id[1]))) 1760 1761 # Force update of ssconf files 1762 self._ConfigData().cluster.serial_no += 1
1763
1764 - def MarkInstanceDown(self, inst_uuid):
1765 """Mark the status of an instance to down in the configuration. 1766 1767 This does not touch the instance disks active flag, as shut down instances 1768 can still have active disks. 1769 1770 @rtype: L{objects.Instance} 1771 @return: the updated instance object 1772 1773 """ 1774 return self._SetInstanceStatus(inst_uuid, constants.ADMINST_DOWN, None, 1775 constants.ADMIN_SOURCE)
1776
1777 - def MarkInstanceUserDown(self, inst_uuid):
1778 """Mark the status of an instance to user down in the configuration. 1779 1780 This does not touch the instance disks active flag, as user shut 1781 down instances can still have active disks. 1782 1783 """ 1784 1785 self._SetInstanceStatus(inst_uuid, constants.ADMINST_DOWN, None, 1786 constants.USER_SOURCE)
1787
1788 - def MarkInstanceDisksActive(self, inst_uuid):
1789 """Mark the status of instance disks active. 1790 1791 @rtype: L{objects.Instance} 1792 @return: the updated instance object 1793 1794 """ 1795 return self._SetInstanceStatus(inst_uuid, None, True, None)
1796
1797 - def MarkInstanceDisksInactive(self, inst_uuid):
1798 """Mark the status of instance disks inactive. 1799 1800 @rtype: L{objects.Instance} 1801 @return: the updated instance object 1802 1803 """ 1804 return self._SetInstanceStatus(inst_uuid, None, False, None)
1805
1806 - def _UnlockedGetInstanceList(self):
1807 """Get the list of instances. 1808 1809 This function is for internal use, when the config lock is already held. 1810 1811 """ 1812 return self._ConfigData().instances.keys()
1813 1814 @ConfigSync(shared=1)
1815 - def GetInstanceList(self):
1816 """Get the list of instances. 1817 1818 @return: array of instances, ex. ['instance2-uuid', 'instance1-uuid'] 1819 1820 """ 1821 return self._UnlockedGetInstanceList()
1822
1823 - def ExpandInstanceName(self, short_name):
1824 """Attempt to expand an incomplete instance name. 1825 1826 """ 1827 # Locking is done in L{ConfigWriter.GetAllInstancesInfo} 1828 all_insts = self.GetAllInstancesInfo().values() 1829 expanded_name = _MatchNameComponentIgnoreCase( 1830 short_name, [inst.name for inst in all_insts]) 1831 1832 if expanded_name is not None: 1833 # there has to be exactly one instance with that name 1834 inst = (filter(lambda n: n.name == expanded_name, all_insts)[0]) 1835 return (inst.uuid, inst.name) 1836 else: 1837 return (None, None)
1838
1839 - def _UnlockedGetInstanceInfo(self, inst_uuid):
1840 """Returns information about an instance. 1841 1842 This function is for internal use, when the config lock is already held. 1843 1844 """ 1845 if inst_uuid not in self._ConfigData().instances: 1846 return None 1847 1848 return self._ConfigData().instances[inst_uuid]
1849 1850 @ConfigSync(shared=1)
1851 - def GetInstanceInfo(self, inst_uuid):
1852 """Returns information about an instance. 1853 1854 It takes the information from the configuration file. Other information of 1855 an instance are taken from the live systems. 1856 1857 @param inst_uuid: UUID of the instance 1858 1859 @rtype: L{objects.Instance} 1860 @return: the instance object 1861 1862 """ 1863 return self._UnlockedGetInstanceInfo(inst_uuid)
1864 1865 @ConfigSync(shared=1)
1866 - def GetInstanceNodeGroups(self, inst_uuid, primary_only=False):
1867 """Returns set of node group UUIDs for instance's nodes. 1868 1869 @rtype: frozenset 1870 1871 """ 1872 instance = self._UnlockedGetInstanceInfo(inst_uuid) 1873 if not instance: 1874 raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) 1875 1876 if primary_only: 1877 nodes = [instance.primary_node] 1878 else: 1879 nodes = self._UnlockedGetInstanceNodes(instance.uuid) 1880 1881 return frozenset(self._UnlockedGetNodeInfo(node_uuid).group 1882 for node_uuid in nodes)
1883 1884 @ConfigSync(shared=1)
1885 - def GetInstanceNetworks(self, inst_uuid):
1886 """Returns set of network UUIDs for instance's nics. 1887 1888 @rtype: frozenset 1889 1890 """ 1891 instance = self._UnlockedGetInstanceInfo(inst_uuid) 1892 if not instance: 1893 raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) 1894 1895 networks = set() 1896 for nic in instance.nics: 1897 if nic.network: 1898 networks.add(nic.network) 1899 1900 return frozenset(networks)
1901 1902 @ConfigSync(shared=1)
1903 - def GetMultiInstanceInfo(self, inst_uuids):
1904 """Get the configuration of multiple instances. 1905 1906 @param inst_uuids: list of instance UUIDs 1907 @rtype: list 1908 @return: list of tuples (instance UUID, instance_info), where 1909 instance_info is what would GetInstanceInfo return for the 1910 node, while keeping the original order 1911 1912 """ 1913 return [(uuid, self._UnlockedGetInstanceInfo(uuid)) for uuid in inst_uuids]
1914 1915 @ConfigSync(shared=1)
1916 - def GetMultiInstanceInfoByName(self, inst_names):
1917 """Get the configuration of multiple instances. 1918 1919 @param inst_names: list of instance names 1920 @rtype: list 1921 @return: list of tuples (instance, instance_info), where 1922 instance_info is what would GetInstanceInfo return for the 1923 node, while keeping the original order 1924 1925 """ 1926 result = [] 1927 for name in inst_names: 1928 instance = self._UnlockedGetInstanceInfoByName(name) 1929 if instance: 1930 result.append((instance.uuid, instance)) 1931 else: 1932 raise errors.ConfigurationError("Instance data of instance '%s'" 1933 " not found." % name) 1934 return result
1935 1936 @ConfigSync(shared=1)
1937 - def GetAllInstancesInfo(self):
1938 """Get the configuration of all instances. 1939 1940 @rtype: dict 1941 @return: dict of (instance, instance_info), where instance_info is what 1942 would GetInstanceInfo return for the node 1943 1944 """ 1945 return self._UnlockedGetAllInstancesInfo()
1946
1947 - def _UnlockedGetAllInstancesInfo(self):
1948 my_dict = dict([(inst_uuid, self._UnlockedGetInstanceInfo(inst_uuid)) 1949 for inst_uuid in self._UnlockedGetInstanceList()]) 1950 return my_dict
1951 1952 @ConfigSync(shared=1)
1953 - def GetInstancesInfoByFilter(self, filter_fn):
1954 """Get instance configuration with a filter. 1955 1956 @type filter_fn: callable 1957 @param filter_fn: Filter function receiving instance object as parameter, 1958 returning boolean. Important: this function is called while the 1959 configuration locks is held. It must not do any complex work or call 1960 functions potentially leading to a deadlock. Ideally it doesn't call any 1961 other functions and just compares instance attributes. 1962 1963 """ 1964 return dict((uuid, inst) 1965 for (uuid, inst) in self._ConfigData().instances.items() 1966 if filter_fn(inst))
1967 1968 @ConfigSync(shared=1)
1969 - def GetInstanceInfoByName(self, inst_name):
1970 """Get the L{objects.Instance} object for a named instance. 1971 1972 @param inst_name: name of the instance to get information for 1973 @type inst_name: string 1974 @return: the corresponding L{objects.Instance} instance or None if no 1975 information is available 1976 1977 """ 1978 return self._UnlockedGetInstanceInfoByName(inst_name)
1979
1980 - def _UnlockedGetInstanceInfoByName(self, inst_name):
1981 for inst in self._UnlockedGetAllInstancesInfo().values(): 1982 if inst.name == inst_name: 1983 return inst 1984 return None
1985
1986 - def _UnlockedGetInstanceName(self, inst_uuid):
1987 inst_info = self._UnlockedGetInstanceInfo(inst_uuid) 1988 if inst_info is None: 1989 raise errors.OpExecError("Unknown instance: %s" % inst_uuid) 1990 return inst_info.name
1991 1992 @ConfigSync(shared=1)
1993 - def GetInstanceName(self, inst_uuid):
1994 """Gets the instance name for the passed instance. 1995 1996 @param inst_uuid: instance UUID to get name for 1997 @type inst_uuid: string 1998 @rtype: string 1999 @return: instance name 2000 2001 """ 2002 return self._UnlockedGetInstanceName(inst_uuid)
2003 2004 @ConfigSync(shared=1)
2005 - def GetInstanceNames(self, inst_uuids):
2006 """Gets the instance names for the passed list of nodes. 2007 2008 @param inst_uuids: list of instance UUIDs to get names for 2009 @type inst_uuids: list of strings 2010 @rtype: list of strings 2011 @return: list of instance names 2012 2013 """ 2014 return self._UnlockedGetInstanceNames(inst_uuids)
2015
2016 - def SetInstancePrimaryNode(self, inst_uuid, target_node_uuid):
2017 """Sets the primary node of an existing instance 2018 2019 @param inst_uuid: instance UUID 2020 @type inst_uuid: string 2021 @param target_node_uuid: the new primary node UUID 2022 @type target_node_uuid: string 2023 2024 """ 2025 utils.SimpleRetry(True, self._wconfd.SetInstancePrimaryNode, 0.1, 30, 2026 args=[inst_uuid, target_node_uuid]) 2027 self.OutDate()
2028 2029 @ConfigSync()
2030 - def SetDiskNodes(self, disk_uuid, nodes):
2031 """Sets the nodes of an existing disk 2032 2033 @param disk_uuid: disk UUID 2034 @type disk_uuid: string 2035 @param nodes: the new nodes for the disk 2036 @type nodes: list of node uuids 2037 2038 """ 2039 self._UnlockedGetDiskInfo(disk_uuid).nodes = nodes
2040 2041 @ConfigSync()
2042 - def SetDiskLogicalID(self, disk_uuid, logical_id):
2043 """Sets the logical_id of an existing disk 2044 2045 @param disk_uuid: disk UUID 2046 @type disk_uuid: string 2047 @param logical_id: the new logical_id for the disk 2048 @type logical_id: tuple 2049 2050 """ 2051 disk = self._UnlockedGetDiskInfo(disk_uuid) 2052 if disk is None: 2053 raise errors.ConfigurationError("Unknown disk UUID '%s'" % disk_uuid) 2054 2055 if len(disk.logical_id) != len(logical_id): 2056 raise errors.ProgrammerError("Logical ID format mismatch\n" 2057 "Existing logical ID: %s\n" 2058 "New logical ID: %s", disk.logical_id, 2059 logical_id) 2060 2061 disk.logical_id = logical_id
2062
2063 - def _UnlockedGetInstanceNames(self, inst_uuids):
2064 return [self._UnlockedGetInstanceName(uuid) for uuid in inst_uuids]
2065
2066 - def _UnlockedAddNode(self, node, ec_id):
2067 """Add a node to the configuration. 2068 2069 @type node: L{objects.Node} 2070 @param node: a Node instance 2071 2072 """ 2073 logging.info("Adding node %s to configuration", node.name) 2074 2075 self._EnsureUUID(node, ec_id) 2076 2077 node.serial_no = 1 2078 node.ctime = node.mtime = time.time() 2079 self._UnlockedAddNodeToGroup(node.uuid, node.group) 2080 assert node.uuid in self._ConfigData().nodegroups[node.group].members 2081 self._ConfigData().nodes[node.uuid] = node 2082 self._ConfigData().cluster.serial_no += 1
2083 2084 @ConfigSync()
2085 - def AddNode(self, node, ec_id):
2086 """Add a node to the configuration. 2087 2088 @type node: L{objects.Node} 2089 @param node: a Node instance 2090 2091 """ 2092 self._UnlockedAddNode(node, ec_id)
2093 2094 @ConfigSync()
2095 - def RemoveNode(self, node_uuid):
2096 """Remove a node from the configuration. 2097 2098 """ 2099 logging.info("Removing node %s from configuration", node_uuid) 2100 2101 if node_uuid not in self._ConfigData().nodes: 2102 raise errors.ConfigurationError("Unknown node '%s'" % node_uuid) 2103 2104 self._UnlockedRemoveNodeFromGroup(self._ConfigData().nodes[node_uuid]) 2105 del self._ConfigData().nodes[node_uuid] 2106 self._ConfigData().cluster.serial_no += 1
2107
2108 - def ExpandNodeName(self, short_name):
2109 """Attempt to expand an incomplete node name into a node UUID. 2110 2111 """ 2112 # Locking is done in L{ConfigWriter.GetAllNodesInfo} 2113 all_nodes = self.GetAllNodesInfo().values() 2114 expanded_name = _MatchNameComponentIgnoreCase( 2115 short_name, [node.name for node in all_nodes]) 2116 2117 if expanded_name is not None: 2118 # there has to be exactly one node with that name 2119 node = (filter(lambda n: n.name == expanded_name, all_nodes)[0]) 2120 return (node.uuid, node.name) 2121 else: 2122 return (None, None)
2123
2124 - def _UnlockedGetNodeInfo(self, node_uuid):
2125 """Get the configuration of a node, as stored in the config. 2126 2127 This function is for internal use, when the config lock is already 2128 held. 2129 2130 @param node_uuid: the node UUID 2131 2132 @rtype: L{objects.Node} 2133 @return: the node object 2134 2135 """ 2136 if node_uuid not in self._ConfigData().nodes: 2137 return None 2138 2139 return self._ConfigData().nodes[node_uuid]
2140 2141 @ConfigSync(shared=1)
2142 - def GetNodeInfo(self, node_uuid):
2143 """Get the configuration of a node, as stored in the config. 2144 2145 This is just a locked wrapper over L{_UnlockedGetNodeInfo}. 2146 2147 @param node_uuid: the node UUID 2148 2149 @rtype: L{objects.Node} 2150 @return: the node object 2151 2152 """ 2153 return self._UnlockedGetNodeInfo(node_uuid)
2154 2155 @ConfigSync(shared=1)
2156 - def GetNodeInstances(self, node_uuid):
2157 """Get the instances of a node, as stored in the config. 2158 2159 @param node_uuid: the node UUID 2160 2161 @rtype: (list, list) 2162 @return: a tuple with two lists: the primary and the secondary instances 2163 2164 """ 2165 pri = [] 2166 sec = [] 2167 for inst in self._ConfigData().instances.values(): 2168 if inst.primary_node == node_uuid: 2169 pri.append(inst.uuid) 2170 if node_uuid in self._UnlockedGetInstanceSecondaryNodes(inst.uuid): 2171 sec.append(inst.uuid) 2172 return (pri, sec)
2173 2174 @ConfigSync(shared=1)
2175 - def GetNodeGroupInstances(self, uuid, primary_only=False):
2176 """Get the instances of a node group. 2177 2178 @param uuid: Node group UUID 2179 @param primary_only: Whether to only consider primary nodes 2180 @rtype: frozenset 2181 @return: List of instance UUIDs in node group 2182 2183 """ 2184 if primary_only: 2185 nodes_fn = lambda inst: [inst.primary_node] 2186 else: 2187 nodes_fn = lambda inst: self._UnlockedGetInstanceNodes(inst.uuid) 2188 2189 return frozenset(inst.uuid 2190 for inst in self._ConfigData().instances.values() 2191 for node_uuid in nodes_fn(inst) 2192 if self._UnlockedGetNodeInfo(node_uuid).group == uuid)
2193
2194 - def _UnlockedGetHvparamsString(self, hvname):
2195 """Return the string representation of the list of hyervisor parameters of 2196 the given hypervisor. 2197 2198 @see: C{GetHvparams} 2199 2200 """ 2201 result = "" 2202 hvparams = self._ConfigData().cluster.hvparams[hvname] 2203 for key in hvparams: 2204 result += "%s=%s\n" % (key, hvparams[key]) 2205 return result
2206 2207 @ConfigSync(shared=1)
2208 - def GetHvparamsString(self, hvname):
2209 """Return the hypervisor parameters of the given hypervisor. 2210 2211 @type hvname: string 2212 @param hvname: name of a hypervisor 2213 @rtype: string 2214 @return: string containing key-value-pairs, one pair on each line; 2215 format: KEY=VALUE 2216 2217 """ 2218 return self._UnlockedGetHvparamsString(hvname)
2219
2220 - def _UnlockedGetNodeList(self):
2221 """Return the list of nodes which are in the configuration. 2222 2223 This function is for internal use, when the config lock is already 2224 held. 2225 2226 @rtype: list 2227 2228 """ 2229 return self._ConfigData().nodes.keys()
2230 2231 @ConfigSync(shared=1)
2232 - def GetNodeList(self):
2233 """Return the list of nodes which are in the configuration. 2234 2235 """ 2236 return self._UnlockedGetNodeList()
2237
2238 - def _UnlockedGetOnlineNodeList(self):
2239 """Return the list of nodes which are online. 2240 2241 """ 2242 all_nodes = [self._UnlockedGetNodeInfo(node) 2243 for node in self._UnlockedGetNodeList()] 2244 return [node.uuid for node in all_nodes if not node.offline]
2245 2246 @ConfigSync(shared=1)
2247 - def GetOnlineNodeList(self):
2248 """Return the list of nodes which are online. 2249 2250 """ 2251 return self._UnlockedGetOnlineNodeList()
2252 2253 @ConfigSync(shared=1)
2254 - def GetVmCapableNodeList(self):
2255 """Return the list of nodes which are not vm capable. 2256 2257 """ 2258 all_nodes = [self._UnlockedGetNodeInfo(node) 2259 for node in self._UnlockedGetNodeList()] 2260 return [node.uuid for node in all_nodes if node.vm_capable]
2261 2262 @ConfigSync(shared=1)
2263 - def GetNonVmCapableNodeList(self):
2264 """Return the list of nodes' uuids which are not vm capable. 2265 2266 """ 2267 all_nodes = [self._UnlockedGetNodeInfo(node) 2268 for node in self._UnlockedGetNodeList()] 2269 return [node.uuid for node in all_nodes if not node.vm_capable]
2270 2271 @ConfigSync(shared=1)
2272 - def GetNonVmCapableNodeNameList(self):
2273 """Return the list of nodes' names which are not vm capable. 2274 2275 """ 2276 all_nodes = [self._UnlockedGetNodeInfo(node) 2277 for node in self._UnlockedGetNodeList()] 2278 return [node.name for node in all_nodes if not node.vm_capable]
2279 2280 @ConfigSync(shared=1)
2281 - def GetMultiNodeInfo(self, node_uuids):
2282 """Get the configuration of multiple nodes. 2283 2284 @param node_uuids: list of node UUIDs 2285 @rtype: list 2286 @return: list of tuples of (node, node_info), where node_info is 2287 what would GetNodeInfo return for the node, in the original 2288 order 2289 2290 """ 2291 return [(uuid, self._UnlockedGetNodeInfo(uuid)) for uuid in node_uuids]
2292
2293 - def _UnlockedGetAllNodesInfo(self):
2294 """Gets configuration of all nodes. 2295 2296 @note: See L{GetAllNodesInfo} 2297 2298 """ 2299 return dict([(node_uuid, self._UnlockedGetNodeInfo(node_uuid)) 2300 for node_uuid in self._UnlockedGetNodeList()])
2301 2302 @ConfigSync(shared=1)
2303 - def GetAllNodesInfo(self):
2304 """Get the configuration of all nodes. 2305 2306 @rtype: dict 2307 @return: dict of (node, node_info), where node_info is what 2308 would GetNodeInfo return for the node 2309 2310 """ 2311 return self._UnlockedGetAllNodesInfo()
2312
2313 - def _UnlockedGetNodeInfoByName(self, node_name):
2314 for node in self._UnlockedGetAllNodesInfo().values(): 2315 if node.name == node_name: 2316 return node 2317 return None
2318 2319 @ConfigSync(shared=1)
2320 - def GetNodeInfoByName(self, node_name):
2321 """Get the L{objects.Node} object for a named node. 2322 2323 @param node_name: name of the node to get information for 2324 @type node_name: string 2325 @return: the corresponding L{objects.Node} instance or None if no 2326 information is available 2327 2328 """ 2329 return self._UnlockedGetNodeInfoByName(node_name)
2330 2331 @ConfigSync(shared=1)
2332 - def GetNodeGroupInfoByName(self, nodegroup_name):
2333 """Get the L{objects.NodeGroup} object for a named node group. 2334 2335 @param nodegroup_name: name of the node group to get information for 2336 @type nodegroup_name: string 2337 @return: the corresponding L{objects.NodeGroup} instance or None if no 2338 information is available 2339 2340 """ 2341 for nodegroup in self._UnlockedGetAllNodeGroupsInfo().values(): 2342 if nodegroup.name == nodegroup_name: 2343 return nodegroup 2344 return None
2345
2346 - def _UnlockedGetNodeName(self, node_spec):
2347 if isinstance(node_spec, objects.Node): 2348 return node_spec.name 2349 elif isinstance(node_spec, basestring): 2350 node_info = self._UnlockedGetNodeInfo(node_spec) 2351 if node_info is None: 2352 raise errors.OpExecError("Unknown node: %s" % node_spec) 2353 return node_info.name 2354 else: 2355 raise errors.ProgrammerError("Can't handle node spec '%s'" % node_spec)
2356 2357 @ConfigSync(shared=1)
2358 - def GetNodeName(self, node_spec):
2359 """Gets the node name for the passed node. 2360 2361 @param node_spec: node to get names for 2362 @type node_spec: either node UUID or a L{objects.Node} object 2363 @rtype: string 2364 @return: node name 2365 2366 """ 2367 return self._UnlockedGetNodeName(node_spec)
2368
2369 - def _UnlockedGetNodeNames(self, node_specs):
2370 return [self._UnlockedGetNodeName(node_spec) for node_spec in node_specs]
2371 2372 @ConfigSync(shared=1)
2373 - def GetNodeNames(self, node_specs):
2374 """Gets the node names for the passed list of nodes. 2375 2376 @param node_specs: list of nodes to get names for 2377 @type node_specs: list of either node UUIDs or L{objects.Node} objects 2378 @rtype: list of strings 2379 @return: list of node names 2380 2381 """ 2382 return self._UnlockedGetNodeNames(node_specs)
2383 2384 @ConfigSync(shared=1)
2385 - def GetNodeGroupsFromNodes(self, node_uuids):
2386 """Returns groups for a list of nodes. 2387 2388 @type node_uuids: list of string 2389 @param node_uuids: List of node UUIDs 2390 @rtype: frozenset 2391 2392 """ 2393 return frozenset(self._UnlockedGetNodeInfo(uuid).group 2394 for uuid in node_uuids)
2395
2396 - def _UnlockedGetMasterCandidateUuids(self):
2397 """Get the list of UUIDs of master candidates. 2398 2399 @rtype: list of strings 2400 @return: list of UUIDs of all master candidates. 2401 2402 """ 2403 return [node.uuid for node in self._ConfigData().nodes.values() 2404 if node.master_candidate]
2405 2406 @ConfigSync(shared=1)
2407 - def GetMasterCandidateUuids(self):
2408 """Get the list of UUIDs of master candidates. 2409 2410 @rtype: list of strings 2411 @return: list of UUIDs of all master candidates. 2412 2413 """ 2414 return self._UnlockedGetMasterCandidateUuids()
2415
2416 - def _UnlockedGetMasterCandidateStats(self, exceptions=None):
2417 """Get the number of current and maximum desired and possible candidates. 2418 2419 @type exceptions: list 2420 @param exceptions: if passed, list of nodes that should be ignored 2421 @rtype: tuple 2422 @return: tuple of (current, desired and possible, possible) 2423 2424 """ 2425 mc_now = mc_should = mc_max = 0 2426 for node in self._ConfigData().nodes.values(): 2427 if exceptions and node.uuid in exceptions: 2428 continue 2429 if not (node.offline or node.drained) and node.master_capable: 2430 mc_max += 1 2431 if node.master_candidate: 2432 mc_now += 1 2433 mc_should = min(mc_max, self._ConfigData().cluster.candidate_pool_size) 2434 return (mc_now, mc_should, mc_max)
2435 2436 @ConfigSync(shared=1)
2437 - def GetMasterCandidateStats(self, exceptions=None):
2438 """Get the number of current and maximum possible candidates. 2439 2440 This is just a wrapper over L{_UnlockedGetMasterCandidateStats}. 2441 2442 @type exceptions: list 2443 @param exceptions: if passed, list of nodes that should be ignored 2444 @rtype: tuple 2445 @return: tuple of (current, max) 2446 2447 """ 2448 return self._UnlockedGetMasterCandidateStats(exceptions)
2449 2450 @ConfigSync()
2451 - def MaintainCandidatePool(self, exception_node_uuids):
2452 """Try to grow the candidate pool to the desired size. 2453 2454 @type exception_node_uuids: list 2455 @param exception_node_uuids: if passed, list of nodes that should be ignored 2456 @rtype: list 2457 @return: list with the adjusted nodes (L{objects.Node} instances) 2458 2459 """ 2460 mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats( 2461 exception_node_uuids) 2462 mod_list = [] 2463 if mc_now < mc_max: 2464 node_list = self._ConfigData().nodes.keys() 2465 random.shuffle(node_list) 2466 for uuid in node_list: 2467 if mc_now >= mc_max: 2468 break 2469 node = self._ConfigData().nodes[uuid] 2470 if (node.master_candidate or node.offline or node.drained or 2471 node.uuid in exception_node_uuids or not node.master_capable): 2472 continue 2473 mod_list.append(node) 2474 node.master_candidate = True 2475 node.serial_no += 1 2476 mc_now += 1 2477 if mc_now != mc_max: 2478 # this should not happen 2479 logging.warning("Warning: MaintainCandidatePool didn't manage to" 2480 " fill the candidate pool (%d/%d)", mc_now, mc_max) 2481 if mod_list: 2482 self._ConfigData().cluster.serial_no += 1 2483 2484 return mod_list
2485
2486 - def _UnlockedAddNodeToGroup(self, node_uuid, nodegroup_uuid):
2487 """Add a given node to the specified group. 2488 2489 """ 2490 if nodegroup_uuid not in self._ConfigData().nodegroups: 2491 # This can happen if a node group gets deleted between its lookup and 2492 # when we're adding the first node to it, since we don't keep a lock in 2493 # the meantime. It's ok though, as we'll fail cleanly if the node group 2494 # is not found anymore. 2495 raise errors.OpExecError("Unknown node group: %s" % nodegroup_uuid) 2496 if node_uuid not in self._ConfigData().nodegroups[nodegroup_uuid].members: 2497 self._ConfigData().nodegroups[nodegroup_uuid].members.append(node_uuid)
2498
2499 - def _UnlockedRemoveNodeFromGroup(self, node):
2500 """Remove a given node from its group. 2501 2502 """ 2503 nodegroup = node.group 2504 if nodegroup not in self._ConfigData().nodegroups: 2505 logging.warning("Warning: node '%s' has unknown node group '%s'" 2506 " (while being removed from it)", node.uuid, nodegroup) 2507 nodegroup_obj = self._ConfigData().nodegroups[nodegroup] 2508 if node.uuid not in nodegroup_obj.members: 2509 logging.warning("Warning: node '%s' not a member of its node group '%s'" 2510 " (while being removed from it)", node.uuid, nodegroup) 2511 else: 2512 nodegroup_obj.members.remove(node.uuid)
2513 2514 @ConfigSync()
2515 - def AssignGroupNodes(self, mods):
2516 """Changes the group of a number of nodes. 2517 2518 @type mods: list of tuples; (node name, new group UUID) 2519 @param mods: Node membership modifications 2520 2521 """ 2522 groups = self._ConfigData().nodegroups 2523 nodes = self._ConfigData().nodes 2524 2525 resmod = [] 2526 2527 # Try to resolve UUIDs first 2528 for (node_uuid, new_group_uuid) in mods: 2529 try: 2530 node = nodes[node_uuid] 2531 except KeyError: 2532 raise errors.ConfigurationError("Unable to find node '%s'" % node_uuid) 2533 2534 if node.group == new_group_uuid: 2535 # Node is being assigned to its current group 2536 logging.debug("Node '%s' was assigned to its current group (%s)", 2537 node_uuid, node.group) 2538 continue 2539 2540 # Try to find current group of node 2541 try: 2542 old_group = groups[node.group] 2543 except KeyError: 2544 raise errors.ConfigurationError("Unable to find old group '%s'" % 2545 node.group) 2546 2547 # Try to find new group for node 2548 try: 2549 new_group = groups[new_group_uuid] 2550 except KeyError: 2551 raise errors.ConfigurationError("Unable to find new group '%s'" % 2552 new_group_uuid) 2553 2554 assert node.uuid in old_group.members, \ 2555 ("Inconsistent configuration: node '%s' not listed in members for its" 2556 " old group '%s'" % (node.uuid, old_group.uuid)) 2557 assert node.uuid not in new_group.members, \ 2558 ("Inconsistent configuration: node '%s' already listed in members for" 2559 " its new group '%s'" % (node.uuid, new_group.uuid)) 2560 2561 resmod.append((node, old_group, new_group)) 2562 2563 # Apply changes 2564 for (node, old_group, new_group) in resmod: 2565 assert node.uuid != new_group.uuid and old_group.uuid != new_group.uuid, \ 2566 "Assigning to current group is not possible" 2567 2568 node.group = new_group.uuid 2569 2570 # Update members of involved groups 2571 if node.uuid in old_group.members: 2572 old_group.members.remove(node.uuid) 2573 if node.uuid not in new_group.members: 2574 new_group.members.append(node.uuid) 2575 2576 # Update timestamps and serials (only once per node/group object) 2577 now = time.time() 2578 for obj in frozenset(itertools.chain(*resmod)): # pylint: disable=W0142 2579 obj.serial_no += 1 2580 obj.mtime = now 2581 2582 # Force ssconf update 2583 self._ConfigData().cluster.serial_no += 1
2584
2585 - def _BumpSerialNo(self):
2586 """Bump up the serial number of the config. 2587 2588 """ 2589 self._ConfigData().serial_no += 1 2590 self._ConfigData().mtime = time.time()
2591
2592 - def _AllUUIDObjects(self):
2593 """Returns all objects with uuid attributes. 2594 2595 """ 2596 return (self._ConfigData().instances.values() + 2597 self._ConfigData().nodes.values() + 2598 self._ConfigData().nodegroups.values() + 2599 self._ConfigData().networks.values() + 2600 self._ConfigData().disks.values() + 2601 self._AllNICs() + 2602 [self._ConfigData().cluster])
2603
2604 - def GetConfigManager(self, shared=False, forcelock=False):
2605 """Returns a ConfigManager, which is suitable to perform a synchronized 2606 block of configuration operations. 2607 2608 WARNING: This blocks all other configuration operations, so anything that 2609 runs inside the block should be very fast, preferably not using any IO. 2610 """ 2611 2612 return ConfigManager(self, shared=shared, forcelock=forcelock)
2613
2614 - def _AddLockCount(self, count):
2615 self._lock_count += count 2616 return self._lock_count
2617
2618 - def _LockCount(self):
2619 return self._lock_count
2620
2621 - def _OpenConfig(self, shared, force=False):
2622 """Read the config data from WConfd or disk. 2623 2624 """ 2625 if self._AddLockCount(1) > 1: 2626 if self._lock_current_shared and not shared: 2627 self._AddLockCount(-1) 2628 raise errors.ConfigurationError("Can't request an exclusive" 2629 " configuration lock while holding" 2630 " shared") 2631 elif not force or self._lock_forced or not shared or self._offline: 2632 return # we already have the lock, do nothing 2633 else: 2634 self._lock_current_shared = shared 2635 if force: 2636 self._lock_forced = True 2637 # Read the configuration data. If offline, read the file directly. 2638 # If online, call WConfd. 2639 if self._offline: 2640 try: 2641 raw_data = utils.ReadFile(self._cfg_file) 2642 data_dict = serializer.Load(raw_data) 2643 # Make sure the configuration has the right version 2644 ValidateConfig(data_dict) 2645 data = objects.ConfigData.FromDict(data_dict) 2646 except errors.ConfigVersionMismatch: 2647 raise 2648 except Exception, err: 2649 raise errors.ConfigurationError(err) 2650 2651 self._cfg_id = utils.GetFileID(path=self._cfg_file) 2652 2653 if (not hasattr(data, "cluster") or 2654 not hasattr(data.cluster, "rsahostkeypub")): 2655 raise errors.ConfigurationError("Incomplete configuration" 2656 " (missing cluster.rsahostkeypub)") 2657 2658 if not data.cluster.master_node in data.nodes: 2659 msg = ("The configuration denotes node %s as master, but does not" 2660 " contain information about this node" % 2661 data.cluster.master_node) 2662 raise errors.ConfigurationError(msg) 2663 2664 master_info = data.nodes[data.cluster.master_node] 2665 if master_info.name != self._my_hostname and not self._accept_foreign: 2666 msg = ("The configuration denotes node %s as master, while my" 2667 " hostname is %s; opening a foreign configuration is only" 2668 " possible in accept_foreign mode" % 2669 (master_info.name, self._my_hostname)) 2670 raise errors.ConfigurationError(msg) 2671 2672 self._SetConfigData(data) 2673 2674 # Upgrade configuration if needed 2675 self._UpgradeConfig(saveafter=True) 2676 else: 2677 if shared and not force: 2678 if self._config_data is None: 2679 logging.debug("Requesting config, as I have no up-to-date copy") 2680 dict_data = self._wconfd.ReadConfig() 2681 else: 2682 dict_data = None 2683 else: 2684 # poll until we acquire the lock 2685 while True: 2686 dict_data = \ 2687 self._wconfd.LockConfig(self._GetWConfdContext(), bool(shared)) 2688 logging.debug("Received config from WConfd.LockConfig [shared=%s]", 2689 bool(shared)) 2690 if dict_data is not None: 2691 break 2692 time.sleep(random.random()) 2693 2694 try: 2695 if dict_data is not None: 2696 self._SetConfigData(objects.ConfigData.FromDict(dict_data)) 2697 self._UpgradeConfig() 2698 except Exception, err: 2699 raise errors.ConfigurationError(err)
2700
2701 - def _CloseConfig(self, save):
2702 """Release resources relating the config data. 2703 2704 """ 2705 if self._AddLockCount(-1) > 0: 2706 return # we still have the lock, do nothing 2707 if save: 2708 try: 2709 logging.debug("Writing configuration and unlocking it") 2710 self._WriteConfig(releaselock=True) 2711 except Exception, err: 2712 logging.critical("Can't write the configuration: %s", str(err)) 2713 raise 2714 elif not self._offline and \ 2715 not (self._lock_current_shared and not self._lock_forced): 2716 logging.debug("Unlocking configuration without writing") 2717 self._wconfd.UnlockConfig(self._GetWConfdContext()) 2718 self._lock_forced = False
2719 2720 # TODO: To WConfd
2721 - def _UpgradeConfig(self, saveafter=False):
2722 """Run any upgrade steps. 2723 2724 This method performs both in-object upgrades and also update some data 2725 elements that need uniqueness across the whole configuration or interact 2726 with other objects. 2727 2728 @warning: if 'saveafter' is 'True', this function will call 2729 L{_WriteConfig()} so it needs to be called only from a 2730 "safe" place. 2731 2732 """ 2733 # Keep a copy of the persistent part of _config_data to check for changes 2734 # Serialization doesn't guarantee order in dictionaries 2735 if saveafter: 2736 oldconf = copy.deepcopy(self._ConfigData().ToDict()) 2737 else: 2738 oldconf = None 2739 2740 # In-object upgrades 2741 self._ConfigData().UpgradeConfig() 2742 2743 for item in self._AllUUIDObjects(): 2744 if item.uuid is None: 2745 item.uuid = self._GenerateUniqueID(_UPGRADE_CONFIG_JID) 2746 if not self._ConfigData().nodegroups: 2747 default_nodegroup_name = constants.INITIAL_NODE_GROUP_NAME 2748 default_nodegroup = objects.NodeGroup(name=default_nodegroup_name, 2749 members=[]) 2750 self._UnlockedAddNodeGroup(default_nodegroup, _UPGRADE_CONFIG_JID, True) 2751 for node in self._ConfigData().nodes.values(): 2752 if not node.group: 2753 node.group = self._UnlockedLookupNodeGroup(None) 2754 # This is technically *not* an upgrade, but needs to be done both when 2755 # nodegroups are being added, and upon normally loading the config, 2756 # because the members list of a node group is discarded upon 2757 # serializing/deserializing the object. 2758 self._UnlockedAddNodeToGroup(node.uuid, node.group) 2759 2760 if saveafter: 2761 modified = (oldconf != self._ConfigData().ToDict()) 2762 else: 2763 modified = True # can't prove it didn't change, but doesn't matter 2764 if modified and saveafter: 2765 self._WriteConfig() 2766 self._UnlockedDropECReservations(_UPGRADE_CONFIG_JID) 2767 else: 2768 if self._offline: 2769 self._UnlockedVerifyConfigAndLog()
2770
2771 - def _WriteConfig(self, destination=None, releaselock=False):
2772 """Write the configuration data to persistent storage. 2773 2774 """ 2775 if destination is None: 2776 destination = self._cfg_file 2777 2778 # Save the configuration data. If offline, write the file directly. 2779 # If online, call WConfd. 2780 if self._offline: 2781 self._BumpSerialNo() 2782 txt = serializer.DumpJson( 2783 self._ConfigData().ToDict(_with_private=True), 2784 private_encoder=serializer.EncodeWithPrivateFields 2785 ) 2786 2787 getents = self._getents() 2788 try: 2789 fd = utils.SafeWriteFile(destination, self._cfg_id, data=txt, 2790 close=False, gid=getents.confd_gid, mode=0640) 2791 except errors.LockError: 2792 raise errors.ConfigurationError("The configuration file has been" 2793 " modified since the last write, cannot" 2794 " update") 2795 try: 2796 self._cfg_id = utils.GetFileID(fd=fd) 2797 finally: 2798 os.close(fd) 2799 else: 2800 try: 2801 if releaselock: 2802 res = self._wconfd.WriteConfigAndUnlock(self._GetWConfdContext(), 2803 self._ConfigData().ToDict()) 2804 if not res: 2805 logging.warning("WriteConfigAndUnlock indicates we already have" 2806 " released the lock; assuming this was just a retry" 2807 " and the initial call succeeded") 2808 else: 2809 self._wconfd.WriteConfig(self._GetWConfdContext(), 2810 self._ConfigData().ToDict()) 2811 except errors.LockError: 2812 raise errors.ConfigurationError("The configuration file has been" 2813 " modified since the last write, cannot" 2814 " update") 2815 2816 self.write_count += 1
2817
2818 - def _GetAllHvparamsStrings(self, hypervisors):
2819 """Get the hvparams of all given hypervisors from the config. 2820 2821 @type hypervisors: list of string 2822 @param hypervisors: list of hypervisor names 2823 @rtype: dict of strings 2824 @returns: dictionary mapping the hypervisor name to a string representation 2825 of the hypervisor's hvparams 2826 2827 """ 2828 hvparams = {} 2829 for hv in hypervisors: 2830 hvparams[hv] = self._UnlockedGetHvparamsString(hv) 2831 return hvparams
2832 2833 @staticmethod
2834 - def _ExtendByAllHvparamsStrings(ssconf_values, all_hvparams):
2835 """Extends the ssconf_values dictionary by hvparams. 2836 2837 @type ssconf_values: dict of strings 2838 @param ssconf_values: dictionary mapping ssconf_keys to strings 2839 representing the content of ssconf files 2840 @type all_hvparams: dict of strings 2841 @param all_hvparams: dictionary mapping hypervisor names to a string 2842 representation of their hvparams 2843 @rtype: same as ssconf_values 2844 @returns: the ssconf_values dictionary extended by hvparams 2845 2846 """ 2847 for hv in all_hvparams: 2848 ssconf_key = constants.SS_HVPARAMS_PREF + hv 2849 ssconf_values[ssconf_key] = all_hvparams[hv] 2850 return ssconf_values
2851
2852 - def _UnlockedGetSshPortMap(self, node_infos):
2853 node_ports = dict([(node.name, 2854 self._UnlockedGetNdParams(node).get( 2855 constants.ND_SSH_PORT)) 2856 for node in node_infos]) 2857 return node_ports
2858
2859 - def _UnlockedGetSsconfValues(self):
2860 """Return the values needed by ssconf. 2861 2862 @rtype: dict 2863 @return: a dictionary with keys the ssconf names and values their 2864 associated value 2865 2866 """ 2867 fn = "\n".join 2868 instance_names = utils.NiceSort( 2869 [inst.name for inst in 2870 self._UnlockedGetAllInstancesInfo().values()]) 2871 node_infos = self._UnlockedGetAllNodesInfo().values() 2872 node_names = [node.name for node in node_infos] 2873 node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip) 2874 for ninfo in node_infos] 2875 node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip) 2876 for ninfo in node_infos] 2877 node_vm_capable = ["%s=%s" % (ninfo.name, str(ninfo.vm_capable)) 2878 for ninfo in node_infos] 2879 2880 instance_data = fn(instance_names) 2881 off_data = fn(node.name for node in node_infos if node.offline) 2882 on_data = fn(node.name for node in node_infos if not node.offline) 2883 mc_data = fn(node.name for node in node_infos if node.master_candidate) 2884 mc_ips_data = fn(node.primary_ip for node in node_infos 2885 if node.master_candidate) 2886 node_data = fn(node_names) 2887 node_pri_ips_data = fn(node_pri_ips) 2888 node_snd_ips_data = fn(node_snd_ips) 2889 node_vm_capable_data = fn(node_vm_capable) 2890 2891 cluster = self._ConfigData().cluster 2892 cluster_tags = fn(cluster.GetTags()) 2893 2894 master_candidates_certs = fn("%s=%s" % (mc_uuid, mc_cert) 2895 for mc_uuid, mc_cert 2896 in cluster.candidate_certs.items()) 2897 2898 hypervisor_list = fn(cluster.enabled_hypervisors) 2899 all_hvparams = self._GetAllHvparamsStrings(constants.HYPER_TYPES) 2900 2901 uid_pool = uidpool.FormatUidPool(cluster.uid_pool, separator="\n") 2902 2903 nodegroups = ["%s %s" % (nodegroup.uuid, nodegroup.name) for nodegroup in 2904 self._ConfigData().nodegroups.values()] 2905 nodegroups_data = fn(utils.NiceSort(nodegroups)) 2906 networks = ["%s %s" % (net.uuid, net.name) for net in 2907 self._ConfigData().networks.values()] 2908 networks_data = fn(utils.NiceSort(networks)) 2909 2910 ssh_ports = fn("%s=%s" % (node_name, port) 2911 for node_name, port 2912 in self._UnlockedGetSshPortMap(node_infos).items()) 2913 2914 ssconf_values = { 2915 constants.SS_CLUSTER_NAME: cluster.cluster_name, 2916 constants.SS_CLUSTER_TAGS: cluster_tags, 2917 constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir, 2918 constants.SS_SHARED_FILE_STORAGE_DIR: cluster.shared_file_storage_dir, 2919 constants.SS_GLUSTER_STORAGE_DIR: cluster.gluster_storage_dir, 2920 constants.SS_MASTER_CANDIDATES: mc_data, 2921 constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data, 2922 constants.SS_MASTER_CANDIDATES_CERTS: master_candidates_certs, 2923 constants.SS_MASTER_IP: cluster.master_ip, 2924 constants.SS_MASTER_NETDEV: cluster.master_netdev, 2925 constants.SS_MASTER_NETMASK: str(cluster.master_netmask), 2926 constants.SS_MASTER_NODE: self._UnlockedGetNodeName(cluster.master_node), 2927 constants.SS_NODE_LIST: node_data, 2928 constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data, 2929 constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data, 2930 constants.SS_NODE_VM_CAPABLE: node_vm_capable_data, 2931 constants.SS_OFFLINE_NODES: off_data, 2932 constants.SS_ONLINE_NODES: on_data, 2933 constants.SS_PRIMARY_IP_FAMILY: str(cluster.primary_ip_family), 2934 constants.SS_INSTANCE_LIST: instance_data, 2935 constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION, 2936 constants.SS_HYPERVISOR_LIST: hypervisor_list, 2937 constants.SS_MAINTAIN_NODE_HEALTH: str(cluster.maintain_node_health), 2938 constants.SS_UID_POOL: uid_pool, 2939 constants.SS_NODEGROUPS: 
nodegroups_data, 2940 constants.SS_NETWORKS: networks_data, 2941 constants.SS_ENABLED_USER_SHUTDOWN: str(cluster.enabled_user_shutdown), 2942 constants.SS_SSH_PORTS: ssh_ports, 2943 } 2944 ssconf_values = self._ExtendByAllHvparamsStrings(ssconf_values, 2945 all_hvparams) 2946 bad_values = [(k, v) for k, v in ssconf_values.items() 2947 if not isinstance(v, (str, basestring))] 2948 if bad_values: 2949 err = utils.CommaJoin("%s=%s" % (k, v) for k, v in bad_values) 2950 raise errors.ConfigurationError("Some ssconf key(s) have non-string" 2951 " values: %s" % err) 2952 return ssconf_values
2953 2954 @ConfigSync(shared=1)
2955 - def GetSsconfValues(self):
2956 """Wrapper using lock around _UnlockedGetSsconf(). 2957 2958 """ 2959 return self._UnlockedGetSsconfValues()
2960 2961 @ConfigSync(shared=1)
2962 - def GetVGName(self):
2963 """Return the volume group name. 2964 2965 """ 2966 return self._ConfigData().cluster.volume_group_name
2967 2968 @ConfigSync()
2969 - def SetVGName(self, vg_name):
2970 """Set the volume group name. 2971 2972 """ 2973 self._ConfigData().cluster.volume_group_name = vg_name 2974 self._ConfigData().cluster.serial_no += 1
2975 2976 @ConfigSync(shared=1)
2977 - def GetDiagnoseDataCollectorFilename(self):
2978 """Return the diagnose data collector filename 2979 2980 """ 2981 return self._ConfigData().cluster.diagnose_data_collector_filename
2982 2983 @ConfigSync()
2984 - def SetDiagnoseDataCollectorFilename(self, fn):
2985 """Set the volume group name. 2986 2987 """ 2988 self._ConfigData().cluster.diagnose_data_collector_filename = fn 2989 self._ConfigData().cluster.serial_no += 1
2990 2991 @ConfigSync(shared=1)
2992 - def GetDRBDHelper(self):
2993 """Return DRBD usermode helper. 2994 2995 """ 2996 return self._ConfigData().cluster.drbd_usermode_helper
2997 2998 @ConfigSync()
2999 - def SetDRBDHelper(self, drbd_helper):
3000 """Set DRBD usermode helper. 3001 3002 """ 3003 self._ConfigData().cluster.drbd_usermode_helper = drbd_helper 3004 self._ConfigData().cluster.serial_no += 1
3005 3006 @ConfigSync(shared=1)
3007 - def GetMACPrefix(self):
3008 """Return the mac prefix. 3009 3010 """ 3011 return self._ConfigData().cluster.mac_prefix
3012 3013 @ConfigSync(shared=1)
3014 - def GetClusterInfo(self):
3015 """Returns information about the cluster 3016 3017 @rtype: L{objects.Cluster} 3018 @return: the cluster object 3019 3020 """ 3021 return self._ConfigData().cluster
3022 3023 @ConfigSync(shared=1)
3024 - def DisksOfType(self, dev_type):
3025 """Check if in there is at disk of the given type in the configuration. 3026 3027 """ 3028 return self._ConfigData().DisksOfType(dev_type)
3029 3030 @ConfigSync(shared=1)
3031 - def GetDetachedConfig(self):
3032 """Returns a detached version of a ConfigManager, which represents 3033 a read-only snapshot of the configuration at this particular time. 3034 3035 """ 3036 return DetachedConfig(self._ConfigData())
3037
3038 - def Update(self, target, feedback_fn, ec_id=None):
3039 """Notify function to be called after updates. 3040 3041 This function must be called when an object (as returned by 3042 GetInstanceInfo, GetNodeInfo, GetCluster) has been updated and the 3043 caller wants the modifications saved to the backing store. Note 3044 that all modified objects will be saved, but the target argument 3045 is the one the caller wants to ensure that it's saved. 3046 3047 @param target: an instance of either L{objects.Cluster}, 3048 L{objects.Node} or L{objects.Instance} which is existing in 3049 the cluster 3050 @param feedback_fn: Callable feedback function 3051 3052 """ 3053 3054 update_function = None 3055 if isinstance(target, objects.Cluster): 3056 if self._offline: 3057 self.UpdateOfflineCluster(target, feedback_fn) 3058 return 3059 else: 3060 update_function = self._wconfd.UpdateCluster 3061 elif isinstance(target, objects.Node): 3062 update_function = self._wconfd.UpdateNode 3063 elif isinstance(target, objects.Instance): 3064 update_function = self._wconfd.UpdateInstance 3065 elif isinstance(target, objects.NodeGroup): 3066 update_function = self._wconfd.UpdateNodeGroup 3067 elif isinstance(target, objects.Network): 3068 update_function = self._wconfd.UpdateNetwork 3069 elif isinstance(target, objects.Disk): 3070 update_function = self._wconfd.UpdateDisk 3071 else: 3072 raise errors.ProgrammerError("Invalid object type (%s) passed to" 3073 " ConfigWriter.Update" % type(target)) 3074 3075 def WithRetry(): 3076 result = update_function(target.ToDict()) 3077 self.OutDate() 3078 3079 if result is None: 3080 raise utils.RetryAgain() 3081 else: 3082 return result
3083 vals = utils.Retry(WithRetry, 0.1, 30) 3084 self.OutDate() 3085 target.serial_no = vals[0] 3086 target.mtime = float(vals[1]) 3087 3088 if ec_id is not None: 3089 # Commit all ips reserved by OpInstanceSetParams and OpGroupSetParams 3090 # FIXME: After RemoveInstance is moved to WConfd, use its internal 3091 # functions from TempRes module. 3092 self.CommitTemporaryIps(ec_id) 3093 3094 # Just verify the configuration with our feedback function. 3095 # It will get written automatically by the decorator. 3096 self.VerifyConfigAndLog(feedback_fn=feedback_fn) 3097 3098 @ConfigSync()
3099 - def UpdateOfflineCluster(self, target, feedback_fn):
3100 self._ConfigData().cluster = target 3101 target.serial_no += 1 3102 target.mtime = time.time() 3103 self.VerifyConfigAndLog(feedback_fn=feedback_fn)
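The usual read-modify-write cycle against the configuration therefore looks like the following hypothetical sketch, where `cfg` is an initialized ConfigWriter and `feedback_fn` any callable accepting a message:

    # Hypothetical sketch: fetch an object, modify it, then persist it via Update,
    # which refreshes serial_no and mtime on the passed object.
    node = cfg.GetNodeInfo(node_uuid)
    node.offline = True
    cfg.Update(node, feedback_fn)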
3104
3105 - def _UnlockedDropECReservations(self, _ec_id):
3106 """Drop per-execution-context reservations 3107 3108 """ 3109 # FIXME: Remove the following two lines after all reservations are moved to 3110 # wconfd. 3111 for rm in self._all_rms: 3112 rm.DropECReservations(_ec_id) 3113 if not self._offline: 3114 self._wconfd.DropAllReservations(self._GetWConfdContext())
3115
3116 - def DropECReservations(self, ec_id):
3117 self._UnlockedDropECReservations(ec_id)
3118 3119 @ConfigSync(shared=1)
3120 - def GetAllNetworksInfo(self):
3121 """Get configuration info of all the networks. 3122 3123 """ 3124 return dict(self._ConfigData().networks)
3125
3126 - def _UnlockedGetNetworkList(self):
3127 """Get the list of networks. 3128 3129 This function is for internal use, when the config lock is already held. 3130 3131 """ 3132 return self._ConfigData().networks.keys()
3133 3134 @ConfigSync(shared=1)
3135 - def GetNetworkList(self):
3136 """Get the list of networks. 3137 3138 @return: array of networks, ex. ["main", "vlan100", "200] 3139 3140 """ 3141 return self._UnlockedGetNetworkList()
3142 3143 @ConfigSync(shared=1)
3144 - def GetNetworkNames(self):
3145 """Get a list of network names 3146 3147 """ 3148 names = [net.name 3149 for net in self._ConfigData().networks.values()] 3150 return names
3151
3152 - def _UnlockedGetNetwork(self, uuid):
3153 """Returns information about a network. 3154 3155 This function is for internal use, when the config lock is already held. 3156 3157 """ 3158 if uuid not in self._ConfigData().networks: 3159 return None 3160 3161 return self._ConfigData().networks[uuid]
3162 3163 @ConfigSync(shared=1)
3164 - def GetNetwork(self, uuid):
3165 """Returns information about a network. 3166 3167 It takes the information from the configuration file. 3168 3169 @param uuid: UUID of the network 3170 3171 @rtype: L{objects.Network} 3172 @return: the network object 3173 3174 """ 3175 return self._UnlockedGetNetwork(uuid)
3176 3177 @ConfigSync()
3178 - def AddNetwork(self, net, ec_id, check_uuid=True):
3179 """Add a network to the configuration. 3180 3181 @type net: L{objects.Network} 3182 @param net: the Network object to add 3183 @type ec_id: string 3184 @param ec_id: unique id for the job to use when creating a missing UUID 3185 3186 """ 3187 self._UnlockedAddNetwork(net, ec_id, check_uuid)
3188
3189 - def _UnlockedAddNetwork(self, net, ec_id, check_uuid):
3190 """Add a network to the configuration. 3191 3192 """ 3193 logging.info("Adding network %s to configuration", net.name) 3194 3195 if check_uuid: 3196 self._EnsureUUID(net, ec_id) 3197 3198 net.serial_no = 1 3199 net.ctime = net.mtime = time.time() 3200 self._ConfigData().networks[net.uuid] = net 3201 self._ConfigData().cluster.serial_no += 1
3202
3203 - def _UnlockedLookupNetwork(self, target):
3204 """Lookup a network's UUID. 3205 3206 @type target: string 3207 @param target: network name or UUID 3208 @rtype: string 3209 @return: network UUID 3210 @raises errors.OpPrereqError: when the target network cannot be found 3211 3212 """ 3213 if target is None: 3214 return None 3215 if target in self._ConfigData().networks: 3216 return target 3217 for net in self._ConfigData().networks.values(): 3218 if net.name == target: 3219 return net.uuid 3220 raise errors.OpPrereqError("Network '%s' not found" % target, 3221 errors.ECODE_NOENT)
3222 3223 @ConfigSync(shared=1)
3224 - def LookupNetwork(self, target):
3225 """Lookup a network's UUID. 3226 3227 This function is just a wrapper over L{_UnlockedLookupNetwork}. 3228 3229 @type target: string 3230 @param target: network name or UUID 3231 @rtype: string 3232 @return: network UUID 3233 3234 """ 3235 return self._UnlockedLookupNetwork(target)
3236 3237 @ConfigSync()
3238 - def RemoveNetwork(self, network_uuid):
3239 """Remove a network from the configuration. 3240 3241 @type network_uuid: string 3242 @param network_uuid: the UUID of the network to remove 3243 3244 """ 3245 logging.info("Removing network %s from configuration", network_uuid) 3246 3247 if network_uuid not in self._ConfigData().networks: 3248 raise errors.ConfigurationError("Unknown network '%s'" % network_uuid) 3249 3250 del self._ConfigData().networks[network_uuid] 3251 self._ConfigData().cluster.serial_no += 1
3252
3253 - def _UnlockedGetGroupNetParams(self, net_uuid, node_uuid):
3254 """Get the netparams (mode, link) of a network. 3255 3256 Get a network's netparams for a given node. 3257 3258 @type net_uuid: string 3259 @param net_uuid: network uuid 3260 @type node_uuid: string 3261 @param node_uuid: node UUID 3262 @rtype: dict or None 3263 @return: netparams 3264 3265 """ 3266 node_info = self._UnlockedGetNodeInfo(node_uuid) 3267 nodegroup_info = self._UnlockedGetNodeGroup(node_info.group) 3268 netparams = nodegroup_info.networks.get(net_uuid, None) 3269 3270 return netparams
3271 3272 @ConfigSync(shared=1)
3273 - def GetGroupNetParams(self, net_uuid, node_uuid):
3274 """Locking wrapper of _UnlockedGetGroupNetParams() 3275 3276 """ 3277 return self._UnlockedGetGroupNetParams(net_uuid, node_uuid)
3278 3279 @ConfigSync(shared=1)
3280 - def CheckIPInNodeGroup(self, ip, node_uuid):
3281 """Check IP uniqueness in nodegroup. 3282 3283 Check networks that are connected in the node's node group 3284 if ip is contained in any of them. Used when creating/adding 3285 a NIC to ensure uniqueness among nodegroups. 3286 3287 @type ip: string 3288 @param ip: ip address 3289 @type node_uuid: string 3290 @param node_uuid: node UUID 3291 @rtype: (string, dict) or (None, None) 3292 @return: (network name, netparams) 3293 3294 """ 3295 if ip is None: 3296 return (None, None) 3297 node_info = self._UnlockedGetNodeInfo(node_uuid) 3298 nodegroup_info = self._UnlockedGetNodeGroup(node_info.group) 3299 for net_uuid in nodegroup_info.networks.keys(): 3300 net_info = self._UnlockedGetNetwork(net_uuid) 3301 pool = network.AddressPool(net_info) 3302 if pool.Contains(ip): 3303 return (net_info.name, nodegroup_info.networks[net_uuid]) 3304 3305 return (None, None)
3306 3307 @ConfigSync(shared=1)
3308 - def GetCandidateCerts(self):
3309 """Returns the candidate certificate map. 3310 3311 """ 3312 return self._ConfigData().cluster.candidate_certs
3313 3314 @ConfigSync()
3315 - def SetCandidateCerts(self, certs):
3316 """Replaces the master candidate cert list with the new values. 3317 3318 @type certs: dict of string to string 3319 @param certs: map of node UUIDs to SSL client certificate digests. 3320 3321 """ 3322 self._ConfigData().cluster.candidate_certs = certs
3323 3324 @ConfigSync()
3325 - def AddNodeToCandidateCerts(self, node_uuid, cert_digest, 3326 info_fn=logging.info, warn_fn=logging.warn):
3327 """Adds an entry to the candidate certificate map. 3328 3329 @type node_uuid: string 3330 @param node_uuid: the node's UUID 3331 @type cert_digest: string 3332 @param cert_digest: the digest of the node's client SSL certificate 3333 @type info_fn: function 3334 @param info_fn: logging function for information messages 3335 @type warn_fn: function 3336 @param warn_fn: logging function for warning messages 3337 3338 """ 3339 cluster = self._ConfigData().cluster 3340 if node_uuid in cluster.candidate_certs: 3341 old_cert_digest = cluster.candidate_certs[node_uuid] 3342 if old_cert_digest == cert_digest: 3343 if info_fn is not None: 3344 info_fn("Certificate digest for node %s already in config." 3345 "Not doing anything." % node_uuid) 3346 return 3347 else: 3348 if warn_fn is not None: 3349 warn_fn("Overriding differing certificate digest for node %s" 3350 % node_uuid) 3351 cluster.candidate_certs[node_uuid] = cert_digest
3352 3353 @ConfigSync()
3354 - def RemoveNodeFromCandidateCerts(self, node_uuid, 3355 warn_fn=logging.warn):
3356 """Removes the entry of the given node in the certificate map. 3357 3358 @type node_uuid: string 3359 @param node_uuid: the node's UUID 3360 @type warn_fn: function 3361 @param warn_fn: logging function for warning messages 3362 3363 """ 3364 cluster = self._ConfigData().cluster 3365 if node_uuid not in cluster.candidate_certs: 3366 if warn_fn is not None: 3367 warn_fn("Cannot remove certifcate for node %s, because it's not" 3368 " in the candidate map." % node_uuid) 3369 return 3370 del cluster.candidate_certs[node_uuid]
3371
3372 - def FlushConfig(self):
3373 """Force the distribution of configuration to master candidates. 3374 3375 It is not necessary to hold a lock for this operation, it is handled 3376 internally by WConfd. 3377 3378 """ 3379 if not self._offline: 3380 self._wconfd.FlushConfig()
3381
3382 - def FlushConfigGroup(self, uuid):
3383 """Force the distribution of configuration to master candidates of a group. 3384 3385 It is not necessary to hold a lock for this operation, it is handled 3386 internally by WConfd. 3387 3388 """ 3389 if not self._offline: 3390 self._wconfd.FlushConfigGroup(uuid)
3391 3392 @ConfigSync(shared=1)
3393 - def GetAllDiskInfo(self):
3394 """Get the configuration of all disks. 3395 3396 @rtype: dict 3397 @return: dict of (disk, disk_info), where disk_info is what 3398 would GetDiskInfo return for disk 3399 """ 3400 return self._UnlockedGetAllDiskInfo()
3401
3402 - def _UnlockedGetAllDiskInfo(self):
3403 return dict((disk_uuid, self._UnlockedGetDiskInfo(disk_uuid)) 3404 for disk_uuid in self._UnlockedGetDiskList())
3405 3406 @ConfigSync(shared=1)
3407 - def GetInstanceForDisk(self, disk_uuid):
3408 """Returns the instance the disk is currently attached to. 3409 3410 @type disk_uuid: string 3411 @param disk_uuid: the identifier of the disk in question. 3412 3413 @rtype: string 3414 @return: uuid of instance the disk is attached to. 3415 """ 3416 for inst_uuid, inst_info in self._UnlockedGetAllInstancesInfo().items(): 3417 if disk_uuid in inst_info.disks: 3418 return inst_uuid
3419
3420 - def SetMaintdRoundDelay(self, delay):
3421 """Set the minimal time the maintenance daemon should wait between rounds""" 3422 utils.SimpleRetry(True, self._wconfd.SetMaintdRoundDelay, 0.1, 30, 3423 args=[delay])
3424
3425 - def SetMaintdBalance(self, flag):
3426 """Enable/disable auto-balancing by the maintenance daemon""" 3427 utils.SimpleRetry(True, self._wconfd.SetMaintdBalance, 0.1, 30, 3428 args=[flag])
3429
3430 - def SetMaintdBalanceThreshold(self, score):
3431 """Set the minimal score improvement per move for balancing steps""" 3432 utils.SimpleRetry(True, self._wconfd.SetMaintdBalanceThreshold, 0.1, 30, 3433 args=[score])
3434
3435 3436 -class DetachedConfig(ConfigWriter):
3437 """Read-only snapshot of the config.""" 3438
3439 - def __init__(self, config_data):
3440 super(DetachedConfig, self).__init__(self, offline=True) 3441 self._SetConfigData(config_data)
3442 3443 @staticmethod
3444 - def _WriteCallError():
3445 raise errors.ProgrammerError("DetachedConfig supports only read-only" 3446 " operations")
3447
3448 - def _OpenConfig(self, shared, force=None):
3449 if not shared: 3450 DetachedConfig._WriteCallError()
3451
3452 - def _CloseConfig(self, save):
3453 if save: 3454 DetachedConfig._WriteCallError()
3455