Package ganeti :: Package config
[hide private]
[frames] | [no frames]

Source Code for Package ganeti.config

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30   
  31  """Configuration management for Ganeti 
  32   
  33  This module provides the interface to the Ganeti cluster configuration. 
  34   
  35  The configuration data is stored on every node but is updated on the master 
  36  only. After each update, the master distributes the data to the other nodes. 
  37   
  38  Currently, the data storage format is JSON. YAML was slow and consuming too 
  39  much memory. 
  40   
  41  """ 
  42   
  43  # pylint: disable=R0904 
  44  # R0904: Too many public methods 
  45   
  46  import copy 
  47  import os 
  48  import random 
  49  import logging 
  50  import time 
  51  import threading 
  52  import itertools 
  53   
  54  from ganeti.config.temporary_reservations import TemporaryReservationManager 
  55  from ganeti.config.utils import ConfigSync, ConfigManager 
  56  from ganeti.config.verify import (VerifyType, VerifyNic, VerifyIpolicy, 
  57                                    ValidateConfig) 
  58   
  59  from ganeti import errors 
  60  from ganeti import utils 
  61  from ganeti import constants 
  62  import ganeti.wconfd as wc 
  63  from ganeti import objects 
  64  from ganeti import serializer 
  65  from ganeti import uidpool 
  66  from ganeti import netutils 
  67  from ganeti import runtime 
  68  from ganeti import pathutils 
  69  from ganeti import network 
def GetWConfdContext(ec_id, livelock):
  """Prepare a context for communication with WConfd.

  WConfd needs to know the identity of each caller to properly manage locks
  and detect job death. This helper builds that identity triple from an
  optional job ID and a livelock file.

  @type ec_id: int, or None
  @param ec_id: the job ID or None, if the caller isn't a job
  @type livelock: L{ganeti.utils.livelock.LiveLock}
  @param livelock: a livelock object holding the lockfile needed for WConfd
  @return: the WConfd context

  """
  # Jobs identify themselves by their job ID; non-job callers fall back to
  # the name of the current thread.
  if ec_id is None:
    identity = threading.current_thread().getName()
  else:
    identity = ec_id
  return (identity, livelock.GetPath(), os.getpid())
92
def GetConfig(ec_id, livelock, **kwargs):
  """A utility function for constructing instances of ConfigWriter.

  It prepares a WConfd context and uses it to create a ConfigWriter instance.

  @type ec_id: int, or None
  @param ec_id: the job ID or None, if the caller isn't a job
  @type livelock: L{ganeti.utils.livelock.LiveLock}
  @param livelock: a livelock object holding the lockfile needed for WConfd
  @type kwargs: dict
  @param kwargs: Any additional arguments for the ConfigWriter constructor
  @rtype: L{ConfigWriter}
  @return: the ConfigWriter context

  """
  kwargs['wconfdcontext'] = GetWConfdContext(ec_id, livelock)

  # When the config is opened in accept_foreign mode, the RPC client must
  # not insist on talking to the master node either.
  kwargs['wconfd'] = wc.Client(
    allow_non_master=kwargs.get('accept_foreign', False))

  return ConfigWriter(**kwargs)
117 118 119 # job id used for resource management at config upgrade time 120 _UPGRADE_CONFIG_JID = "jid-cfg-upgrade"
def _MatchNameComponentIgnoreCase(short_name, names):
  """Wrapper around L{utils.text.MatchNameComponent}.

  Performs the same matching, but always case-insensitively.

  """
  return utils.MatchNameComponent(short_name, names,
                                  case_sensitive=False)
128
129 130 -def _CheckInstanceDiskIvNames(disks):
131 """Checks if instance's disks' C{iv_name} attributes are in order. 132 133 @type disks: list of L{objects.Disk} 134 @param disks: List of disks 135 @rtype: list of tuples; (int, string, string) 136 @return: List of wrongly named disks, each tuple contains disk index, 137 expected and actual name 138 139 """ 140 result = [] 141 142 for (idx, disk) in enumerate(disks): 143 exp_iv_name = "disk/%s" % idx 144 if disk.iv_name != exp_iv_name: 145 result.append((idx, exp_iv_name, disk.iv_name)) 146 147 return result
148
149 150 -class ConfigWriter(object):
151 """The interface to the cluster configuration. 152 153 WARNING: The class is no longer thread-safe! 154 Each thread must construct a separate instance. 155 156 @ivar _all_rms: a list of all temporary reservation managers 157 158 Currently the class fulfills 3 main functions: 159 1. lock the configuration for access (monitor) 160 2. reload and write the config if necessary (bridge) 161 3. provide convenient access methods to config data (facade) 162 163 """
164 - def __init__(self, cfg_file=None, offline=False, _getents=runtime.GetEnts, 165 accept_foreign=False, wconfdcontext=None, wconfd=None):
166 self.write_count = 0 167 self._config_data = None 168 self._SetConfigData(None) 169 self._offline = offline 170 if cfg_file is None: 171 self._cfg_file = pathutils.CLUSTER_CONF_FILE 172 else: 173 self._cfg_file = cfg_file 174 self._getents = _getents 175 self._temporary_ids = TemporaryReservationManager() 176 self._all_rms = [self._temporary_ids] 177 # Note: in order to prevent errors when resolving our name later, 178 # we compute it here once and reuse it; it's 179 # better to raise an error before starting to modify the config 180 # file than after it was modified 181 self._my_hostname = netutils.Hostname.GetSysName() 182 self._cfg_id = None 183 self._wconfdcontext = wconfdcontext 184 self._wconfd = wconfd 185 self._accept_foreign = accept_foreign 186 self._lock_count = 0 187 self._lock_current_shared = None 188 self._lock_forced = False
189
190 - def _ConfigData(self):
191 return self._config_data
192
193 - def OutDate(self):
194 self._config_data = None
195
196 - def _SetConfigData(self, cfg):
197 self._config_data = cfg
198
199 - def _GetWConfdContext(self):
200 return self._wconfdcontext
201 202 # this method needs to be static, so that we can call it on the class 203 @staticmethod
204 - def IsCluster():
205 """Check if the cluster is configured. 206 207 """ 208 return os.path.exists(pathutils.CLUSTER_CONF_FILE)
209
210 - def _UnlockedGetNdParams(self, node):
211 nodegroup = self._UnlockedGetNodeGroup(node.group) 212 return self._ConfigData().cluster.FillND(node, nodegroup)
213 214 @ConfigSync(shared=1)
215 - def GetNdParams(self, node):
216 """Get the node params populated with cluster defaults. 217 218 @type node: L{objects.Node} 219 @param node: The node we want to know the params for 220 @return: A dict with the filled in node params 221 222 """ 223 return self._UnlockedGetNdParams(node)
224 225 @ConfigSync(shared=1)
226 - def GetNdGroupParams(self, nodegroup):
227 """Get the node groups params populated with cluster defaults. 228 229 @type nodegroup: L{objects.NodeGroup} 230 @param nodegroup: The node group we want to know the params for 231 @return: A dict with the filled in node group params 232 233 """ 234 return self._UnlockedGetNdGroupParams(nodegroup)
235
236 - def _UnlockedGetNdGroupParams(self, group):
237 """Get the ndparams of the group. 238 239 @type group: L{objects.NodeGroup} 240 @param group: The group we want to know the params for 241 @rtype: dict of str to int 242 @return: A dict with the filled in node group params 243 244 """ 245 return self._ConfigData().cluster.FillNDGroup(group)
246 247 @ConfigSync(shared=1)
248 - def GetGroupSshPorts(self):
249 """Get a map of group UUIDs to SSH ports. 250 251 @rtype: dict of str to int 252 @return: a dict mapping the UUIDs to the SSH ports 253 254 """ 255 port_map = {} 256 for uuid, group in self._config_data.nodegroups.items(): 257 ndparams = self._UnlockedGetNdGroupParams(group) 258 port = ndparams.get(constants.ND_SSH_PORT) 259 port_map[uuid] = port 260 return port_map
261 262 @ConfigSync(shared=1)
263 - def GetInstanceDiskParams(self, instance):
264 """Get the disk params populated with inherit chain. 265 266 @type instance: L{objects.Instance} 267 @param instance: The instance we want to know the params for 268 @return: A dict with the filled in disk params 269 270 """ 271 node = self._UnlockedGetNodeInfo(instance.primary_node) 272 nodegroup = self._UnlockedGetNodeGroup(node.group) 273 return self._UnlockedGetGroupDiskParams(nodegroup)
274
275 - def _UnlockedGetInstanceDisks(self, inst_uuid):
276 """Return the disks' info for the given instance 277 278 @type inst_uuid: string 279 @param inst_uuid: The UUID of the instance we want to know the disks for 280 281 @rtype: List of L{objects.Disk} 282 @return: A list with all the disks' info 283 284 """ 285 instance = self._UnlockedGetInstanceInfo(inst_uuid) 286 if instance is None: 287 raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) 288 289 return [self._UnlockedGetDiskInfo(disk_uuid) 290 for disk_uuid in instance.disks]
291 292 @ConfigSync(shared=1)
293 - def GetInstanceDisks(self, inst_uuid):
294 """Return the disks' info for the given instance 295 296 This is a simple wrapper over L{_UnlockedGetInstanceDisks}. 297 298 """ 299 return self._UnlockedGetInstanceDisks(inst_uuid)
300
301 - def AddInstanceDisk(self, inst_uuid, disk, idx=None, replace=False):
302 """Add a disk to the config and attach it to instance.""" 303 if not isinstance(disk, objects.Disk): 304 raise errors.ProgrammerError("Invalid type passed to AddInstanceDisk") 305 306 disk.UpgradeConfig() 307 utils.SimpleRetry(True, self._wconfd.AddInstanceDisk, 0.1, 30, 308 args=[inst_uuid, disk.ToDict(), idx, replace]) 309 self.OutDate()
310
311 - def AttachInstanceDisk(self, inst_uuid, disk_uuid, idx=None):
312 """Attach an existing disk to an instance.""" 313 utils.SimpleRetry(True, self._wconfd.AttachInstanceDisk, 0.1, 30, 314 args=[inst_uuid, disk_uuid, idx]) 315 self.OutDate()
316
317 - def _UnlockedRemoveDisk(self, disk_uuid):
318 """Remove the disk from the configuration. 319 320 @type disk_uuid: string 321 @param disk_uuid: The UUID of the disk object 322 323 """ 324 if disk_uuid not in self._ConfigData().disks: 325 raise errors.ConfigurationError("Disk %s doesn't exist" % disk_uuid) 326 327 # Disk must not be attached anywhere 328 for inst in self._ConfigData().instances.values(): 329 if disk_uuid in inst.disks: 330 raise errors.ReservationError("Cannot remove disk %s. Disk is" 331 " attached to instance %s" 332 % (disk_uuid, inst.name)) 333 334 # Remove disk from config file 335 del self._ConfigData().disks[disk_uuid] 336 self._ConfigData().cluster.serial_no += 1
337
338 - def RemoveInstanceDisk(self, inst_uuid, disk_uuid):
339 """Detach a disk from an instance and remove it from the config.""" 340 utils.SimpleRetry(True, self._wconfd.RemoveInstanceDisk, 0.1, 30, 341 args=[inst_uuid, disk_uuid]) 342 self.OutDate()
343
344 - def DetachInstanceDisk(self, inst_uuid, disk_uuid):
345 """Detach a disk from an instance.""" 346 utils.SimpleRetry(True, self._wconfd.DetachInstanceDisk, 0.1, 30, 347 args=[inst_uuid, disk_uuid]) 348 self.OutDate()
349
350 - def _UnlockedGetDiskInfo(self, disk_uuid):
351 """Returns information about a disk. 352 353 It takes the information from the configuration file. 354 355 @param disk_uuid: UUID of the disk 356 357 @rtype: L{objects.Disk} 358 @return: the disk object 359 360 """ 361 if disk_uuid not in self._ConfigData().disks: 362 return None 363 364 return self._ConfigData().disks[disk_uuid]
365 366 @ConfigSync(shared=1)
367 - def GetDiskInfo(self, disk_uuid):
368 """Returns information about a disk. 369 370 This is a simple wrapper over L{_UnlockedGetDiskInfo}. 371 372 """ 373 return self._UnlockedGetDiskInfo(disk_uuid)
374
375 - def _UnlockedGetDiskInfoByName(self, disk_name):
376 """Return information about a named disk. 377 378 Return disk information from the configuration file, searching with the 379 name of the disk. 380 381 @param disk_name: Name of the disk 382 383 @rtype: L{objects.Disk} 384 @return: the disk object 385 386 """ 387 disk = None 388 count = 0 389 for d in self._ConfigData().disks.itervalues(): 390 if d.name == disk_name: 391 count += 1 392 disk = d 393 394 if count > 1: 395 raise errors.ConfigurationError("There are %s disks with this name: %s" 396 % (count, disk_name)) 397 398 return disk
399 400 @ConfigSync(shared=1)
401 - def GetDiskInfoByName(self, disk_name):
402 """Return information about a named disk. 403 404 This is a simple wrapper over L{_UnlockedGetDiskInfoByName}. 405 406 """ 407 return self._UnlockedGetDiskInfoByName(disk_name)
408
409 - def _UnlockedGetDiskList(self):
410 """Get the list of disks. 411 412 @return: array of disks, ex. ['disk2-uuid', 'disk1-uuid'] 413 414 """ 415 return self._ConfigData().disks.keys()
416 417 @ConfigSync(shared=1)
418 - def GetAllDisksInfo(self):
419 """Get the configuration of all disks. 420 421 This is a simple wrapper over L{_UnlockedGetAllDisksInfo}. 422 423 """ 424 return self._UnlockedGetAllDisksInfo()
425
426 - def _UnlockedGetAllDisksInfo(self):
427 """Get the configuration of all disks. 428 429 @rtype: dict 430 @return: dict of (disk, disk_info), where disk_info is what 431 would GetDiskInfo return for the node 432 433 """ 434 my_dict = dict([(disk_uuid, self._UnlockedGetDiskInfo(disk_uuid)) 435 for disk_uuid in self._UnlockedGetDiskList()]) 436 return my_dict
437
438 - def _AllInstanceNodes(self, inst_uuid):
439 """Compute the set of all disk-related nodes for an instance. 440 441 This abstracts away some work from '_UnlockedGetInstanceNodes' 442 and '_UnlockedGetInstanceSecondaryNodes'. 443 444 @type inst_uuid: string 445 @param inst_uuid: The UUID of the instance we want to get nodes for 446 @rtype: set of strings 447 @return: A set of names for all the nodes of the instance 448 449 """ 450 instance = self._UnlockedGetInstanceInfo(inst_uuid) 451 if instance is None: 452 raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid) 453 454 instance_disks = self._UnlockedGetInstanceDisks(inst_uuid) 455 all_nodes = [] 456 for disk in instance_disks: 457 all_nodes.extend(disk.all_nodes) 458 return (set(all_nodes), instance)
459
460 - def _UnlockedGetInstanceNodes(self, inst_uuid):
461 """Get all disk-related nodes for an instance. 462 463 For non-DRBD instances, this will contain only the instance's primary node, 464 whereas for DRBD instances, it will contain both the primary and the 465 secondaries. 466 467 @type inst_uuid: string 468 @param inst_uuid: The UUID of the instance we want to get nodes for 469 @rtype: list of strings 470 @return: A list of names for all the nodes of the instance 471 472 """ 473 (all_nodes, instance) = self._AllInstanceNodes(inst_uuid) 474 # ensure that primary node is always the first 475 all_nodes.discard(instance.primary_node) 476 return (instance.primary_node, ) + tuple(all_nodes)
477 478 @ConfigSync(shared=1)
479 - def GetInstanceNodes(self, inst_uuid):
480 """Get all disk-related nodes for an instance. 481 482 This is just a wrapper over L{_UnlockedGetInstanceNodes} 483 484 """ 485 return self._UnlockedGetInstanceNodes(inst_uuid)
486
487 - def _UnlockedGetInstanceSecondaryNodes(self, inst_uuid):
488 """Get the list of secondary nodes. 489 490 @type inst_uuid: string 491 @param inst_uuid: The UUID of the instance we want to get nodes for 492 @rtype: list of strings 493 @return: A tuple of names for all the secondary nodes of the instance 494 495 """ 496 (all_nodes, instance) = self._AllInstanceNodes(inst_uuid) 497 all_nodes.discard(instance.primary_node) 498 return tuple(all_nodes)
499 500 @ConfigSync(shared=1)
501 - def GetInstanceSecondaryNodes(self, inst_uuid):
502 """Get the list of secondary nodes. 503 504 This is a simple wrapper over L{_UnlockedGetInstanceSecondaryNodes}. 505 506 """ 507 return self._UnlockedGetInstanceSecondaryNodes(inst_uuid)
508
  def _UnlockedGetInstanceLVsByNode(self, inst_uuid, lvmap=None):
    """Provide a mapping of node to LVs a given instance owns.

    @type inst_uuid: string
    @param inst_uuid: The UUID of the instance we want to
        compute the LVsByNode for
    @type lvmap: dict
    @param lvmap: Optional dictionary to receive the
        'node' : ['lv', ...] data.
    @rtype: dict or None
    @return: None if lvmap arg is given, otherwise, a dictionary of
        the form { 'node_uuid' : ['volume1', 'volume2', ...], ... };
        volumeN is of the form "vg_name/lv_name", compatible with
        GetVolumeList()

    """
    def _MapLVsByNode(lvmap, devices, node_uuid):
      """Recursive helper function."""
      if not node_uuid in lvmap:
        lvmap[node_uuid] = []

      for dev in devices:
        if dev.dev_type == constants.DT_PLAIN:
          # a plain LV lives on the node currently being mapped; its
          # logical_id is (vg_name, lv_name)
          if not dev.forthcoming:
            lvmap[node_uuid].append(dev.logical_id[0] + "/" + dev.logical_id[1])

        elif dev.dev_type in constants.DTS_DRBD:
          # DRBD children (the backing LVs) exist on both peers; the first
          # two logical_id entries are used here as the peer node UUIDs
          if dev.children:
            _MapLVsByNode(lvmap, dev.children, dev.logical_id[0])
            _MapLVsByNode(lvmap, dev.children, dev.logical_id[1])

        elif dev.children:
          # any other nested device keeps its LVs on the same node
          _MapLVsByNode(lvmap, dev.children, node_uuid)

    instance = self._UnlockedGetInstanceInfo(inst_uuid)
    if instance is None:
      raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)

    # when the caller supplies a dict we fill it in place and return None,
    # otherwise we build and return a fresh one
    if lvmap is None:
      lvmap = {}
      ret = lvmap
    else:
      ret = None

    _MapLVsByNode(lvmap,
                  self._UnlockedGetInstanceDisks(instance.uuid),
                  instance.primary_node)
    return ret
557 558 @ConfigSync(shared=1)
559 - def GetInstanceLVsByNode(self, inst_uuid, lvmap=None):
560 """Provide a mapping of node to LVs a given instance owns. 561 562 This is a simple wrapper over L{_UnlockedGetInstanceLVsByNode} 563 564 """ 565 return self._UnlockedGetInstanceLVsByNode(inst_uuid, lvmap=lvmap)
566 567 @ConfigSync(shared=1)
568 - def GetGroupDiskParams(self, group):
569 """Get the disk params populated with inherit chain. 570 571 @type group: L{objects.NodeGroup} 572 @param group: The group we want to know the params for 573 @return: A dict with the filled in disk params 574 575 """ 576 return self._UnlockedGetGroupDiskParams(group)
577
578 - def _UnlockedGetGroupDiskParams(self, group):
579 """Get the disk params populated with inherit chain down to node-group. 580 581 @type group: L{objects.NodeGroup} 582 @param group: The group we want to know the params for 583 @return: A dict with the filled in disk params 584 585 """ 586 data = self._ConfigData().cluster.SimpleFillDP(group.diskparams) 587 assert isinstance(data, dict), "Not a dictionary: " + str(data) 588 return data
589 590 @ConfigSync(shared=1)
591 - def GetPotentialMasterCandidates(self):
592 """Gets the list of node names of potential master candidates. 593 594 @rtype: list of str 595 @return: list of node names of potential master candidates 596 597 """ 598 # FIXME: Note that currently potential master candidates are nodes 599 # but this definition will be extended once RAPI-unmodifiable 600 # parameters are introduced. 601 nodes = self._UnlockedGetAllNodesInfo() 602 return [node_info.name for node_info in nodes.values()]
603
604 - def GenerateMAC(self, net_uuid, _ec_id):
605 """Generate a MAC for an instance. 606 607 This should check the current instances for duplicates. 608 609 """ 610 return self._wconfd.GenerateMAC(self._GetWConfdContext(), net_uuid)
611
612 - def ReserveMAC(self, mac, _ec_id):
613 """Reserve a MAC for an instance. 614 615 This only checks instances managed by this cluster, it does not 616 check for potential collisions elsewhere. 617 618 """ 619 self._wconfd.ReserveMAC(self._GetWConfdContext(), mac)
620 621 @ConfigSync(shared=1)
622 - def CommitTemporaryIps(self, _ec_id):
623 """Tell WConfD to commit all temporary ids""" 624 self._wconfd.CommitTemporaryIps(self._GetWConfdContext())
625
626 - def ReleaseIp(self, net_uuid, address, _ec_id):
627 """Give a specific IP address back to an IP pool. 628 629 The IP address is returned to the IP pool and marked as reserved. 630 631 """ 632 if net_uuid: 633 if self._offline: 634 raise errors.ProgrammerError("Can't call ReleaseIp in offline mode") 635 self._wconfd.ReleaseIp(self._GetWConfdContext(), net_uuid, address)
636
637 - def GenerateIp(self, net_uuid, _ec_id):
638 """Find a free IPv4 address for an instance. 639 640 """ 641 if self._offline: 642 raise errors.ProgrammerError("Can't call GenerateIp in offline mode") 643 return self._wconfd.GenerateIp(self._GetWConfdContext(), net_uuid)
644
645 - def ReserveIp(self, net_uuid, address, _ec_id, check=True):
646 """Reserve a given IPv4 address for use by an instance. 647 648 """ 649 if self._offline: 650 raise errors.ProgrammerError("Can't call ReserveIp in offline mode") 651 return self._wconfd.ReserveIp(self._GetWConfdContext(), net_uuid, address, 652 check)
653
654 - def ReserveLV(self, lv_name, _ec_id):
655 """Reserve an VG/LV pair for an instance. 656 657 @type lv_name: string 658 @param lv_name: the logical volume name to reserve 659 660 """ 661 return self._wconfd.ReserveLV(self._GetWConfdContext(), lv_name)
662
663 - def GenerateDRBDSecret(self, _ec_id):
664 """Generate a DRBD secret. 665 666 This checks the current disks for duplicates. 667 668 """ 669 return self._wconfd.GenerateDRBDSecret(self._GetWConfdContext())
670 671 # FIXME: After _AllIDs is removed, move it to config_mock.py
672 - def _AllLVs(self):
673 """Compute the list of all LVs. 674 675 """ 676 lvnames = set() 677 for instance in self._ConfigData().instances.values(): 678 node_data = self._UnlockedGetInstanceLVsByNode(instance.uuid) 679 for lv_list in node_data.values(): 680 lvnames.update(lv_list) 681 return lvnames
682
683 - def _AllNICs(self):
684 """Compute the list of all NICs. 685 686 """ 687 nics = [] 688 for instance in self._ConfigData().instances.values(): 689 nics.extend(instance.nics) 690 return nics
691
692 - def _AllIDs(self, include_temporary):
693 """Compute the list of all UUIDs and names we have. 694 695 @type include_temporary: boolean 696 @param include_temporary: whether to include the _temporary_ids set 697 @rtype: set 698 @return: a set of IDs 699 700 """ 701 existing = set() 702 if include_temporary: 703 existing.update(self._temporary_ids.GetReserved()) 704 existing.update(self._AllLVs()) 705 existing.update(self._ConfigData().instances.keys()) 706 existing.update(self._ConfigData().nodes.keys()) 707 existing.update([i.uuid for i in self._AllUUIDObjects() if i.uuid]) 708 return existing
709
710 - def _GenerateUniqueID(self, ec_id):
711 """Generate an unique UUID. 712 713 This checks the current node, instances and disk names for 714 duplicates. 715 716 @rtype: string 717 @return: the unique id 718 719 """ 720 existing = self._AllIDs(include_temporary=False) 721 return self._temporary_ids.Generate(existing, utils.NewUUID, ec_id)
722 723 @ConfigSync(shared=1)
724 - def GenerateUniqueID(self, ec_id):
725 """Generate an unique ID. 726 727 This is just a wrapper over the unlocked version. 728 729 @type ec_id: string 730 @param ec_id: unique id for the job to reserve the id to 731 732 """ 733 return self._GenerateUniqueID(ec_id)
734
735 - def _AllMACs(self):
736 """Return all MACs present in the config. 737 738 @rtype: list 739 @return: the list of all MACs 740 741 """ 742 result = [] 743 for instance in self._ConfigData().instances.values(): 744 for nic in instance.nics: 745 result.append(nic.mac) 746 747 return result
748
749 - def _AllDRBDSecrets(self):
750 """Return all DRBD secrets present in the config. 751 752 @rtype: list 753 @return: the list of all DRBD secrets 754 755 """ 756 def helper(disk, result): 757 """Recursively gather secrets from this disk.""" 758 if disk.dev_type == constants.DT_DRBD8: 759 result.append(disk.logical_id[5]) 760 if disk.children: 761 for child in disk.children: 762 helper(child, result)
763 764 result = [] 765 for disk in self._ConfigData().disks.values(): 766 helper(disk, result) 767 768 return result 769 770 @staticmethod
771 - def _VerifyDisks(data, result):
772 """Per-disk verification checks 773 774 Extends L{result} with diagnostic information about the disks. 775 776 @type data: see L{_ConfigData} 777 @param data: configuration data 778 779 @type result: list of strings 780 @param result: list containing diagnostic messages 781 782 """ 783 for disk_uuid in data.disks: 784 disk = data.disks[disk_uuid] 785 result.extend(["disk %s error: %s" % (disk.uuid, msg) 786 for msg in disk.Verify()]) 787 if disk.uuid != disk_uuid: 788 result.append("disk '%s' is indexed by wrong UUID '%s'" % 789 (disk.name, disk_uuid))
790
  def _UnlockedVerifyConfig(self):
    """Verify function.

    Runs a full consistency pass over the in-memory configuration: cluster
    parameters, disks, instances, nodes, node groups, port usage and IP
    address usage. Purely read-only; problems are reported, not fixed.

    @rtype: list
    @return: a list of error messages; a non-empty list signifies
        configuration errors

    """
    # pylint: disable=R0914
    result = []
    seen_macs = []
    ports = {}
    data = self._ConfigData()
    cluster = data.cluster

    # First call WConfd to perform its checks, if we're not offline
    if not self._offline:
      try:
        self._wconfd.VerifyConfig()
      except errors.ConfigVerifyError, err:
        try:
          # the individual messages are expected in the second exception arg
          for msg in err.args[1]:
            result.append(msg)
        except IndexError:
          pass

    # check cluster parameters
    VerifyType("cluster", "beparams", cluster.SimpleFillBE({}),
               constants.BES_PARAMETER_TYPES, result.append)
    VerifyType("cluster", "nicparams", cluster.SimpleFillNIC({}),
               constants.NICS_PARAMETER_TYPES, result.append)
    VerifyNic("cluster", cluster.SimpleFillNIC({}), result.append)
    VerifyType("cluster", "ndparams", cluster.SimpleFillND({}),
               constants.NDS_PARAMETER_TYPES, result.append)
    VerifyIpolicy("cluster", cluster.ipolicy, True, result.append)

    # validate the access mode of every disk template that supports one
    for disk_template in cluster.diskparams:
      if disk_template not in constants.DTS_HAVE_ACCESS:
        continue

      access = cluster.diskparams[disk_template].get(constants.LDP_ACCESS,
                                                     constants.DISK_KERNELSPACE)
      if access not in constants.DISK_VALID_ACCESS_MODES:
        result.append(
          "Invalid value of '%s:%s': '%s' (expected one of %s)" % (
            disk_template, constants.LDP_ACCESS, access,
            utils.CommaJoin(constants.DISK_VALID_ACCESS_MODES)
          )
        )

    self._VerifyDisks(data, result)

    # per-instance checks
    for instance_uuid in data.instances:
      instance = data.instances[instance_uuid]
      if instance.uuid != instance_uuid:
        result.append("instance '%s' is indexed by wrong UUID '%s'" %
                      (instance.name, instance_uuid))
      if instance.primary_node not in data.nodes:
        result.append("instance '%s' has invalid primary node '%s'" %
                      (instance.name, instance.primary_node))
      for snode in self._UnlockedGetInstanceSecondaryNodes(instance.uuid):
        if snode not in data.nodes:
          result.append("instance '%s' has invalid secondary node '%s'" %
                        (instance.name, snode))
      # NIC checks: cluster-wide MAC uniqueness plus per-NIC parameters
      for idx, nic in enumerate(instance.nics):
        if nic.mac in seen_macs:
          result.append("instance '%s' has NIC %d mac %s duplicate" %
                        (instance.name, idx, nic.mac))
        else:
          seen_macs.append(nic.mac)
        if nic.nicparams:
          filled = cluster.SimpleFillNIC(nic.nicparams)
          owner = "instance %s nic %d" % (instance.name, idx)
          VerifyType(owner, "nicparams",
                     filled, constants.NICS_PARAMETER_TYPES, result.append)
          VerifyNic(owner, filled, result.append)

      # parameter checks
      if instance.beparams:
        VerifyType("instance %s" % instance.name, "beparams",
                   cluster.FillBE(instance), constants.BES_PARAMETER_TYPES,
                   result.append)

      # check that disks exists
      for disk_uuid in instance.disks:
        if disk_uuid not in data.disks:
          result.append("Instance '%s' has invalid disk '%s'" %
                        (instance.name, disk_uuid))

      instance_disks = self._UnlockedGetInstanceDisks(instance.uuid)
      # gather the drbd ports for duplicate checks
      for (idx, dsk) in enumerate(instance_disks):
        if dsk.dev_type in constants.DTS_DRBD:
          tcp_port = dsk.logical_id[2]
          if tcp_port not in ports:
            ports[tcp_port] = []
          ports[tcp_port].append((instance.name, "drbd disk %s" % idx))
      # gather network port reservation
      net_port = getattr(instance, "network_port", None)
      if net_port is not None:
        if net_port not in ports:
          ports[net_port] = []
        ports[net_port].append((instance.name, "network port"))

      # disk iv_name attributes must follow the "disk/N" convention
      wrong_names = _CheckInstanceDiskIvNames(instance_disks)
      if wrong_names:
        tmp = "; ".join(("name of disk %s should be '%s', but is '%s'" %
                         (idx, exp_name, actual_name))
                        for (idx, exp_name, actual_name) in wrong_names)

        result.append("Instance '%s' has wrongly named disks: %s" %
                      (instance.name, tmp))

    # cluster-wide pool of free ports
    for free_port in cluster.tcpudp_port_pool:
      if free_port not in ports:
        ports[free_port] = []
      ports[free_port].append(("cluster", "port marked as free"))

    # compute tcp/udp duplicate ports
    keys = ports.keys()
    keys.sort()
    for pnum in keys:
      pdata = ports[pnum]
      if len(pdata) > 1:
        txt = utils.CommaJoin(["%s/%s" % val for val in pdata])
        result.append("tcp/udp port %s has duplicates: %s" % (pnum, txt))

    # highest used tcp port check
    if keys:
      if keys[-1] > cluster.highest_used_port:
        result.append("Highest used port mismatch, saved %s, computed %s" %
                      (cluster.highest_used_port, keys[-1]))

    if not data.nodes[cluster.master_node].master_candidate:
      result.append("Master node is not a master candidate")

    # master candidate checks
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats()
    if mc_now < mc_max:
      result.append("Not enough master candidates: actual %d, target %d" %
                    (mc_now, mc_max))

    # node checks
    for node_uuid, node in data.nodes.items():
      if node.uuid != node_uuid:
        result.append("Node '%s' is indexed by wrong UUID '%s'" %
                      (node.name, node_uuid))
      # a node may be in at most one of these three states at a time
      if [node.master_candidate, node.drained, node.offline].count(True) > 1:
        result.append("Node %s state is invalid: master_candidate=%s,"
                      " drain=%s, offline=%s" %
                      (node.name, node.master_candidate, node.drained,
                       node.offline))
      if node.group not in data.nodegroups:
        result.append("Node '%s' has invalid group '%s'" %
                      (node.name, node.group))
      else:
        VerifyType("node %s" % node.name, "ndparams",
                   cluster.FillND(node, data.nodegroups[node.group]),
                   constants.NDS_PARAMETER_TYPES, result.append)
      # global ndparams must only be set at cluster level, never per node
      used_globals = constants.NDC_GLOBALS.intersection(node.ndparams)
      if used_globals:
        result.append("Node '%s' has some global parameters set: %s" %
                      (node.name, utils.CommaJoin(used_globals)))

    # nodegroups checks
    nodegroups_names = set()
    for nodegroup_uuid in data.nodegroups:
      nodegroup = data.nodegroups[nodegroup_uuid]
      if nodegroup.uuid != nodegroup_uuid:
        result.append("node group '%s' (uuid: '%s') indexed by wrong uuid '%s'"
                      % (nodegroup.name, nodegroup.uuid, nodegroup_uuid))
      # uuid-like group names would be ambiguous with real UUIDs
      if utils.UUID_RE.match(nodegroup.name.lower()):
        result.append("node group '%s' (uuid: '%s') has uuid-like name" %
                      (nodegroup.name, nodegroup.uuid))
      if nodegroup.name in nodegroups_names:
        result.append("duplicate node group name '%s'" % nodegroup.name)
      else:
        nodegroups_names.add(nodegroup.name)
      group_name = "group %s" % nodegroup.name
      VerifyIpolicy(group_name, cluster.SimpleFillIPolicy(nodegroup.ipolicy),
                    False, result.append)
      if nodegroup.ndparams:
        VerifyType(group_name, "ndparams",
                   cluster.SimpleFillND(nodegroup.ndparams),
                   constants.NDS_PARAMETER_TYPES, result.append)

    # drbd minors check
    # FIXME: The check for DRBD map needs to be implemented in WConfd

    # IP checks
    default_nicparams = cluster.nicparams[constants.PP_DEFAULT]
    ips = {}

    def _AddIpAddress(ip, name):
      # record one more owner for this address key
      ips.setdefault(ip, []).append(name)

    _AddIpAddress(cluster.master_ip, "cluster_ip")

    for node in data.nodes.values():
      _AddIpAddress(node.primary_ip, "node:%s/primary" % node.name)
      if node.secondary_ip != node.primary_ip:
        _AddIpAddress(node.secondary_ip, "node:%s/secondary" % node.name)

    for instance in data.instances.values():
      for idx, nic in enumerate(instance.nics):
        if nic.ip is None:
          continue

        nicparams = objects.FillDict(default_nicparams, nic.nicparams)
        nic_mode = nicparams[constants.NIC_MODE]
        nic_link = nicparams[constants.NIC_LINK]

        # instance IPs only clash within the same link/network scope, so
        # the map key includes the link and network as a prefix
        if nic_mode == constants.NIC_MODE_BRIDGED:
          link = "bridge:%s" % nic_link
        elif nic_mode == constants.NIC_MODE_ROUTED:
          link = "route:%s" % nic_link
        elif nic_mode == constants.NIC_MODE_OVS:
          link = "ovs:%s" % nic_link
        else:
          raise errors.ProgrammerError("NIC mode '%s' not handled" % nic_mode)

        _AddIpAddress("%s/%s/%s" % (link, nic.ip, nic.network),
                      "instance:%s/nic:%d" % (instance.name, idx))

    for ip, owners in ips.items():
      if len(owners) > 1:
        result.append("IP address %s is used by multiple owners: %s" %
                      (ip, utils.CommaJoin(owners)))

    return result

  @ConfigSync(shared=1)
def VerifyConfigAndLog(self, feedback_fn=None):
  """Locked wrapper around L{_UnlockedVerifyConfigAndLog}.

  @param feedback_fn: Callable feedback function
  @rtype: list
  @return: list of configuration error messages (empty if consistent)

  """
  return self._UnlockedVerifyConfigAndLog(feedback_fn=feedback_fn)
1028
1029 - def _UnlockedVerifyConfigAndLog(self, feedback_fn=None):
1030 """Verify the configuration and log any errors. 1031 1032 The errors get logged as critical errors and also to the feedback function, 1033 if given. 1034 1035 @param feedback_fn: Callable feedback function 1036 @rtype: list 1037 @return: a list of error messages; a non-empty list signifies 1038 configuration errors 1039 1040 """ 1041 assert feedback_fn is None or callable(feedback_fn) 1042 1043 # Warn on config errors, but don't abort the save - the 1044 # configuration has already been modified, and we can't revert; 1045 # the best we can do is to warn the user and save as is, leaving 1046 # recovery to the user 1047 config_errors = self._UnlockedVerifyConfig() 1048 if config_errors: 1049 errmsg = ("Configuration data is not consistent: %s" % 1050 (utils.CommaJoin(config_errors))) 1051 logging.critical(errmsg) 1052 if feedback_fn: 1053 feedback_fn(errmsg) 1054 return config_errors
1055 1056 @ConfigSync(shared=1)
def VerifyConfig(self):
  """Verify the configuration under the shared config lock.

  This is just a wrapper over L{_UnlockedVerifyConfig}.

  @rtype: list
  @return: a list of error messages; a non-empty list signifies
      configuration errors

  """
  return self._UnlockedVerifyConfig()
1068
def AddTcpUdpPort(self, port):
  """Adds a new port to the available port pool.

  @param port: TCP/UDP port to return to the cluster-wide pool

  """
  # Delegate to WConfd, retrying the call (step 0.1, limit 30)
  utils.SimpleRetry(True, self._wconfd.AddTcpUdpPort, 0.1, 30, args=[port])
  # the locally cached configuration no longer matches WConfd's view
  self.OutDate()
1073 1074 @ConfigSync(shared=1)
def GetPortList(self):
  """Returns a copy of the current port list.

  @return: a copy of the cluster's free TCP/UDP port pool

  """
  pool = self._ConfigData().cluster.tcpudp_port_pool
  return pool.copy()
1080
def AllocatePort(self):
  """Allocate a port.

  The port is requested from WConfd; the surrounding retry loop keeps
  asking until WConfd hands one out.

  """
  # closure for utils.Retry: returns the port, or signals RetryAgain
  # when WConfd has not allocated one yet
  def WithRetry():
    port = self._wconfd.AllocatePort()
    self.OutDate()

    if port is None:
      raise utils.RetryAgain()
    else:
      return port
1091 return utils.Retry(WithRetry, 0.1, 30) 1092 1093 @ConfigSync(shared=1)
def ComputeDRBDMap(self):
  """Compute the used DRBD minor/nodes.

  This is just a wrapper over a call to WConfd.

  @return: dictionary of node_uuid: dict of minor: instance_uuid;
      the returned dict will have all the nodes in it (even if with
      an empty list).

  """
  if self._offline:
    raise errors.ProgrammerError("Can't call ComputeDRBDMap in offline mode")
  else:
    # generator expression instead of a tuple-parameter lambda
    # (``lambda (k, v):`` is invalid syntax on Python 3)
    return dict((node_uuid, dict(minors))
                for (node_uuid, minors) in self._wconfd.ComputeDRBDMap())
1109
def AllocateDRBDMinor(self, node_uuids, disk_uuid):
  """Allocate a drbd minor.

  This is just a wrapper over a call to WConfd.

  The free minor will be automatically computed from the existing
  devices. A node can not be given multiple times.
  The result is the list of minors, in the same
  order as the passed nodes.

  @type node_uuids: list of strings
  @param node_uuids: the nodes in which we allocate minors
  @type disk_uuid: string
  @param disk_uuid: the disk for which we allocate minors
  @rtype: list of ints
  @return: A list of minors in the same order as the passed nodes

  """
  # basestring covers both str and unicode (Python 2)
  assert isinstance(disk_uuid, basestring), \
    "Invalid argument '%s' passed to AllocateDRBDMinor" % disk_uuid

  # allocation mutates WConfd state, which is impossible offline
  if self._offline:
    raise errors.ProgrammerError("Can't call AllocateDRBDMinor"
                                 " in offline mode")

  # WConfd computes the free minors from its existing device map
  result = self._wconfd.AllocateDRBDMinor(disk_uuid, node_uuids)
  logging.debug("Request to allocate drbd minors, input: %s, returning %s",
                node_uuids, result)
  return result
1139
def ReleaseDRBDMinors(self, disk_uuid):
  """Release temporary drbd minors allocated for a given disk.

  This is just a wrapper over a call to WConfd.

  @type disk_uuid: string
  @param disk_uuid: the disk for which temporary minors should be released

  """
  # basestring covers both str and unicode (Python 2)
  assert isinstance(disk_uuid, basestring), \
    "Invalid argument passed to ReleaseDRBDMinors"
  # in offline mode we allow the calls to release DRBD minors,
  # because then nothing can be allocated anyway;
  # this is useful for testing
  if not self._offline:
    self._wconfd.ReleaseDRBDMinors(disk_uuid)
1156 1157 @ConfigSync(shared=1)
def GetInstanceDiskTemplate(self, inst_uuid):
  """Return the disk template of an instance.

  This corresponds to the currently attached disks: L{constants.DT_DISKLESS}
  when no disks are attached, the common type when all attached disks share
  one, and L{constants.DT_MIXED} otherwise.

  @type inst_uuid: str
  @param inst_uuid: The uuid of the instance.
  """
  disks = self._UnlockedGetInstanceDisks(inst_uuid)
  return utils.GetDiskTemplate(disks)
1170 1171 @ConfigSync(shared=1)
def GetConfigVersion(self):
  """Get the configuration version.

  @return: Config version

  """
  data = self._ConfigData()
  return data.version
1179 1180 @ConfigSync(shared=1)
def GetClusterName(self):
  """Get cluster name.

  @rtype: string
  @return: the name of this cluster as stored in the configuration

  """
  cluster = self._ConfigData().cluster
  return cluster.cluster_name
1188 1189 @ConfigSync(shared=1)
def GetMasterNode(self):
  """Get the UUID of the master node for this cluster.

  @return: Master node UUID

  """
  cluster = self._ConfigData().cluster
  return cluster.master_node
1197 1198 @ConfigSync(shared=1)
def GetMasterNodeName(self):
  """Get the hostname of the master node for this cluster.

  @return: Master node hostname

  """
  master_uuid = self._ConfigData().cluster.master_node
  return self._UnlockedGetNodeName(master_uuid)
1206 1207 @ConfigSync(shared=1)
def GetMasterNodeInfo(self):
  """Get the master node information for this cluster.

  @rtype: objects.Node
  @return: Master node L{objects.Node} object

  """
  master_uuid = self._ConfigData().cluster.master_node
  return self._UnlockedGetNodeInfo(master_uuid)
1216 1217 @ConfigSync(shared=1)
def GetMasterIP(self):
  """Get the IP of the master node for this cluster.

  @return: Master IP

  """
  cluster = self._ConfigData().cluster
  return cluster.master_ip
1225 1226 @ConfigSync(shared=1)
def GetMasterNetdev(self):
  """Get the master network device for this cluster.

  @return: the network device carrying the master IP

  """
  cluster = self._ConfigData().cluster
  return cluster.master_netdev
1232 1233 @ConfigSync(shared=1)
def GetMasterNetmask(self):
  """Get the netmask of the master node for this cluster.

  @return: the netmask used for the master IP

  """
  cluster = self._ConfigData().cluster
  return cluster.master_netmask
1239 1240 @ConfigSync(shared=1)
def GetUseExternalMipScript(self):
  """Get flag representing whether to use the external master IP setup script.

  @return: the configured flag value

  """
  cluster = self._ConfigData().cluster
  return cluster.use_external_mip_script
1246 1247 @ConfigSync(shared=1)
def GetFileStorageDir(self):
  """Get the file storage dir for this cluster.

  @return: the configured file storage directory

  """
  cluster = self._ConfigData().cluster
  return cluster.file_storage_dir
1253 1254 @ConfigSync(shared=1)
def GetSharedFileStorageDir(self):
  """Get the shared file storage dir for this cluster.

  @return: the configured shared file storage directory

  """
  cluster = self._ConfigData().cluster
  return cluster.shared_file_storage_dir
1260 1261 @ConfigSync(shared=1)
def GetGlusterStorageDir(self):
  """Get the Gluster storage dir for this cluster.

  @return: the configured Gluster storage directory

  """
  cluster = self._ConfigData().cluster
  return cluster.gluster_storage_dir
1267 1268 @ConfigSync(shared=1)
def GetHypervisorType(self):
  """Get the hypervisor type for this cluster.

  @return: the first (default) enabled hypervisor

  """
  enabled = self._ConfigData().cluster.enabled_hypervisors
  return enabled[0]
1274 1275 @ConfigSync(shared=1)
def GetRsaHostKey(self):
  """Return the rsa hostkey from the config.

  @rtype: string
  @return: the rsa hostkey

  """
  cluster = self._ConfigData().cluster
  return cluster.rsahostkeypub
1284 1285 @ConfigSync(shared=1)
def GetDsaHostKey(self):
  """Return the dsa hostkey from the config.

  @rtype: string
  @return: the dsa hostkey

  """
  cluster = self._ConfigData().cluster
  return cluster.dsahostkeypub
1294 1295 @ConfigSync(shared=1)
def GetDefaultIAllocator(self):
  """Get the default instance allocator for this cluster.

  @return: name of the configured default iallocator

  """
  cluster = self._ConfigData().cluster
  return cluster.default_iallocator
1301 1302 @ConfigSync(shared=1)
def GetDefaultIAllocatorParameters(self):
  """Get the default instance allocator parameters for this cluster.

  @rtype: dict
  @return: dict of iallocator parameters

  """
  cluster = self._ConfigData().cluster
  return cluster.default_iallocator_params
1311 1312 @ConfigSync(shared=1)
def GetPrimaryIPFamily(self):
  """Get cluster primary ip family.

  @return: primary ip family

  """
  cluster = self._ConfigData().cluster
  return cluster.primary_ip_family
1320 1321 @ConfigSync(shared=1)
def GetMasterNetworkParameters(self):
  """Get network parameters of the master node.

  @rtype: L{object.MasterNetworkParameters}
  @return: network parameters of the master node

  """
  cluster = self._ConfigData().cluster
  return objects.MasterNetworkParameters(
    uuid=cluster.master_node,
    ip=cluster.master_ip,
    netmask=cluster.master_netmask,
    netdev=cluster.master_netdev,
    ip_family=cluster.primary_ip_family)
1336 1337 @ConfigSync(shared=1)
def GetInstallImage(self):
  """Get the install image location

  @rtype: string
  @return: location of the install image

  """
  cluster = self._ConfigData().cluster
  return cluster.install_image
1346 1347 @ConfigSync()
def SetInstallImage(self, install_image):
  """Set the install image location

  @type install_image: string
  @param install_image: location of the install image

  """
  cluster = self._ConfigData().cluster
  cluster.install_image = install_image
1356 1357 @ConfigSync(shared=1)
def GetInstanceCommunicationNetwork(self):
  """Get cluster instance communication network

  @rtype: string
  @return: instance communication network, which is the name of the
      network used for instance communication

  """
  cluster = self._ConfigData().cluster
  return cluster.instance_communication_network
1367 1368 @ConfigSync()
def SetInstanceCommunicationNetwork(self, network_name):
  """Set cluster instance communication network

  @type network_name: string
  @param network_name: instance communication network, which is the name of
      the network used for instance communication

  """
  cluster = self._ConfigData().cluster
  cluster.instance_communication_network = network_name
1378 1379 @ConfigSync(shared=1)
def GetZeroingImage(self):
  """Get the zeroing image location

  @rtype: string
  @return: the location of the zeroing image

  """
  # use the _ConfigData() accessor like every other getter in this class;
  # the previous direct self._config_data access bypassed the accessor and
  # could read a stale/outdated configuration object
  return self._ConfigData().cluster.zeroing_image
1388 1389 @ConfigSync(shared=1)
def GetCompressionTools(self):
  """Get cluster compression tools

  @rtype: list of string
  @return: a list of tools that are cleared for use in this cluster for the
      purpose of compressing data

  """
  cluster = self._ConfigData().cluster
  return cluster.compression_tools
1399 1400 @ConfigSync()
def SetCompressionTools(self, tools):
  """Set cluster compression tools

  @type tools: list of string
  @param tools: a list of tools that are cleared for use in this cluster for
      the purpose of compressing data

  """
  cluster = self._ConfigData().cluster
  cluster.compression_tools = tools
1410 1411 @ConfigSync()
def AddNodeGroup(self, group, ec_id, check_uuid=True):
  """Add a node group to the configuration.

  This method calls group.UpgradeConfig() to fill any missing attributes
  according to their default values.

  @type group: L{objects.NodeGroup}
  @param group: the NodeGroup object to add
  @type ec_id: string
  @param ec_id: unique id for the job to use when creating a missing UUID
  @type check_uuid: bool
  @param check_uuid: add an UUID to the group if it doesn't have one or, if
      it does, ensure that it does not exist in the
      configuration already

  """
  self._UnlockedAddNodeGroup(group, ec_id, check_uuid)
1429
def _UnlockedAddNodeGroup(self, group, ec_id, check_uuid):
  """Add a node group to the configuration, config lock already held.

  Ensures name/UUID uniqueness, initializes bookkeeping fields and
  upgrades the group object before storing it.

  """
  logging.info("Adding node group %s to configuration", group.name)

  # Some code might need to add a node group with a pre-populated UUID
  # generated with ConfigWriter.GenerateUniqueID(). We allow them to bypass
  # the "does this UUID" exist already check.
  if check_uuid:
    self._EnsureUUID(group, ec_id)

  try:
    existing_uuid = self._UnlockedLookupNodeGroup(group.name)
  except errors.OpPrereqError:
    # no group with this name yet, which is what we want
    pass
  else:
    raise errors.OpPrereqError("Desired group name '%s' already exists as a"
                               " node group (UUID: %s)" %
                               (group.name, existing_uuid),
                               errors.ECODE_EXISTS)

  now = time.time()
  group.serial_no = 1
  group.ctime = now
  group.mtime = now
  group.UpgradeConfig()

  data = self._ConfigData()
  data.nodegroups[group.uuid] = group
  data.cluster.serial_no += 1
1458 1459 @ConfigSync()
def RemoveNodeGroup(self, group_uuid):
  """Remove a node group from the configuration.

  @type group_uuid: string
  @param group_uuid: the UUID of the node group to remove

  """
  logging.info("Removing node group %s from configuration", group_uuid)

  nodegroups = self._ConfigData().nodegroups
  if group_uuid not in nodegroups:
    raise errors.ConfigurationError("Unknown node group '%s'" % group_uuid)

  # at least one group must always remain
  assert len(nodegroups) != 1, \
    "Group '%s' is the only group, cannot be removed" % group_uuid

  del nodegroups[group_uuid]
  self._ConfigData().cluster.serial_no += 1
1477
1478 - def _UnlockedLookupNodeGroup(self, target):
1479 """Lookup a node group's UUID. 1480 1481 @type target: string or None 1482 @param target: group name or UUID or None to look for the default 1483 @rtype: string 1484 @return: nodegroup UUID 1485 @raises errors.OpPrereqError: when the target group cannot be found 1486 1487 """ 1488 if target is None: 1489 if len(self._ConfigData().nodegroups) != 1: 1490 raise errors.OpPrereqError("More than one node group exists. Target" 1491 " group must be specified explicitly.") 1492 else: 1493 return self._ConfigData().nodegroups.keys()[0] 1494 if target in self._ConfigData().nodegroups: 1495 return target 1496 for nodegroup in self._ConfigData().nodegroups.values(): 1497 if nodegroup.name == target: 1498 return nodegroup.uuid 1499 raise errors.OpPrereqError("Node group '%s' not found" % target, 1500 errors.ECODE_NOENT)
1501 1502 @ConfigSync(shared=1)
def LookupNodeGroup(self, target):
  """Lookup a node group's UUID.

  This function is just a wrapper over L{_UnlockedLookupNodeGroup}.

  @type target: string or None
  @param target: group name or UUID or None to look for the default
  @rtype: string
  @return: nodegroup UUID

  """
  return self._UnlockedLookupNodeGroup(target)
1515
1516 - def _UnlockedGetNodeGroup(self, uuid):
1517 """Lookup a node group. 1518 1519 @type uuid: string 1520 @param uuid: group UUID 1521 @rtype: L{objects.NodeGroup} or None 1522 @return: nodegroup object, or None if not found 1523 1524 """ 1525 if uuid not in self._ConfigData().nodegroups: 1526 return None 1527 1528 return self._ConfigData().nodegroups[uuid]
1529 1530 @ConfigSync(shared=1)
def GetNodeGroup(self, uuid):
  """Lookup a node group.

  @type uuid: string
  @param uuid: group UUID
  @rtype: L{objects.NodeGroup} or None
  @return: nodegroup object, or None if not found

  """
  return self._UnlockedGetNodeGroup(uuid)
1541
1542 - def _UnlockedGetAllNodeGroupsInfo(self):
1543 """Get the configuration of all node groups. 1544 1545 """ 1546 return dict(self._ConfigData().nodegroups)
1547 1548 @ConfigSync(shared=1)
def GetAllNodeGroupsInfo(self):
  """Get the configuration of all node groups.

  @rtype: dict
  @return: uuid-to-nodegroup mapping

  """
  return self._UnlockedGetAllNodeGroupsInfo()
1554 1555 @ConfigSync(shared=1)
def GetAllNodeGroupsInfoDict(self):
  """Get the configuration of all node groups expressed as a dictionary of
  dictionaries.

  @rtype: dict
  @return: mapping of group UUID to the group's ToDict() representation

  """
  # generator expression instead of a tuple-parameter lambda
  # (``lambda (uuid, ng):`` is invalid syntax on Python 3)
  return dict((group_uuid, group.ToDict())
              for (group_uuid, group)
              in self._UnlockedGetAllNodeGroupsInfo().items())
1563 1564 @ConfigSync(shared=1)
def GetNodeGroupList(self):
  """Get a list of node groups.

  @rtype: list
  @return: list of node group UUIDs

  """
  # list(...) returns the same list as .keys() on Python 2, and keeps
  # returning a real list (not a view) on Python 3
  return list(self._ConfigData().nodegroups)
1570 1571 @ConfigSync(shared=1)
def GetNodeGroupMembersByNodes(self, nodes):
  """Get nodes which are member in the same nodegroups as the given nodes.

  @rtype: frozenset
  @return: UUIDs of all nodes sharing a node group with any given node

  """
  members = set()
  for node_uuid in nodes:
    group_uuid = self._UnlockedGetNodeInfo(node_uuid).group
    members.update(self._UnlockedGetNodeGroup(group_uuid).members)
  return frozenset(members)
1581 1582 @ConfigSync(shared=1)
def GetMultiNodeGroupInfo(self, group_uuids):
  """Get the configuration of multiple node groups.

  @param group_uuids: List of node group UUIDs
  @rtype: list
  @return: List of tuples of (group_uuid, group_info)

  """
  result = []
  for group_uuid in group_uuids:
    result.append((group_uuid, self._UnlockedGetNodeGroup(group_uuid)))
  return result
1592
def AddInstance(self, instance, _ec_id, replace=False):
  """Add an instance to the config.

  This should be used after creating a new instance.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @type replace: bool
  @param replace: if true, expect the instance to be present and
      replace rather than add.

  """
  if not isinstance(instance, objects.Instance):
    raise errors.ProgrammerError("Invalid type passed to AddInstance")

  # a freshly added instance starts its serial number history at 1
  instance.serial_no = 1

  # delegate the actual addition to WConfd, retrying the call
  utils.SimpleRetry(True, self._wconfd.AddInstance, 0.1, 30,
                    args=[instance.ToDict(),
                          self._GetWConfdContext(),
                          replace])
  # the locally cached configuration no longer matches WConfd's view
  self.OutDate()
1615
1616 - def _EnsureUUID(self, item, ec_id):
1617 """Ensures a given object has a valid UUID. 1618 1619 @param item: the instance or node to be checked 1620 @param ec_id: the execution context id for the uuid reservation 1621 1622 """ 1623 if not item.uuid: 1624 item.uuid = self._GenerateUniqueID(ec_id) 1625 else: 1626 self._CheckUniqueUUID(item, include_temporary=True)
1627
1628 - def _CheckUniqueUUID(self, item, include_temporary):
1629 """Checks that the UUID of the given object is unique. 1630 1631 @param item: the instance or node to be checked 1632 @param include_temporary: whether temporarily generated UUID's should be 1633 included in the check. If the UUID of the item to be checked is 1634 a temporarily generated one, this has to be C{False}. 1635 1636 """ 1637 if not item.uuid: 1638 raise errors.ConfigurationError("'%s' must have an UUID" % (item.name,)) 1639 if item.uuid in self._AllIDs(include_temporary=include_temporary): 1640 raise errors.ConfigurationError("Cannot add '%s': UUID %s already" 1641 " in use" % (item.name, item.uuid))
1642
1643 - def _CheckUUIDpresent(self, item):
1644 """Checks that an object with the given UUID exists. 1645 1646 @param item: the instance or other UUID possessing object to verify that 1647 its UUID is present 1648 1649 """ 1650 if not item.uuid: 1651 raise errors.ConfigurationError("'%s' must have an UUID" % (item.name,)) 1652 if item.uuid not in self._AllIDs(include_temporary=False): 1653 raise errors.ConfigurationError("Cannot replace '%s': UUID %s not present" 1654 % (item.name, item.uuid))
1655
def _SetInstanceStatus(self, inst_uuid, status, disks_active,
                       admin_state_source):
  """Set the instance's status to a given value.

  @param inst_uuid: UUID of the instance to update
  @param status: new admin state, or None to leave it unchanged
  @param disks_active: new disks-active flag, or None to leave it unchanged
  @param admin_state_source: origin of the state change, or None
  @rtype: L{objects.Instance}
  @return: the updated instance object

  """
  # closure for utils.Retry: WConfd returns the updated instance dict,
  # or None, in which case we retry
  def WithRetry():
    result = self._wconfd.SetInstanceStatus(inst_uuid, status,
                                            disks_active, admin_state_source)
    # the locally cached configuration no longer matches WConfd's view
    self.OutDate()

    if result is None:
      raise utils.RetryAgain()
    else:
      return result
  return objects.Instance.FromDict(utils.Retry(WithRetry, 0.1, 30))
def MarkInstanceUp(self, inst_uuid):
  """Mark the instance status to up in the config.

  This also sets the instance disks active flag.

  @type inst_uuid: string
  @param inst_uuid: UUID of the instance to mark up
  @rtype: L{objects.Instance}
  @return: the updated instance object

  """
  return self._SetInstanceStatus(inst_uuid, constants.ADMINST_UP, True,
                                 constants.ADMIN_SOURCE)
1686
def MarkInstanceOffline(self, inst_uuid):
  """Mark the instance status to down in the config.

  This also clears the instance disks active flag.

  @type inst_uuid: string
  @param inst_uuid: UUID of the instance to mark offline
  @rtype: L{objects.Instance}
  @return: the updated instance object

  """
  return self._SetInstanceStatus(inst_uuid, constants.ADMINST_OFFLINE, False,
                                 constants.ADMIN_SOURCE)
1698
def RemoveInstance(self, inst_uuid):
  """Remove the instance from the configuration.

  @type inst_uuid: string
  @param inst_uuid: UUID of the instance to remove

  """
  # delegate the removal to WConfd, retrying the call
  utils.SimpleRetry(True, self._wconfd.RemoveInstance, 0.1, 30,
                    args=[inst_uuid])
  # the locally cached configuration no longer matches WConfd's view
  self.OutDate()
1706 1707 @ConfigSync()
def RenameInstance(self, inst_uuid, new_name):
  """Rename an instance.

  This needs to be done in ConfigWriter and not by RemoveInstance
  combined with AddInstance as only we can guarantee an atomic
  rename.

  @type inst_uuid: string
  @param inst_uuid: UUID of the instance to rename
  @type new_name: string
  @param new_name: the new instance name

  """
  if inst_uuid not in self._ConfigData().instances:
    raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)

  inst = self._ConfigData().instances[inst_uuid]
  inst.name = new_name

  # plain iteration; the previous enumerate() discarded its index
  for disk in self._UnlockedGetInstanceDisks(inst_uuid):
    if disk.dev_type in [constants.DT_FILE, constants.DT_SHARED_FILE]:
      # rename the file paths in logical and physical id
      file_storage_dir = os.path.dirname(os.path.dirname(disk.logical_id[1]))
      disk.logical_id = (disk.logical_id[0],
                         utils.PathJoin(file_storage_dir, inst.name,
                                        os.path.basename(disk.logical_id[1])))

  # Force update of ssconf files
  self._ConfigData().cluster.serial_no += 1
1733
def MarkInstanceDown(self, inst_uuid):
  """Mark the status of an instance to down in the configuration.

  This does not touch the instance disks active flag, as shut down instances
  can still have active disks.

  @type inst_uuid: string
  @param inst_uuid: UUID of the instance to mark down
  @rtype: L{objects.Instance}
  @return: the updated instance object

  """
  # disks_active=None leaves the disks flag untouched
  return self._SetInstanceStatus(inst_uuid, constants.ADMINST_DOWN, None,
                                 constants.ADMIN_SOURCE)
1746
def MarkInstanceUserDown(self, inst_uuid):
  """Mark the status of an instance to user down in the configuration.

  This does not touch the instance disks active flag, as user shut
  down instances can still have active disks.

  @type inst_uuid: string
  @param inst_uuid: UUID of the instance to mark user-down

  """

  # disks_active=None leaves the disks flag untouched; note that unlike
  # the other Mark* methods this one does not return the updated object
  self._SetInstanceStatus(inst_uuid, constants.ADMINST_DOWN, None,
                          constants.USER_SOURCE)
1757
def MarkInstanceDisksActive(self, inst_uuid):
  """Mark the status of instance disks active.

  @type inst_uuid: string
  @param inst_uuid: UUID of the instance whose disks to mark active
  @rtype: L{objects.Instance}
  @return: the updated instance object

  """
  # status=None leaves the admin state itself untouched
  return self._SetInstanceStatus(inst_uuid, None, True, None)
1766
def MarkInstanceDisksInactive(self, inst_uuid):
  """Mark the status of instance disks inactive.

  @type inst_uuid: string
  @param inst_uuid: UUID of the instance whose disks to mark inactive
  @rtype: L{objects.Instance}
  @return: the updated instance object

  """
  # status=None leaves the admin state itself untouched
  return self._SetInstanceStatus(inst_uuid, None, False, None)
1775
1776 - def _UnlockedGetInstanceList(self):
1777 """Get the list of instances. 1778 1779 This function is for internal use, when the config lock is already held. 1780 1781 """ 1782 return self._ConfigData().instances.keys()
1783 1784 @ConfigSync(shared=1)
def GetInstanceList(self):
  """Get the list of instances.

  @return: array of instances, ex. ['instance2-uuid', 'instance1-uuid']

  """
  return self._UnlockedGetInstanceList()
1792
def ExpandInstanceName(self, short_name):
  """Attempt to expand an incomplete instance name.

  @type short_name: string
  @param short_name: (possibly partial) instance name to expand
  @rtype: tuple
  @return: (uuid, full name) of the matching instance, or (None, None)

  """
  # Locking is done in L{ConfigWriter.GetAllInstancesInfo}
  all_insts = self.GetAllInstancesInfo().values()
  expanded_name = _MatchNameComponentIgnoreCase(
    short_name, [inst.name for inst in all_insts])

  if expanded_name is not None:
    # there has to be exactly one instance with that name;
    # next(...) works on Python 2 and 3, unlike filter(...)[0]
    inst = next(n for n in all_insts if n.name == expanded_name)
    return (inst.uuid, inst.name)
  else:
    return (None, None)
1808
1809 - def _UnlockedGetInstanceInfo(self, inst_uuid):
1810 """Returns information about an instance. 1811 1812 This function is for internal use, when the config lock is already held. 1813 1814 """ 1815 if inst_uuid not in self._ConfigData().instances: 1816 return None 1817 1818 return self._ConfigData().instances[inst_uuid]
1819 1820 @ConfigSync(shared=1)
def GetInstanceInfo(self, inst_uuid):
  """Returns information about an instance.

  It takes the information from the configuration file. Other information of
  an instance are taken from the live systems.

  @param inst_uuid: UUID of the instance

  @rtype: L{objects.Instance}
  @return: the instance object

  """
  return self._UnlockedGetInstanceInfo(inst_uuid)
1834 1835 @ConfigSync(shared=1)
def GetInstanceNodeGroups(self, inst_uuid, primary_only=False):
  """Returns set of node group UUIDs for instance's nodes.

  @rtype: frozenset

  """
  instance = self._UnlockedGetInstanceInfo(inst_uuid)
  if not instance:
    raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)

  if primary_only:
    node_uuids = [instance.primary_node]
  else:
    node_uuids = self._UnlockedGetInstanceNodes(instance.uuid)

  groups = set()
  for node_uuid in node_uuids:
    groups.add(self._UnlockedGetNodeInfo(node_uuid).group)
  return frozenset(groups)
1853 1854 @ConfigSync(shared=1)
def GetInstanceNetworks(self, inst_uuid):
  """Returns set of network UUIDs for instance's nics.

  @rtype: frozenset

  """
  instance = self._UnlockedGetInstanceInfo(inst_uuid)
  if not instance:
    raise errors.ConfigurationError("Unknown instance '%s'" % inst_uuid)

  # NICs without a network (falsy value) are skipped
  return frozenset(nic.network for nic in instance.nics if nic.network)
1871 1872 @ConfigSync(shared=1)
def GetMultiInstanceInfo(self, inst_uuids):
  """Get the configuration of multiple instances.

  @param inst_uuids: list of instance UUIDs
  @rtype: list
  @return: list of tuples (instance UUID, instance_info), where
      instance_info is what would GetInstanceInfo return for the
      node, while keeping the original order

  """
  result = []
  for inst_uuid in inst_uuids:
    result.append((inst_uuid, self._UnlockedGetInstanceInfo(inst_uuid)))
  return result
1884 1885 @ConfigSync(shared=1)
def GetMultiInstanceInfoByName(self, inst_names):
  """Get the configuration of multiple instances.

  @param inst_names: list of instance names
  @rtype: list
  @return: list of tuples (instance, instance_info), where
      instance_info is what would GetInstanceInfo return for the
      node, while keeping the original order

  """
  result = []
  for name in inst_names:
    instance = self._UnlockedGetInstanceInfoByName(name)
    if not instance:
      raise errors.ConfigurationError("Instance data of instance '%s'"
                                      " not found." % name)
    result.append((instance.uuid, instance))
  return result
1905 1906 @ConfigSync(shared=1)
def GetAllInstancesInfo(self):
  """Get the configuration of all instances.

  @rtype: dict
  @return: dict of (instance, instance_info), where instance_info is what
      would GetInstanceInfo return for the node

  """
  return self._UnlockedGetAllInstancesInfo()
1916
1917 - def _UnlockedGetAllInstancesInfo(self):
1918 my_dict = dict([(inst_uuid, self._UnlockedGetInstanceInfo(inst_uuid)) 1919 for inst_uuid in self._UnlockedGetInstanceList()]) 1920 return my_dict
1921 1922 @ConfigSync(shared=1)
def GetInstancesInfoByFilter(self, filter_fn):
  """Get instance configuration with a filter.

  @type filter_fn: callable
  @param filter_fn: Filter function receiving instance object as parameter,
      returning boolean. Important: this function is called while the
      configuration locks is held. It must not do any complex work or call
      functions potentially leading to a deadlock. Ideally it doesn't call any
      other functions and just compares instance attributes.

  """
  selected = {}
  for uuid, inst in self._ConfigData().instances.items():
    if filter_fn(inst):
      selected[uuid] = inst
  return selected
1937 1938 @ConfigSync(shared=1)
def GetInstanceInfoByName(self, inst_name):
  """Get the L{objects.Instance} object for a named instance.

  @param inst_name: name of the instance to get information for
  @type inst_name: string
  @return: the corresponding L{objects.Instance} instance or None if no
      information is available

  """
  return self._UnlockedGetInstanceInfoByName(inst_name)
1949
1950 - def _UnlockedGetInstanceInfoByName(self, inst_name):
1951 for inst in self._UnlockedGetAllInstancesInfo().values(): 1952 if inst.name == inst_name: 1953 return inst 1954 return None
1955
1956 - def _UnlockedGetInstanceName(self, inst_uuid):
1957 inst_info = self._UnlockedGetInstanceInfo(inst_uuid) 1958 if inst_info is None: 1959 raise errors.OpExecError("Unknown instance: %s" % inst_uuid) 1960 return inst_info.name
1961 1962 @ConfigSync(shared=1)
def GetInstanceName(self, inst_uuid):
  """Gets the instance name for the passed instance.

  @param inst_uuid: instance UUID to get name for
  @type inst_uuid: string
  @rtype: string
  @return: instance name

  """
  return self._UnlockedGetInstanceName(inst_uuid)
1973 1974 @ConfigSync(shared=1)
def GetInstanceNames(self, inst_uuids):
  """Gets the instance names for the passed list of nodes.

  @param inst_uuids: list of instance UUIDs to get names for
  @type inst_uuids: list of strings
  @rtype: list of strings
  @return: list of instance names

  """
  return self._UnlockedGetInstanceNames(inst_uuids)
1985
def SetInstancePrimaryNode(self, inst_uuid, target_node_uuid):
  """Sets the primary node of an existing instance

  @param inst_uuid: instance UUID
  @type inst_uuid: string
  @param target_node_uuid: the new primary node UUID
  @type target_node_uuid: string

  """
  # delegate to WConfd, retrying the call
  utils.SimpleRetry(True, self._wconfd.SetInstancePrimaryNode, 0.1, 30,
                    args=[inst_uuid, target_node_uuid])
  # the locally cached configuration no longer matches WConfd's view
  self.OutDate()
1998 1999 @ConfigSync()
2000 - def SetDiskNodes(self, disk_uuid, nodes):
2001 """Sets the nodes of an existing disk 2002 2003 @param disk_uuid: disk UUID 2004 @type disk_uuid: string 2005 @param nodes: the new nodes for the disk 2006 @type nodes: list of node uuids 2007 2008 """ 2009 self._UnlockedGetDiskInfo(disk_uuid).nodes = nodes
2010 2011 @ConfigSync()
2012 - def SetDiskLogicalID(self, disk_uuid, logical_id):
2013 """Sets the logical_id of an existing disk 2014 2015 @param disk_uuid: disk UUID 2016 @type disk_uuid: string 2017 @param logical_id: the new logical_id for the disk 2018 @type logical_id: tuple 2019 2020 """ 2021 disk = self._UnlockedGetDiskInfo(disk_uuid) 2022 if disk is None: 2023 raise errors.ConfigurationError("Unknown disk UUID '%s'" % disk_uuid) 2024 2025 if len(disk.logical_id) != len(logical_id): 2026 raise errors.ProgrammerError("Logical ID format mismatch\n" 2027 "Existing logical ID: %s\n" 2028 "New logical ID: %s", disk.logical_id, 2029 logical_id) 2030 2031 disk.logical_id = logical_id
2032
2033 - def _UnlockedGetInstanceNames(self, inst_uuids):
2034 return [self._UnlockedGetInstanceName(uuid) for uuid in inst_uuids]
2035
  def _UnlockedAddNode(self, node, ec_id):
    """Add a node to the configuration.

    The configuration lock must already be held.

    @type node: L{objects.Node}
    @param node: a Node instance
    @type ec_id: string
    @param ec_id: execution context id, used when a UUID has to be
        generated for the node

    """
    logging.info("Adding node %s to configuration", node.name)

    # Generate a UUID for the node if it does not have one yet
    self._EnsureUUID(node, ec_id)

    node.serial_no = 1
    node.ctime = node.mtime = time.time()
    # Group membership must be recorded before the node itself is stored
    self._UnlockedAddNodeToGroup(node.uuid, node.group)
    assert node.uuid in self._ConfigData().nodegroups[node.group].members
    self._ConfigData().nodes[node.uuid] = node
    self._ConfigData().cluster.serial_no += 1
2053 2054 @ConfigSync()
2055 - def AddNode(self, node, ec_id):
2056 """Add a node to the configuration. 2057 2058 @type node: L{objects.Node} 2059 @param node: a Node instance 2060 2061 """ 2062 self._UnlockedAddNode(node, ec_id)
2063 2064 @ConfigSync()
2065 - def RemoveNode(self, node_uuid):
2066 """Remove a node from the configuration. 2067 2068 """ 2069 logging.info("Removing node %s from configuration", node_uuid) 2070 2071 if node_uuid not in self._ConfigData().nodes: 2072 raise errors.ConfigurationError("Unknown node '%s'" % node_uuid) 2073 2074 self._UnlockedRemoveNodeFromGroup(self._ConfigData().nodes[node_uuid]) 2075 del self._ConfigData().nodes[node_uuid] 2076 self._ConfigData().cluster.serial_no += 1
2077
2078 - def ExpandNodeName(self, short_name):
2079 """Attempt to expand an incomplete node name into a node UUID. 2080 2081 """ 2082 # Locking is done in L{ConfigWriter.GetAllNodesInfo} 2083 all_nodes = self.GetAllNodesInfo().values() 2084 expanded_name = _MatchNameComponentIgnoreCase( 2085 short_name, [node.name for node in all_nodes]) 2086 2087 if expanded_name is not None: 2088 # there has to be exactly one node with that name 2089 node = (filter(lambda n: n.name == expanded_name, all_nodes)[0]) 2090 return (node.uuid, node.name) 2091 else: 2092 return (None, None)
2093
2094 - def _UnlockedGetNodeInfo(self, node_uuid):
2095 """Get the configuration of a node, as stored in the config. 2096 2097 This function is for internal use, when the config lock is already 2098 held. 2099 2100 @param node_uuid: the node UUID 2101 2102 @rtype: L{objects.Node} 2103 @return: the node object 2104 2105 """ 2106 if node_uuid not in self._ConfigData().nodes: 2107 return None 2108 2109 return self._ConfigData().nodes[node_uuid]
2110 2111 @ConfigSync(shared=1)
2112 - def GetNodeInfo(self, node_uuid):
2113 """Get the configuration of a node, as stored in the config. 2114 2115 This is just a locked wrapper over L{_UnlockedGetNodeInfo}. 2116 2117 @param node_uuid: the node UUID 2118 2119 @rtype: L{objects.Node} 2120 @return: the node object 2121 2122 """ 2123 return self._UnlockedGetNodeInfo(node_uuid)
2124 2125 @ConfigSync(shared=1)
2126 - def GetNodeInstances(self, node_uuid):
2127 """Get the instances of a node, as stored in the config. 2128 2129 @param node_uuid: the node UUID 2130 2131 @rtype: (list, list) 2132 @return: a tuple with two lists: the primary and the secondary instances 2133 2134 """ 2135 pri = [] 2136 sec = [] 2137 for inst in self._ConfigData().instances.values(): 2138 if inst.primary_node == node_uuid: 2139 pri.append(inst.uuid) 2140 if node_uuid in self._UnlockedGetInstanceSecondaryNodes(inst.uuid): 2141 sec.append(inst.uuid) 2142 return (pri, sec)
2143 2144 @ConfigSync(shared=1)
2145 - def GetNodeGroupInstances(self, uuid, primary_only=False):
2146 """Get the instances of a node group. 2147 2148 @param uuid: Node group UUID 2149 @param primary_only: Whether to only consider primary nodes 2150 @rtype: frozenset 2151 @return: List of instance UUIDs in node group 2152 2153 """ 2154 if primary_only: 2155 nodes_fn = lambda inst: [inst.primary_node] 2156 else: 2157 nodes_fn = lambda inst: self._UnlockedGetInstanceNodes(inst.uuid) 2158 2159 return frozenset(inst.uuid 2160 for inst in self._ConfigData().instances.values() 2161 for node_uuid in nodes_fn(inst) 2162 if self._UnlockedGetNodeInfo(node_uuid).group == uuid)
2163
2164 - def _UnlockedGetHvparamsString(self, hvname):
2165 """Return the string representation of the list of hyervisor parameters of 2166 the given hypervisor. 2167 2168 @see: C{GetHvparams} 2169 2170 """ 2171 result = "" 2172 hvparams = self._ConfigData().cluster.hvparams[hvname] 2173 for key in hvparams: 2174 result += "%s=%s\n" % (key, hvparams[key]) 2175 return result
2176 2177 @ConfigSync(shared=1)
2178 - def GetHvparamsString(self, hvname):
2179 """Return the hypervisor parameters of the given hypervisor. 2180 2181 @type hvname: string 2182 @param hvname: name of a hypervisor 2183 @rtype: string 2184 @return: string containing key-value-pairs, one pair on each line; 2185 format: KEY=VALUE 2186 2187 """ 2188 return self._UnlockedGetHvparamsString(hvname)
2189
2190 - def _UnlockedGetNodeList(self):
2191 """Return the list of nodes which are in the configuration. 2192 2193 This function is for internal use, when the config lock is already 2194 held. 2195 2196 @rtype: list 2197 2198 """ 2199 return self._ConfigData().nodes.keys()
2200 2201 @ConfigSync(shared=1)
2202 - def GetNodeList(self):
2203 """Return the list of nodes which are in the configuration. 2204 2205 """ 2206 return self._UnlockedGetNodeList()
2207
2208 - def _UnlockedGetOnlineNodeList(self):
2209 """Return the list of nodes which are online. 2210 2211 """ 2212 all_nodes = [self._UnlockedGetNodeInfo(node) 2213 for node in self._UnlockedGetNodeList()] 2214 return [node.uuid for node in all_nodes if not node.offline]
2215 2216 @ConfigSync(shared=1)
2217 - def GetOnlineNodeList(self):
2218 """Return the list of nodes which are online. 2219 2220 """ 2221 return self._UnlockedGetOnlineNodeList()

  @ConfigSync(shared=1)
  def GetVmCapableNodeList(self):
    """Return the list of UUIDs of the nodes which are vm capable.

    @rtype: list of strings
    @return: UUIDs of all nodes with the vm_capable flag set

    """
    all_nodes = [self._UnlockedGetNodeInfo(node)
                 for node in self._UnlockedGetNodeList()]
    return [node.uuid for node in all_nodes if node.vm_capable]
2231 2232 @ConfigSync(shared=1)
2233 - def GetNonVmCapableNodeList(self):
2234 """Return the list of nodes' uuids which are not vm capable. 2235 2236 """ 2237 all_nodes = [self._UnlockedGetNodeInfo(node) 2238 for node in self._UnlockedGetNodeList()] 2239 return [node.uuid for node in all_nodes if not node.vm_capable]
2240 2241 @ConfigSync(shared=1)
2242 - def GetNonVmCapableNodeNameList(self):
2243 """Return the list of nodes' names which are not vm capable. 2244 2245 """ 2246 all_nodes = [self._UnlockedGetNodeInfo(node) 2247 for node in self._UnlockedGetNodeList()] 2248 return [node.name for node in all_nodes if not node.vm_capable]
2249 2250 @ConfigSync(shared=1)
2251 - def GetMultiNodeInfo(self, node_uuids):
2252 """Get the configuration of multiple nodes. 2253 2254 @param node_uuids: list of node UUIDs 2255 @rtype: list 2256 @return: list of tuples of (node, node_info), where node_info is 2257 what would GetNodeInfo return for the node, in the original 2258 order 2259 2260 """ 2261 return [(uuid, self._UnlockedGetNodeInfo(uuid)) for uuid in node_uuids]
2262
2263 - def _UnlockedGetAllNodesInfo(self):
2264 """Gets configuration of all nodes. 2265 2266 @note: See L{GetAllNodesInfo} 2267 2268 """ 2269 return dict([(node_uuid, self._UnlockedGetNodeInfo(node_uuid)) 2270 for node_uuid in self._UnlockedGetNodeList()])
2271 2272 @ConfigSync(shared=1)
2273 - def GetAllNodesInfo(self):
2274 """Get the configuration of all nodes. 2275 2276 @rtype: dict 2277 @return: dict of (node, node_info), where node_info is what 2278 would GetNodeInfo return for the node 2279 2280 """ 2281 return self._UnlockedGetAllNodesInfo()
2282
2283 - def _UnlockedGetNodeInfoByName(self, node_name):
2284 for node in self._UnlockedGetAllNodesInfo().values(): 2285 if node.name == node_name: 2286 return node 2287 return None
2288 2289 @ConfigSync(shared=1)
2290 - def GetNodeInfoByName(self, node_name):
2291 """Get the L{objects.Node} object for a named node. 2292 2293 @param node_name: name of the node to get information for 2294 @type node_name: string 2295 @return: the corresponding L{objects.Node} instance or None if no 2296 information is available 2297 2298 """ 2299 return self._UnlockedGetNodeInfoByName(node_name)
2300 2301 @ConfigSync(shared=1)
2302 - def GetNodeGroupInfoByName(self, nodegroup_name):
2303 """Get the L{objects.NodeGroup} object for a named node group. 2304 2305 @param nodegroup_name: name of the node group to get information for 2306 @type nodegroup_name: string 2307 @return: the corresponding L{objects.NodeGroup} instance or None if no 2308 information is available 2309 2310 """ 2311 for nodegroup in self._UnlockedGetAllNodeGroupsInfo().values(): 2312 if nodegroup.name == nodegroup_name: 2313 return nodegroup 2314 return None
2315
2316 - def _UnlockedGetNodeName(self, node_spec):
2317 if isinstance(node_spec, objects.Node): 2318 return node_spec.name 2319 elif isinstance(node_spec, basestring): 2320 node_info = self._UnlockedGetNodeInfo(node_spec) 2321 if node_info is None: 2322 raise errors.OpExecError("Unknown node: %s" % node_spec) 2323 return node_info.name 2324 else: 2325 raise errors.ProgrammerError("Can't handle node spec '%s'" % node_spec)
2326 2327 @ConfigSync(shared=1)
2328 - def GetNodeName(self, node_spec):
2329 """Gets the node name for the passed node. 2330 2331 @param node_spec: node to get names for 2332 @type node_spec: either node UUID or a L{objects.Node} object 2333 @rtype: string 2334 @return: node name 2335 2336 """ 2337 return self._UnlockedGetNodeName(node_spec)
2338
2339 - def _UnlockedGetNodeNames(self, node_specs):
2340 return [self._UnlockedGetNodeName(node_spec) for node_spec in node_specs]
2341 2342 @ConfigSync(shared=1)
2343 - def GetNodeNames(self, node_specs):
2344 """Gets the node names for the passed list of nodes. 2345 2346 @param node_specs: list of nodes to get names for 2347 @type node_specs: list of either node UUIDs or L{objects.Node} objects 2348 @rtype: list of strings 2349 @return: list of node names 2350 2351 """ 2352 return self._UnlockedGetNodeNames(node_specs)
2353 2354 @ConfigSync(shared=1)
2355 - def GetNodeGroupsFromNodes(self, node_uuids):
2356 """Returns groups for a list of nodes. 2357 2358 @type node_uuids: list of string 2359 @param node_uuids: List of node UUIDs 2360 @rtype: frozenset 2361 2362 """ 2363 return frozenset(self._UnlockedGetNodeInfo(uuid).group 2364 for uuid in node_uuids)
2365
2366 - def _UnlockedGetMasterCandidateUuids(self):
2367 """Get the list of UUIDs of master candidates. 2368 2369 @rtype: list of strings 2370 @return: list of UUIDs of all master candidates. 2371 2372 """ 2373 return [node.uuid for node in self._ConfigData().nodes.values() 2374 if node.master_candidate]
2375 2376 @ConfigSync(shared=1)
2377 - def GetMasterCandidateUuids(self):
2378 """Get the list of UUIDs of master candidates. 2379 2380 @rtype: list of strings 2381 @return: list of UUIDs of all master candidates. 2382 2383 """ 2384 return self._UnlockedGetMasterCandidateUuids()
2385
2386 - def _UnlockedGetMasterCandidateStats(self, exceptions=None):
2387 """Get the number of current and maximum desired and possible candidates. 2388 2389 @type exceptions: list 2390 @param exceptions: if passed, list of nodes that should be ignored 2391 @rtype: tuple 2392 @return: tuple of (current, desired and possible, possible) 2393 2394 """ 2395 mc_now = mc_should = mc_max = 0 2396 for node in self._ConfigData().nodes.values(): 2397 if exceptions and node.uuid in exceptions: 2398 continue 2399 if not (node.offline or node.drained) and node.master_capable: 2400 mc_max += 1 2401 if node.master_candidate: 2402 mc_now += 1 2403 mc_should = min(mc_max, self._ConfigData().cluster.candidate_pool_size) 2404 return (mc_now, mc_should, mc_max)

  @ConfigSync(shared=1)
  def GetMasterCandidateStats(self, exceptions=None):
    """Get the number of current and maximum possible candidates.

    This is just a wrapper over L{_UnlockedGetMasterCandidateStats}.

    @type exceptions: list
    @param exceptions: if passed, list of nodes that should be ignored
    @rtype: tuple
    @return: tuple of (current, desired, possible) candidate counts, as
        returned by L{_UnlockedGetMasterCandidateStats}

    """
    return self._UnlockedGetMasterCandidateStats(exceptions)

  @ConfigSync()
  def MaintainCandidatePool(self, exception_node_uuids):
    """Try to grow the candidate pool to the desired size.

    @type exception_node_uuids: list
    @param exception_node_uuids: if passed, list of nodes that should be ignored
    @rtype: list
    @return: list with the adjusted nodes (L{objects.Node} instances)

    """
    # NOTE: the stats tuple is (current, desired, possible); the local
    # name "mc_max" therefore actually holds the *desired* pool size
    mc_now, mc_max, _ = self._UnlockedGetMasterCandidateStats(
        exception_node_uuids)
    mod_list = []
    if mc_now < mc_max:
      node_list = self._ConfigData().nodes.keys()
      # shuffle so that repeated calls do not always promote the same nodes
      random.shuffle(node_list)
      for uuid in node_list:
        if mc_now >= mc_max:
          break
        node = self._ConfigData().nodes[uuid]
        if (node.master_candidate or node.offline or node.drained or
            node.uuid in exception_node_uuids or not node.master_capable):
          continue
        mod_list.append(node)
        node.master_candidate = True
        node.serial_no += 1
        mc_now += 1
      if mc_now != mc_max:
        # this should not happen
        logging.warning("Warning: MaintainCandidatePool didn't manage to"
                        " fill the candidate pool (%d/%d)", mc_now, mc_max)
    if mod_list:
      self._ConfigData().cluster.serial_no += 1

    return mod_list
2455
2456 - def _UnlockedAddNodeToGroup(self, node_uuid, nodegroup_uuid):
2457 """Add a given node to the specified group. 2458 2459 """ 2460 if nodegroup_uuid not in self._ConfigData().nodegroups: 2461 # This can happen if a node group gets deleted between its lookup and 2462 # when we're adding the first node to it, since we don't keep a lock in 2463 # the meantime. It's ok though, as we'll fail cleanly if the node group 2464 # is not found anymore. 2465 raise errors.OpExecError("Unknown node group: %s" % nodegroup_uuid) 2466 if node_uuid not in self._ConfigData().nodegroups[nodegroup_uuid].members: 2467 self._ConfigData().nodegroups[nodegroup_uuid].members.append(node_uuid)
2468
2469 - def _UnlockedRemoveNodeFromGroup(self, node):
2470 """Remove a given node from its group. 2471 2472 """ 2473 nodegroup = node.group 2474 if nodegroup not in self._ConfigData().nodegroups: 2475 logging.warning("Warning: node '%s' has unknown node group '%s'" 2476 " (while being removed from it)", node.uuid, nodegroup) 2477 nodegroup_obj = self._ConfigData().nodegroups[nodegroup] 2478 if node.uuid not in nodegroup_obj.members: 2479 logging.warning("Warning: node '%s' not a member of its node group '%s'" 2480 " (while being removed from it)", node.uuid, nodegroup) 2481 else: 2482 nodegroup_obj.members.remove(node.uuid)

  @ConfigSync()
  def AssignGroupNodes(self, mods):
    """Changes the group of a number of nodes.

    Runs in two phases: first all (node, old group, new group) triples
    are resolved and validated, then the changes are applied, so that
    either everything is checked or nothing is modified.

    @type mods: list of tuples; (node name, new group UUID)
    @param mods: Node membership modifications
    @raise errors.ConfigurationError: if a node or node group UUID
        cannot be found

    """
    groups = self._ConfigData().nodegroups
    nodes = self._ConfigData().nodes

    resmod = []

    # Try to resolve UUIDs first
    for (node_uuid, new_group_uuid) in mods:
      try:
        node = nodes[node_uuid]
      except KeyError:
        raise errors.ConfigurationError("Unable to find node '%s'" % node_uuid)

      if node.group == new_group_uuid:
        # Node is being assigned to its current group
        logging.debug("Node '%s' was assigned to its current group (%s)",
                      node_uuid, node.group)
        continue

      # Try to find current group of node
      try:
        old_group = groups[node.group]
      except KeyError:
        raise errors.ConfigurationError("Unable to find old group '%s'" %
                                        node.group)

      # Try to find new group for node
      try:
        new_group = groups[new_group_uuid]
      except KeyError:
        raise errors.ConfigurationError("Unable to find new group '%s'" %
                                        new_group_uuid)

      assert node.uuid in old_group.members, \
        ("Inconsistent configuration: node '%s' not listed in members for its"
         " old group '%s'" % (node.uuid, old_group.uuid))
      assert node.uuid not in new_group.members, \
        ("Inconsistent configuration: node '%s' already listed in members for"
         " its new group '%s'" % (node.uuid, new_group.uuid))

      resmod.append((node, old_group, new_group))

    # Apply changes
    for (node, old_group, new_group) in resmod:
      assert node.uuid != new_group.uuid and old_group.uuid != new_group.uuid, \
        "Assigning to current group is not possible"

      node.group = new_group.uuid

      # Update members of involved groups
      if node.uuid in old_group.members:
        old_group.members.remove(node.uuid)
      if node.uuid not in new_group.members:
        new_group.members.append(node.uuid)

    # Update timestamps and serials (only once per node/group object)
    now = time.time()
    for obj in frozenset(itertools.chain(*resmod)): # pylint: disable=W0142
      obj.serial_no += 1
      obj.mtime = now

    # Force ssconf update
    self._ConfigData().cluster.serial_no += 1
2554
2555 - def _BumpSerialNo(self):
2556 """Bump up the serial number of the config. 2557 2558 """ 2559 self._ConfigData().serial_no += 1 2560 self._ConfigData().mtime = time.time()
2561
2562 - def _AllUUIDObjects(self):
2563 """Returns all objects with uuid attributes. 2564 2565 """ 2566 return (self._ConfigData().instances.values() + 2567 self._ConfigData().nodes.values() + 2568 self._ConfigData().nodegroups.values() + 2569 self._ConfigData().networks.values() + 2570 self._ConfigData().disks.values() + 2571 self._AllNICs() + 2572 [self._ConfigData().cluster])
2573
2574 - def GetConfigManager(self, shared=False, forcelock=False):
2575 """Returns a ConfigManager, which is suitable to perform a synchronized 2576 block of configuration operations. 2577 2578 WARNING: This blocks all other configuration operations, so anything that 2579 runs inside the block should be very fast, preferably not using any IO. 2580 """ 2581 2582 return ConfigManager(self, shared=shared, forcelock=forcelock)
2583
2584 - def _AddLockCount(self, count):
2585 self._lock_count += count 2586 return self._lock_count
2587
2588 - def _LockCount(self):
2589 return self._lock_count
2590
  def _OpenConfig(self, shared, force=False):
    """Read the config data from WConfd or disk.

    Nested calls only increase the lock count; the configuration is read
    once, when the first lock level is taken.

    @type shared: bool
    @param shared: whether a shared (read) rather than an exclusive
        (write) lock is requested
    @type force: bool
    @param force: force acquisition of the WConfd config lock even when
        a shared lock is already held
        (NOTE(review): inferred from the flow below — confirm intended
        semantics against the WConfd design docs)

    """
    if self._AddLockCount(1) > 1:
      if self._lock_current_shared and not shared:
        # cannot silently upgrade a shared lock to an exclusive one
        self._AddLockCount(-1)
        raise errors.ConfigurationError("Can't request an exclusive"
                                        " configuration lock while holding"
                                        " shared")
      elif not force or self._lock_forced or not shared or self._offline:
        return # we already have the lock, do nothing
    else:
      self._lock_current_shared = shared
    if force:
      self._lock_forced = True
    # Read the configuration data. If offline, read the file directly.
    # If online, call WConfd.
    if self._offline:
      try:
        raw_data = utils.ReadFile(self._cfg_file)
        data_dict = serializer.Load(raw_data)
        # Make sure the configuration has the right version
        ValidateConfig(data_dict)
        data = objects.ConfigData.FromDict(data_dict)
      except errors.ConfigVersionMismatch:
        raise
      except Exception, err:
        raise errors.ConfigurationError(err)

      self._cfg_id = utils.GetFileID(path=self._cfg_file)

      if (not hasattr(data, "cluster") or
          not hasattr(data.cluster, "rsahostkeypub")):
        raise errors.ConfigurationError("Incomplete configuration"
                                        " (missing cluster.rsahostkeypub)")

      if not data.cluster.master_node in data.nodes:
        msg = ("The configuration denotes node %s as master, but does not"
               " contain information about this node" %
               data.cluster.master_node)
        raise errors.ConfigurationError(msg)

      master_info = data.nodes[data.cluster.master_node]
      if master_info.name != self._my_hostname and not self._accept_foreign:
        msg = ("The configuration denotes node %s as master, while my"
               " hostname is %s; opening a foreign configuration is only"
               " possible in accept_foreign mode" %
               (master_info.name, self._my_hostname))
        raise errors.ConfigurationError(msg)

      self._SetConfigData(data)

      # Upgrade configuration if needed
      self._UpgradeConfig(saveafter=True)
    else:
      if shared and not force:
        # shared read: only fetch when the local copy is missing
        if self._config_data is None:
          logging.debug("Requesting config, as I have no up-to-date copy")
          dict_data = self._wconfd.ReadConfig()
          logging.debug("Configuration received")
        else:
          logging.debug("My config copy is up to date.")
          dict_data = None
      else:
        # poll until we acquire the lock
        while True:
          logging.debug("Receiving config from WConfd.LockConfig [shared=%s]",
                        bool(shared))
          dict_data = \
              self._wconfd.LockConfig(self._GetWConfdContext(), bool(shared))
          if dict_data is not None:
            logging.debug("Received config from WConfd.LockConfig")
            break
          # random sleep to avoid lock-step retries from multiple clients
          time.sleep(random.random())

      try:
        if dict_data is not None:
          self._SetConfigData(objects.ConfigData.FromDict(dict_data))
          self._UpgradeConfig()
      except Exception, err:
        raise errors.ConfigurationError(err)
2673
  def _CloseConfig(self, save):
    """Release resources relating the config data.

    @type save: bool
    @param save: whether to write the configuration before releasing
        the lock

    """
    if self._AddLockCount(-1) > 0:
      return # we still have the lock, do nothing
    if save:
      try:
        logging.debug("Writing configuration and unlocking it")
        self._WriteConfig(releaselock=True)
        logging.debug("Configuration write, unlock finished")
      except Exception, err:
        logging.critical("Can't write the configuration: %s", str(err))
        raise
    elif not self._offline and \
         not (self._lock_current_shared and not self._lock_forced):
      # online and holding a real (exclusive or forced) WConfd lock:
      # release it even though nothing was written
      logging.debug("Unlocking configuration without writing")
      self._wconfd.UnlockConfig(self._GetWConfdContext())
    self._lock_forced = False

  # TODO: To WConfd
  def _UpgradeConfig(self, saveafter=False):
    """Run any upgrade steps.

    This method performs both in-object upgrades and also update some data
    elements that need uniqueness across the whole configuration or interact
    with other objects.

    @type saveafter: bool
    @param saveafter: whether to write the configuration back if it was
        actually modified

    @warning: if 'saveafter' is 'True', this function will call
        L{_WriteConfig()} so it needs to be called only from a
        "safe" place.

    """
    # Keep a copy of the persistent part of _config_data to check for changes
    # Serialization doesn't guarantee order in dictionaries
    if saveafter:
      oldconf = copy.deepcopy(self._ConfigData().ToDict())
    else:
      oldconf = None

    # In-object upgrades
    self._ConfigData().UpgradeConfig()

    for item in self._AllUUIDObjects():
      if item.uuid is None:
        item.uuid = self._GenerateUniqueID(_UPGRADE_CONFIG_JID)
    if not self._ConfigData().nodegroups:
      default_nodegroup_name = constants.INITIAL_NODE_GROUP_NAME
      default_nodegroup = objects.NodeGroup(name=default_nodegroup_name,
                                            members=[])
      self._UnlockedAddNodeGroup(default_nodegroup, _UPGRADE_CONFIG_JID, True)
    for node in self._ConfigData().nodes.values():
      if not node.group:
        node.group = self._UnlockedLookupNodeGroup(None)
      # This is technically *not* an upgrade, but needs to be done both when
      # nodegroups are being added, and upon normally loading the config,
      # because the members list of a node group is discarded upon
      # serializing/deserializing the object.
      self._UnlockedAddNodeToGroup(node.uuid, node.group)

    if saveafter:
      modified = (oldconf != self._ConfigData().ToDict())
    else:
      modified = True # can't prove it didn't change, but doesn't matter
    if modified and saveafter:
      self._WriteConfig()
      self._UnlockedDropECReservations(_UPGRADE_CONFIG_JID)
    else:
      if self._offline:
        self._UnlockedVerifyConfigAndLog()
2744
  def _WriteConfig(self, destination=None, releaselock=False):
    """Write the configuration data to persistent storage.

    @type destination: string
    @param destination: config file path; defaults to this writer's
        standard config file
    @type releaselock: bool
    @param releaselock: when online, also release the WConfd config
        lock after writing

    """
    if destination is None:
      destination = self._cfg_file

    # Save the configuration data. If offline, write the file directly.
    # If online, call WConfd.
    if self._offline:
      self._BumpSerialNo()
      txt = serializer.DumpJson(
        self._ConfigData().ToDict(_with_private=True),
        private_encoder=serializer.EncodeWithPrivateFields
      )

      getents = self._getents()
      try:
        # the stored file ID detects concurrent modifications of the file
        fd = utils.SafeWriteFile(destination, self._cfg_id, data=txt,
                                 close=False, gid=getents.confd_gid, mode=0640)
      except errors.LockError:
        raise errors.ConfigurationError("The configuration file has been"
                                        " modified since the last write, cannot"
                                        " update")
      try:
        self._cfg_id = utils.GetFileID(fd=fd)
      finally:
        os.close(fd)
    else:
      try:
        if releaselock:
          res = self._wconfd.WriteConfigAndUnlock(self._GetWConfdContext(),
                                                  self._ConfigData().ToDict())
          if not res:
            logging.warning("WriteConfigAndUnlock indicates we already have"
                            " released the lock; assuming this was just a retry"
                            " and the initial call succeeded")
        else:
          self._wconfd.WriteConfig(self._GetWConfdContext(),
                                   self._ConfigData().ToDict())
      except errors.LockError:
        raise errors.ConfigurationError("The configuration file has been"
                                        " modified since the last write, cannot"
                                        " update")

    self.write_count += 1
2791
2792 - def _GetAllHvparamsStrings(self, hypervisors):
2793 """Get the hvparams of all given hypervisors from the config. 2794 2795 @type hypervisors: list of string 2796 @param hypervisors: list of hypervisor names 2797 @rtype: dict of strings 2798 @returns: dictionary mapping the hypervisor name to a string representation 2799 of the hypervisor's hvparams 2800 2801 """ 2802 hvparams = {} 2803 for hv in hypervisors: 2804 hvparams[hv] = self._UnlockedGetHvparamsString(hv) 2805 return hvparams
2806 2807 @staticmethod
2808 - def _ExtendByAllHvparamsStrings(ssconf_values, all_hvparams):
2809 """Extends the ssconf_values dictionary by hvparams. 2810 2811 @type ssconf_values: dict of strings 2812 @param ssconf_values: dictionary mapping ssconf_keys to strings 2813 representing the content of ssconf files 2814 @type all_hvparams: dict of strings 2815 @param all_hvparams: dictionary mapping hypervisor names to a string 2816 representation of their hvparams 2817 @rtype: same as ssconf_values 2818 @returns: the ssconf_values dictionary extended by hvparams 2819 2820 """ 2821 for hv in all_hvparams: 2822 ssconf_key = constants.SS_HVPARAMS_PREF + hv 2823 ssconf_values[ssconf_key] = all_hvparams[hv] 2824 return ssconf_values
2825
2826 - def _UnlockedGetSshPortMap(self, node_infos):
2827 node_ports = dict([(node.name, 2828 self._UnlockedGetNdParams(node).get( 2829 constants.ND_SSH_PORT)) 2830 for node in node_infos]) 2831 return node_ports
2832
  def _UnlockedGetSsconfValues(self):
    """Return the values needed by ssconf.

    Collects cluster-wide facts (node lists, IPs, hypervisor parameters,
    node groups, networks, SSH ports, ...) into the flat string-valued
    dictionary that is distributed as ssconf files.

    @rtype: dict
    @return: a dictionary with keys the ssconf names and values their
        associated value
    @raise errors.ConfigurationError: if any computed value is not a
        string

    """
    fn = "\n".join
    instance_names = utils.NiceSort(
      [inst.name for inst in
       self._UnlockedGetAllInstancesInfo().values()])
    node_infos = self._UnlockedGetAllNodesInfo().values()
    node_names = [node.name for node in node_infos]
    node_pri_ips = ["%s %s" % (ninfo.name, ninfo.primary_ip)
                    for ninfo in node_infos]
    node_snd_ips = ["%s %s" % (ninfo.name, ninfo.secondary_ip)
                    for ninfo in node_infos]
    node_vm_capable = ["%s=%s" % (ninfo.name, str(ninfo.vm_capable))
                       for ninfo in node_infos]

    instance_data = fn(instance_names)
    off_data = fn(node.name for node in node_infos if node.offline)
    on_data = fn(node.name for node in node_infos if not node.offline)
    mc_data = fn(node.name for node in node_infos if node.master_candidate)
    mc_ips_data = fn(node.primary_ip for node in node_infos
                     if node.master_candidate)
    node_data = fn(node_names)
    node_pri_ips_data = fn(node_pri_ips)
    node_snd_ips_data = fn(node_snd_ips)
    node_vm_capable_data = fn(node_vm_capable)

    cluster = self._ConfigData().cluster
    cluster_tags = fn(cluster.GetTags())

    master_candidates_certs = fn("%s=%s" % (mc_uuid, mc_cert)
                                 for mc_uuid, mc_cert
                                 in cluster.candidate_certs.items())

    hypervisor_list = fn(cluster.enabled_hypervisors)
    all_hvparams = self._GetAllHvparamsStrings(constants.HYPER_TYPES)

    uid_pool = uidpool.FormatUidPool(cluster.uid_pool, separator="\n")

    nodegroups = ["%s %s" % (nodegroup.uuid, nodegroup.name) for nodegroup in
                  self._ConfigData().nodegroups.values()]
    nodegroups_data = fn(utils.NiceSort(nodegroups))
    networks = ["%s %s" % (net.uuid, net.name) for net in
                self._ConfigData().networks.values()]
    networks_data = fn(utils.NiceSort(networks))

    ssh_ports = fn("%s=%s" % (node_name, port)
                   for node_name, port
                   in self._UnlockedGetSshPortMap(node_infos).items())

    ssconf_values = {
      constants.SS_CLUSTER_NAME: cluster.cluster_name,
      constants.SS_CLUSTER_TAGS: cluster_tags,
      constants.SS_FILE_STORAGE_DIR: cluster.file_storage_dir,
      constants.SS_SHARED_FILE_STORAGE_DIR: cluster.shared_file_storage_dir,
      constants.SS_GLUSTER_STORAGE_DIR: cluster.gluster_storage_dir,
      constants.SS_MASTER_CANDIDATES: mc_data,
      constants.SS_MASTER_CANDIDATES_IPS: mc_ips_data,
      constants.SS_MASTER_CANDIDATES_CERTS: master_candidates_certs,
      constants.SS_MASTER_IP: cluster.master_ip,
      constants.SS_MASTER_NETDEV: cluster.master_netdev,
      constants.SS_MASTER_NETMASK: str(cluster.master_netmask),
      constants.SS_MASTER_NODE: self._UnlockedGetNodeName(cluster.master_node),
      constants.SS_NODE_LIST: node_data,
      constants.SS_NODE_PRIMARY_IPS: node_pri_ips_data,
      constants.SS_NODE_SECONDARY_IPS: node_snd_ips_data,
      constants.SS_NODE_VM_CAPABLE: node_vm_capable_data,
      constants.SS_OFFLINE_NODES: off_data,
      constants.SS_ONLINE_NODES: on_data,
      constants.SS_PRIMARY_IP_FAMILY: str(cluster.primary_ip_family),
      constants.SS_INSTANCE_LIST: instance_data,
      constants.SS_RELEASE_VERSION: constants.RELEASE_VERSION,
      constants.SS_HYPERVISOR_LIST: hypervisor_list,
      constants.SS_MAINTAIN_NODE_HEALTH: str(cluster.maintain_node_health),
      constants.SS_UID_POOL: uid_pool,
      constants.SS_NODEGROUPS: nodegroups_data,
      constants.SS_NETWORKS: networks_data,
      constants.SS_ENABLED_USER_SHUTDOWN: str(cluster.enabled_user_shutdown),
      constants.SS_SSH_PORTS: ssh_ports,
      }
    ssconf_values = self._ExtendByAllHvparamsStrings(ssconf_values,
                                                     all_hvparams)
    # ssconf files may only contain strings; catch anything else early
    bad_values = [(k, v) for k, v in ssconf_values.items()
                  if not isinstance(v, (str, basestring))]
    if bad_values:
      err = utils.CommaJoin("%s=%s" % (k, v) for k, v in bad_values)
      raise errors.ConfigurationError("Some ssconf key(s) have non-string"
                                      " values: %s" % err)
    return ssconf_values
2927 2928 @ConfigSync(shared=1)
2929 - def GetSsconfValues(self):
2930 """Wrapper using lock around _UnlockedGetSsconf(). 2931 2932 """ 2933 return self._UnlockedGetSsconfValues()
2934 2935 @ConfigSync(shared=1)
2936 - def GetVGName(self):
2937 """Return the volume group name. 2938 2939 """ 2940 return self._ConfigData().cluster.volume_group_name
2941 2942 @ConfigSync()
2943 - def SetVGName(self, vg_name):
2944 """Set the volume group name. 2945 2946 """ 2947 self._ConfigData().cluster.volume_group_name = vg_name 2948 self._ConfigData().cluster.serial_no += 1
2949 2950 @ConfigSync(shared=1)
2951 - def GetDRBDHelper(self):
2952 """Return DRBD usermode helper. 2953 2954 """ 2955 return self._ConfigData().cluster.drbd_usermode_helper
2956 2957 @ConfigSync()
2958 - def SetDRBDHelper(self, drbd_helper):
2959 """Set DRBD usermode helper. 2960 2961 """ 2962 self._ConfigData().cluster.drbd_usermode_helper = drbd_helper 2963 self._ConfigData().cluster.serial_no += 1
2964 2965 @ConfigSync(shared=1)
2966 - def GetMACPrefix(self):
2967 """Return the mac prefix. 2968 2969 """ 2970 return self._ConfigData().cluster.mac_prefix
2971 2972 @ConfigSync(shared=1)
2973 - def GetClusterInfo(self):
2974 """Returns information about the cluster 2975 2976 @rtype: L{objects.Cluster} 2977 @return: the cluster object 2978 2979 """ 2980 return self._ConfigData().cluster
2981 2982 @ConfigSync(shared=1)
2983 - def DisksOfType(self, dev_type):
2984 """Check if in there is at disk of the given type in the configuration. 2985 2986 """ 2987 return self._ConfigData().DisksOfType(dev_type)
2988 2989 @ConfigSync(shared=1)
2990 - def GetDetachedConfig(self):
2991 """Returns a detached version of a ConfigManager, which represents 2992 a read-only snapshot of the configuration at this particular time. 2993 2994 """ 2995 return DetachedConfig(self._ConfigData())
2996
  def Update(self, target, feedback_fn, ec_id=None):
    """Notify function to be called after updates.

    This function must be called when an object (as returned by
    GetInstanceInfo, GetNodeInfo, GetCluster) has been updated and the
    caller wants the modifications saved to the backing store. Note
    that all modified objects will be saved, but the target argument
    is the one the caller wants to ensure that it's saved.

    @param target: an instance of either L{objects.Cluster},
        L{objects.Node} or L{objects.Instance} which is existing in
        the cluster
    @param feedback_fn: Callable feedback function
    @param ec_id: optional execution context id; when given, the IPs
        reserved under that context are committed after the update

    """

    # Pick the WConfd update RPC matching the target's type; the cluster
    # object is special-cased because offline mode handles it locally.
    update_function = None
    if isinstance(target, objects.Cluster):
      if self._offline:
        self.UpdateOfflineCluster(target, feedback_fn)
        return
      else:
        update_function = self._wconfd.UpdateCluster
    elif isinstance(target, objects.Node):
      update_function = self._wconfd.UpdateNode
    elif isinstance(target, objects.Instance):
      update_function = self._wconfd.UpdateInstance
    elif isinstance(target, objects.NodeGroup):
      update_function = self._wconfd.UpdateNodeGroup
    elif isinstance(target, objects.Network):
      update_function = self._wconfd.UpdateNetwork
    elif isinstance(target, objects.Disk):
      update_function = self._wconfd.UpdateDisk
    else:
      raise errors.ProgrammerError("Invalid object type (%s) passed to"
                                   " ConfigWriter.Update" % type(target))

    def WithRetry():
      # A None result from WConfd means the update could not be applied
      # yet; raising RetryAgain makes utils.Retry call us again.
      result = update_function(target.ToDict())
      self.OutDate()

      if result is None:
        raise utils.RetryAgain()
      else:
        return result
    # Retry for up to 30 seconds with a 0.1s base delay; the result is a
    # (serial_no, mtime) pair reflecting the applied update.
    vals = utils.Retry(WithRetry, 0.1, 30)
    self.OutDate()
    target.serial_no = vals[0]
    target.mtime = float(vals[1])

    if ec_id is not None:
      # Commit all ips reserved by OpInstanceSetParams and OpGroupSetParams
      # FIXME: After RemoveInstance is moved to WConfd, use its internal
      # functions from TempRes module.
      self.CommitTemporaryIps(ec_id)

    # Just verify the configuration with our feedback function.
    # It will get written automatically by the decorator.
    self.VerifyConfigAndLog(feedback_fn=feedback_fn)

  @ConfigSync()
  def UpdateOfflineCluster(self, target, feedback_fn):
    """Apply a cluster-object update while running in offline mode.

    Stores the given cluster object directly in the local configuration,
    bumps its serial number and modification time, and verifies the
    result; writing happens via the decorator.

    @type target: L{objects.Cluster}
    @param target: the updated cluster object to store
    @param feedback_fn: Callable feedback function

    """
    self._ConfigData().cluster = target
    target.serial_no += 1
    target.mtime = time.time()
    self.VerifyConfigAndLog(feedback_fn=feedback_fn)
3063
3064 - def _UnlockedDropECReservations(self, _ec_id):
3065 """Drop per-execution-context reservations 3066 3067 """ 3068 # FIXME: Remove the following two lines after all reservations are moved to 3069 # wconfd. 3070 for rm in self._all_rms: 3071 rm.DropECReservations(_ec_id) 3072 if not self._offline: 3073 self._wconfd.DropAllReservations(self._GetWConfdContext())
3074
  def DropECReservations(self, ec_id):
    """Public wrapper around L{_UnlockedDropECReservations}.

    @param ec_id: the execution context id whose reservations to drop

    """
    self._UnlockedDropECReservations(ec_id)
3077 3078 @ConfigSync(shared=1)
3079 - def GetAllNetworksInfo(self):
3080 """Get configuration info of all the networks. 3081 3082 """ 3083 return dict(self._ConfigData().networks)
3084
3085 - def _UnlockedGetNetworkList(self):
3086 """Get the list of networks. 3087 3088 This function is for internal use, when the config lock is already held. 3089 3090 """ 3091 return self._ConfigData().networks.keys()

  @ConfigSync(shared=1)
  def GetNetworkList(self):
    """Get the list of networks.

    @return: list of network UUIDs (the keys of the configuration's
        network map, as returned by L{_UnlockedGetNetworkList})

    """
    return self._UnlockedGetNetworkList()
3101 3102 @ConfigSync(shared=1)
3103 - def GetNetworkNames(self):
3104 """Get a list of network names 3105 3106 """ 3107 names = [net.name 3108 for net in self._ConfigData().networks.values()] 3109 return names
3110
3111 - def _UnlockedGetNetwork(self, uuid):
3112 """Returns information about a network. 3113 3114 This function is for internal use, when the config lock is already held. 3115 3116 """ 3117 if uuid not in self._ConfigData().networks: 3118 return None 3119 3120 return self._ConfigData().networks[uuid]
3121 3122 @ConfigSync(shared=1)
3123 - def GetNetwork(self, uuid):
3124 """Returns information about a network. 3125 3126 It takes the information from the configuration file. 3127 3128 @param uuid: UUID of the network 3129 3130 @rtype: L{objects.Network} 3131 @return: the network object 3132 3133 """ 3134 return self._UnlockedGetNetwork(uuid)
3135 3136 @ConfigSync()
3137 - def AddNetwork(self, net, ec_id, check_uuid=True):
3138 """Add a network to the configuration. 3139 3140 @type net: L{objects.Network} 3141 @param net: the Network object to add 3142 @type ec_id: string 3143 @param ec_id: unique id for the job to use when creating a missing UUID 3144 3145 """ 3146 self._UnlockedAddNetwork(net, ec_id, check_uuid)
3147
3148 - def _UnlockedAddNetwork(self, net, ec_id, check_uuid):
3149 """Add a network to the configuration. 3150 3151 """ 3152 logging.info("Adding network %s to configuration", net.name) 3153 3154 if check_uuid: 3155 self._EnsureUUID(net, ec_id) 3156 3157 net.serial_no = 1 3158 net.ctime = net.mtime = time.time() 3159 self._ConfigData().networks[net.uuid] = net 3160 self._ConfigData().cluster.serial_no += 1
3161
3162 - def _UnlockedLookupNetwork(self, target):
3163 """Lookup a network's UUID. 3164 3165 @type target: string 3166 @param target: network name or UUID 3167 @rtype: string 3168 @return: network UUID 3169 @raises errors.OpPrereqError: when the target network cannot be found 3170 3171 """ 3172 if target is None: 3173 return None 3174 if target in self._ConfigData().networks: 3175 return target 3176 for net in self._ConfigData().networks.values(): 3177 if net.name == target: 3178 return net.uuid 3179 raise errors.OpPrereqError("Network '%s' not found" % target, 3180 errors.ECODE_NOENT)
3181 3182 @ConfigSync(shared=1)
3183 - def LookupNetwork(self, target):
3184 """Lookup a network's UUID. 3185 3186 This function is just a wrapper over L{_UnlockedLookupNetwork}. 3187 3188 @type target: string 3189 @param target: network name or UUID 3190 @rtype: string 3191 @return: network UUID 3192 3193 """ 3194 return self._UnlockedLookupNetwork(target)
3195 3196 @ConfigSync()
3197 - def RemoveNetwork(self, network_uuid):
3198 """Remove a network from the configuration. 3199 3200 @type network_uuid: string 3201 @param network_uuid: the UUID of the network to remove 3202 3203 """ 3204 logging.info("Removing network %s from configuration", network_uuid) 3205 3206 if network_uuid not in self._ConfigData().networks: 3207 raise errors.ConfigurationError("Unknown network '%s'" % network_uuid) 3208 3209 del self._ConfigData().networks[network_uuid] 3210 self._ConfigData().cluster.serial_no += 1
3211
3212 - def _UnlockedGetGroupNetParams(self, net_uuid, node_uuid):
3213 """Get the netparams (mode, link) of a network. 3214 3215 Get a network's netparams for a given node. 3216 3217 @type net_uuid: string 3218 @param net_uuid: network uuid 3219 @type node_uuid: string 3220 @param node_uuid: node UUID 3221 @rtype: dict or None 3222 @return: netparams 3223 3224 """ 3225 node_info = self._UnlockedGetNodeInfo(node_uuid) 3226 nodegroup_info = self._UnlockedGetNodeGroup(node_info.group) 3227 netparams = nodegroup_info.networks.get(net_uuid, None) 3228 3229 return netparams
3230 3231 @ConfigSync(shared=1)
3232 - def GetGroupNetParams(self, net_uuid, node_uuid):
3233 """Locking wrapper of _UnlockedGetGroupNetParams() 3234 3235 """ 3236 return self._UnlockedGetGroupNetParams(net_uuid, node_uuid)
3237 3238 @ConfigSync(shared=1)
3239 - def CheckIPInNodeGroup(self, ip, node_uuid):
3240 """Check IP uniqueness in nodegroup. 3241 3242 Check networks that are connected in the node's node group 3243 if ip is contained in any of them. Used when creating/adding 3244 a NIC to ensure uniqueness among nodegroups. 3245 3246 @type ip: string 3247 @param ip: ip address 3248 @type node_uuid: string 3249 @param node_uuid: node UUID 3250 @rtype: (string, dict) or (None, None) 3251 @return: (network name, netparams) 3252 3253 """ 3254 if ip is None: 3255 return (None, None) 3256 node_info = self._UnlockedGetNodeInfo(node_uuid) 3257 nodegroup_info = self._UnlockedGetNodeGroup(node_info.group) 3258 for net_uuid in nodegroup_info.networks.keys(): 3259 net_info = self._UnlockedGetNetwork(net_uuid) 3260 pool = network.AddressPool(net_info) 3261 if pool.Contains(ip): 3262 return (net_info.name, nodegroup_info.networks[net_uuid]) 3263 3264 return (None, None)
3265 3266 @ConfigSync(shared=1)
3267 - def GetCandidateCerts(self):
3268 """Returns the candidate certificate map. 3269 3270 """ 3271 return self._ConfigData().cluster.candidate_certs
3272 3273 @ConfigSync()
3274 - def SetCandidateCerts(self, certs):
3275 """Replaces the master candidate cert list with the new values. 3276 3277 @type certs: dict of string to string 3278 @param certs: map of node UUIDs to SSL client certificate digests. 3279 3280 """ 3281 self._ConfigData().cluster.candidate_certs = certs
3282 3283 @ConfigSync()
3284 - def AddNodeToCandidateCerts(self, node_uuid, cert_digest, 3285 info_fn=logging.info, warn_fn=logging.warn):
3286 """Adds an entry to the candidate certificate map. 3287 3288 @type node_uuid: string 3289 @param node_uuid: the node's UUID 3290 @type cert_digest: string 3291 @param cert_digest: the digest of the node's client SSL certificate 3292 @type info_fn: function 3293 @param info_fn: logging function for information messages 3294 @type warn_fn: function 3295 @param warn_fn: logging function for warning messages 3296 3297 """ 3298 cluster = self._ConfigData().cluster 3299 if node_uuid in cluster.candidate_certs: 3300 old_cert_digest = cluster.candidate_certs[node_uuid] 3301 if old_cert_digest == cert_digest: 3302 if info_fn is not None: 3303 info_fn("Certificate digest for node %s already in config." 3304 "Not doing anything." % node_uuid) 3305 return 3306 else: 3307 if warn_fn is not None: 3308 warn_fn("Overriding differing certificate digest for node %s" 3309 % node_uuid) 3310 cluster.candidate_certs[node_uuid] = cert_digest
3311 3312 @ConfigSync()
3313 - def RemoveNodeFromCandidateCerts(self, node_uuid, 3314 warn_fn=logging.warn):
3315 """Removes the entry of the given node in the certificate map. 3316 3317 @type node_uuid: string 3318 @param node_uuid: the node's UUID 3319 @type warn_fn: function 3320 @param warn_fn: logging function for warning messages 3321 3322 """ 3323 cluster = self._ConfigData().cluster 3324 if node_uuid not in cluster.candidate_certs: 3325 if warn_fn is not None: 3326 warn_fn("Cannot remove certifcate for node %s, because it's not" 3327 " in the candidate map." % node_uuid) 3328 return 3329 del cluster.candidate_certs[node_uuid]
3330
3331 - def FlushConfig(self):
3332 """Force the distribution of configuration to master candidates. 3333 3334 It is not necessary to hold a lock for this operation, it is handled 3335 internally by WConfd. 3336 3337 """ 3338 if not self._offline: 3339 self._wconfd.FlushConfig()
3340
3341 - def FlushConfigGroup(self, uuid):
3342 """Force the distribution of configuration to master candidates of a group. 3343 3344 It is not necessary to hold a lock for this operation, it is handled 3345 internally by WConfd. 3346 3347 """ 3348 if not self._offline: 3349 self._wconfd.FlushConfigGroup(uuid)
3350 3351 @ConfigSync(shared=1)
3352 - def GetAllDiskInfo(self):
3353 """Get the configuration of all disks. 3354 3355 @rtype: dict 3356 @return: dict of (disk, disk_info), where disk_info is what 3357 would GetDiskInfo return for disk 3358 """ 3359 return self._UnlockedGetAllDiskInfo()
3360
3361 - def _UnlockedGetAllDiskInfo(self):
3362 return dict((disk_uuid, self._UnlockedGetDiskInfo(disk_uuid)) 3363 for disk_uuid in self._UnlockedGetDiskList())
3364 3365 @ConfigSync(shared=1)
3366 - def GetInstanceForDisk(self, disk_uuid):
3367 """Returns the instance the disk is currently attached to. 3368 3369 @type disk_uuid: string 3370 @param disk_uuid: the identifier of the disk in question. 3371 3372 @rtype: string 3373 @return: uuid of instance the disk is attached to. 3374 """ 3375 for inst_uuid, inst_info in self._UnlockedGetAllInstancesInfo().items(): 3376 if disk_uuid in inst_info.disks: 3377 return inst_uuid
3378

class DetachedConfig(ConfigWriter):
  """Read-only snapshot of the config.

  Overrides the open/close hooks of L{ConfigWriter} so that any attempt
  to open the configuration for writing, or to save it, raises
  L{errors.ProgrammerError}.

  """

  def __init__(self, config_data):
    # NOTE(review): `self` is passed as the first positional argument to
    # ConfigWriter.__init__ (presumably its config-file parameter); this
    # looks unintended -- confirm against ConfigWriter's signature.
    super(DetachedConfig, self).__init__(self, offline=True)
    self._SetConfigData(config_data)

  @staticmethod
  def _WriteCallError():
    # Single choke point rejecting all write-path operations.
    raise errors.ProgrammerError("DetachedConfig supports only read-only"
                                 " operations")

  def _OpenConfig(self, shared, force=None):
    # Only shared (read) access is permitted on a snapshot.
    if not shared:
      DetachedConfig._WriteCallError()

  def _CloseConfig(self, save):
    # Saving a read-only snapshot is a programming error.
    if save:
      DetachedConfig._WriteCallError()
3399