Package ganeti :: Module backend
[hide private]
[frames] | [no frames]

Source Code for Module ganeti.backend

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. 
   5  # 
   6  # This program is free software; you can redistribute it and/or modify 
   7  # it under the terms of the GNU General Public License as published by 
   8  # the Free Software Foundation; either version 2 of the License, or 
   9  # (at your option) any later version. 
  10  # 
  11  # This program is distributed in the hope that it will be useful, but 
  12  # WITHOUT ANY WARRANTY; without even the implied warranty of 
  13  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
  14  # General Public License for more details. 
  15  # 
  16  # You should have received a copy of the GNU General Public License 
  17  # along with this program; if not, write to the Free Software 
  18  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
  19  # 02110-1301, USA. 
  20   
  21   
  22  """Functions used by the node daemon 
  23   
  24  @var _ALLOWED_UPLOAD_FILES: denotes which files are accepted in 
  25       the L{UploadFile} function 
  26  @var _ALLOWED_CLEAN_DIRS: denotes which directories are accepted 
  27       in the L{_CleanDirectory} function 
  28   
  29  """ 
  30   
  31  # pylint: disable=E1103 
  32   
  33  # E1103: %s %r has no %r member (but some types could not be 
  34  # inferred), because the _TryOSFromDisk returns either (True, os_obj) 
  35  # or (False, "string") which confuses pylint 
  36   
  37   
  38  import os 
  39  import os.path 
  40  import shutil 
  41  import time 
  42  import stat 
  43  import errno 
  44  import re 
  45  import random 
  46  import logging 
  47  import tempfile 
  48  import zlib 
  49  import base64 
  50  import signal 
  51   
  52  from ganeti import errors 
  53  from ganeti import utils 
  54  from ganeti import ssh 
  55  from ganeti import hypervisor 
  56  from ganeti import constants 
  57  from ganeti.storage import bdev 
  58  from ganeti.storage import drbd 
  59  from ganeti.storage import filestorage 
  60  from ganeti import objects 
  61  from ganeti import ssconf 
  62  from ganeti import serializer 
  63  from ganeti import netutils 
  64  from ganeti import runtime 
  65  from ganeti import compat 
  66  from ganeti import pathutils 
  67  from ganeti import vcluster 
  68  from ganeti import ht 
  69  from ganeti.storage.base import BlockDev 
  70  from ganeti.storage.drbd import DRBD8 
  71  from ganeti import hooksmaster 
  72   
  73   
#: Kernel-provided boot id file; its content changes on every boot, so it is
#: used to detect node reboots
_BOOT_ID_PATH = "/proc/sys/kernel/random/boot_id"

#: Directories which L{_CleanDirectory} is allowed to wipe
_ALLOWED_CLEAN_DIRS = compat.UniqueFrozenset([
  pathutils.DATA_DIR,
  pathutils.JOB_QUEUE_ARCHIVE_DIR,
  pathutils.QUEUE_DIR,
  pathutils.CRYPTO_KEYS_DIR,
  ])

#: Maximum validity of generated X509 certificates, in seconds (7 days)
_MAX_SSL_CERT_VALIDITY = 7 * 24 * 60 * 60

#: File names for an X509 key/certificate pair
_X509_KEY_FILE = "key"
_X509_CERT_FILE = "cert"

#: File names used inside an import/export status directory
_IES_STATUS_FILE = "status"
_IES_PID_FILE = "pid"
_IES_CA_FILE = "ca"

#: Valid LVS output line regex
_LVSLINE_REGEX = re.compile(r"^ *([^|]+)\|([^|]+)\|([0-9.]+)\|([^|]{6,})\|?$")

# Actions for the master setup script
_MASTER_START = "start"
_MASTER_STOP = "stop"

#: Maximum file permissions for restricted command directory and executables
_RCMD_MAX_MODE = (stat.S_IRWXU |
                  stat.S_IRGRP | stat.S_IXGRP |
                  stat.S_IROTH | stat.S_IXOTH)

#: Delay before returning an error for restricted commands
_RCMD_INVALID_DELAY = 10

#: How long to wait to acquire lock for restricted commands (shorter than
#: L{_RCMD_INVALID_DELAY}) to reduce blockage of noded forks when many
#: command requests arrive
_RCMD_LOCK_TIMEOUT = _RCMD_INVALID_DELAY * 0.8
class RPCFail(Exception):
  """Class denoting RPC failure.

  Its argument is the error message. Usually raised via L{_Fail}; the
  daemon handles it specially and turns it into a 'failed' RPC result.

  """
115
def _GetInstReasonFilename(instance_name):
  """Compute the path of the file holding an instance's state-change reason.

  @type instance_name: string
  @param instance_name: The name of the instance
  @rtype: string
  @return: The path of the file

  """
  reason_dir = pathutils.INSTANCE_REASON_DIR
  return utils.PathJoin(reason_dir, instance_name)
127
def _StoreInstReasonTrail(instance_name, trail):
  """Serialize a reason trail related to an instance change of state to file.

  The exact location of the file depends on the name of the instance and on
  the configuration of the Ganeti cluster defined at deploy time.

  @type instance_name: string
  @param instance_name: The name of the instance
  @rtype: None

  """
  target = _GetInstReasonFilename(instance_name)
  utils.WriteFile(target, data=serializer.DumpJson(trail))
143
def _Fail(msg, *args, **kwargs):
  """Log an error and then raise an RPCFail exception.

  This exception is then handled specially in the ganeti daemon and
  turned into a 'failed' return type. As such, this function is a
  useful shortcut for logging the error and returning it to the master
  daemon.

  @type msg: string
  @param msg: the text of the exception; %-formatted with C{args} if any
  @keyword log: if False, suppress logging of the error (default True)
  @keyword exc: if true, log the current exception traceback as well
  @raise RPCFail: always, with the formatted message

  """
  if args:
    msg = msg % args
  # use kwargs.get instead of manual membership checks; same semantics
  if kwargs.get("log", True):  # if we should log this error
    if kwargs.get("exc"):
      logging.exception(msg)
    else:
      logging.error(msg)
  raise RPCFail(msg)
166
def _GetConfig():
  """Simple wrapper to return a SimpleStore.

  @rtype: L{ssconf.SimpleStore}
  @return: a SimpleStore instance

  """
  store = ssconf.SimpleStore()
  return store
176
def _GetSshRunner(cluster_name):
  """Simple wrapper to return an SshRunner.

  @type cluster_name: str
  @param cluster_name: the cluster name, which is needed
    by the SshRunner constructor
  @rtype: L{ssh.SshRunner}
  @return: an SshRunner instance

  """
  runner = ssh.SshRunner(cluster_name)
  return runner
189
def _Decompress(data):
  """Unpacks data compressed by the RPC client.

  @type data: list or tuple
  @param data: Data sent by RPC client, as an (encoding, content) pair
  @rtype: str
  @return: Decompressed data

  """
  assert isinstance(data, (list, tuple))
  assert len(data) == 2
  (encoding, content) = data
  if encoding == constants.RPC_ENCODING_ZLIB_BASE64:
    return zlib.decompress(base64.b64decode(content))
  if encoding == constants.RPC_ENCODING_NONE:
    # Content was sent verbatim
    return content
  raise AssertionError("Unknown data encoding")
209
def _CleanDirectory(path, exclude=None):
  """Removes all regular files in a directory.

  @type path: str
  @param path: the directory to clean
  @type exclude: list
  @param exclude: list of files to be excluded, defaults
      to the empty list

  """
  # Refuse to touch anything outside the explicit whitelist
  if path not in _ALLOWED_CLEAN_DIRS:
    _Fail("Path passed to _CleanDirectory not in allowed clean targets: '%s'",
          path)

  if not os.path.isdir(path):
    return

  if exclude:
    # Normalize excluded paths so comparison with joined names works
    keep = [os.path.normpath(entry) for entry in exclude]
  else:
    keep = []

  for rel_name in utils.ListVisibleFiles(path):
    full_name = utils.PathJoin(path, rel_name)
    if full_name in keep:
      continue
    # Only remove regular, non-symlink files
    if os.path.islink(full_name) or not os.path.isfile(full_name):
      continue
    utils.RemoveFile(full_name)
239
def _BuildUploadFileList():
  """Build the list of allowed upload files.

  This is abstracted so that it's built only once at module import time.

  """
  static_files = [
    pathutils.CLUSTER_CONF_FILE,
    pathutils.ETC_HOSTS,
    pathutils.SSH_KNOWN_HOSTS_FILE,
    pathutils.VNC_PASSWORD_FILE,
    pathutils.RAPI_CERT_FILE,
    pathutils.SPICE_CERT_FILE,
    pathutils.SPICE_CACERT_FILE,
    pathutils.RAPI_USERS_FILE,
    pathutils.CONFD_HMAC_KEY,
    pathutils.CLUSTER_DOMAIN_SECRET_FILE,
    ]
  allowed_files = set(static_files)

  # Each hypervisor contributes its own ancillary files
  for hv_name in constants.HYPER_TYPES:
    hv_class = hypervisor.GetHypervisorClass(hv_name)
    allowed_files.update(hv_class.GetAncillaryFiles()[0])

  assert pathutils.FILE_STORAGE_PATHS_FILE not in allowed_files, \
    "Allowed file storage paths should never be uploaded via RPC"

  return frozenset(allowed_files)


#: Files accepted by L{UploadFile}; computed once at module import time
_ALLOWED_UPLOAD_FILES = _BuildUploadFileList()
def JobQueuePurge():
  """Removes job queue files and archived jobs.

  @rtype: None

  """
  # Keep the lock file so the job queue lock stays valid
  _CleanDirectory(pathutils.QUEUE_DIR, exclude=[pathutils.JOB_QUEUE_LOCK_FILE])
  _CleanDirectory(pathutils.JOB_QUEUE_ARCHIVE_DIR)
282
def GetMasterInfo():
  """Returns master information.

  This is an utility function to compute master information, either
  for consumption here or from the node daemon.

  @rtype: tuple
  @return: master_netdev, master_ip, master_name, primary_ip_family,
    master_netmask
  @raise RPCFail: in case of errors

  """
  try:
    cfg = _GetConfig()
    master_netdev = cfg.GetMasterNetdev()
    master_ip = cfg.GetMasterIP()
    master_netmask = cfg.GetMasterNetmask()
    master_node = cfg.GetMasterNode()
    primary_ip_family = cfg.GetPrimaryIPFamily()
  except errors.ConfigurationError, err:
    # _Fail always raises, so the return below is only reached on success
    _Fail("Cluster configuration incomplete: %s", err, exc=True)
  return (master_netdev, master_ip, master_node, primary_ip_family,
          master_netmask)
307
def RunLocalHooks(hook_opcode, hooks_path, env_builder_fn):
  """Decorator that runs hooks before and after the decorated function.

  @type hook_opcode: string
  @param hook_opcode: opcode of the hook
  @type hooks_path: string
  @param hooks_path: path of the hooks
  @type env_builder_fn: function
  @param env_builder_fn: function that returns a dictionary containing the
    environment variables for the hooks. Will get all the parameters of the
    decorated function.
  @raise RPCFail: in case of pre-hook failure

  """
  def decorator(fn):
    def wrapper(*args, **kwargs):
      _, myself = ssconf.GetMasterAndMyself()
      nodes = ([myself], [myself])  # these hooks run locally

      # Bind the decorated function's arguments so the environment can be
      # built lazily by the hooks master
      env_fn = compat.partial(env_builder_fn, *args, **kwargs)

      cfg = _GetConfig()
      hr = HooksRunner()
      hm = hooksmaster.HooksMaster(hook_opcode, hooks_path, nodes,
                                   hr.RunLocalHooks, None, env_fn,
                                   logging.warning, cfg.GetClusterName(),
                                   cfg.GetMasterNode())
      # Pre-hooks run first; if they fail, fn is never called.  Post-hooks
      # only run after fn returned successfully.
      hm.RunPhase(constants.HOOKS_PHASE_PRE)
      result = fn(*args, **kwargs)
      hm.RunPhase(constants.HOOKS_PHASE_POST)

      return result
    return wrapper
  return decorator
def _BuildMasterIpEnv(master_params, use_external_mip_script=None):
  """Builds environment variables for master IP hooks.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script (unused, but necessary per the implementation of the
    _RunLocalHooks decorator)

  """
  # pylint: disable=W0613
  family = master_params.ip_family
  ip_version = netutils.IPAddress.GetVersionFromAddressFamily(family)
  return {
    "MASTER_NETDEV": master_params.netdev,
    "MASTER_IP": master_params.ip,
    "MASTER_NETMASK": str(master_params.netmask),
    "CLUSTER_IP_VERSION": str(ip_version),
    }
366
def _RunMasterSetupScript(master_params, action, use_external_mip_script):
  """Execute the master IP address setup script.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type action: string
  @param action: action to pass to the script. Must be one of
    L{backend._MASTER_START} or L{backend._MASTER_STOP}
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script
  @raise backend.RPCFail: if there are errors during the execution of the
    script

  """
  if use_external_mip_script:
    setup_script = pathutils.EXTERNAL_MASTER_SETUP_SCRIPT
  else:
    setup_script = pathutils.DEFAULT_MASTER_SETUP_SCRIPT

  # The script only sees the environment we build for it
  script_env = _BuildMasterIpEnv(master_params)
  result = utils.RunCmd([setup_script, action], env=script_env, reset_env=True)

  if result.failed:
    _Fail("Failed to %s the master IP. Script return value: %s, output: '%s'" %
          (action, result.exit_code, result.output), log=True)
@RunLocalHooks(constants.FAKE_OP_MASTER_TURNUP, "master-ip-turnup",
               _BuildMasterIpEnv)
def ActivateMasterIp(master_params, use_external_mip_script):
  """Activate the IP address of the master daemon.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script
  @raise RPCFail: in case of errors during the IP startup

  """
  # Delegate all work to the shared master setup script runner
  _RunMasterSetupScript(master_params, _MASTER_START, use_external_mip_script)
412
def StartMasterDaemons(no_voting):
  """Activate local node as master node.

  The function will start the master daemons (ganeti-masterd and ganeti-rapi).

  @type no_voting: boolean
  @param no_voting: whether to start ganeti-masterd without a node vote
      but still non-interactively
  @rtype: None
  @raise RPCFail: if the start-master command fails

  """
  if no_voting:
    masterd_args = "--no-voting --yes-do-it"
  else:
    masterd_args = ""

  env = {
    "EXTRA_MASTERD_ARGS": masterd_args,
    }

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-master"], env=env)
  if result.failed:
    # _Fail already logs the message via logging.error; the previous code
    # logged it explicitly first, producing a duplicate log entry
    _Fail("Can't start Ganeti master: %s", result.output)
@RunLocalHooks(constants.FAKE_OP_MASTER_TURNDOWN, "master-ip-turndown",
               _BuildMasterIpEnv)
def DeactivateMasterIp(master_params, use_external_mip_script):
  """Deactivate the master IP on this node.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script
  @raise RPCFail: in case of errors during the IP turndown

  """
  # Delegate all work to the shared master setup script runner
  _RunMasterSetupScript(master_params, _MASTER_STOP, use_external_mip_script)
457
def StopMasterDaemons():
  """Stop the master daemons on this node.

  Stop the master daemons (ganeti-masterd and ganeti-rapi) on this node.

  @rtype: None

  """
  # TODO: log and report back to the caller the error failures; we
  # need to decide in which case we fail the RPC for this

  stop_cmd = [pathutils.DAEMON_UTIL, "stop-master"]
  result = utils.RunCmd(stop_cmd)
  if result.failed:
    # Failure is only logged, not reported back (see TODO above)
    logging.error("Could not stop Ganeti master, command %s had exitcode %s"
                  " and error %s",
                  result.cmd, result.exit_code, result.output)
475
def ChangeMasterNetmask(old_netmask, netmask, master_ip, master_netdev):
  """Change the netmask of the master IP.

  @param old_netmask: the old value of the netmask
  @param netmask: the new value of the netmask
  @param master_ip: the master IP
  @param master_netdev: the master network device

  """
  if old_netmask == netmask:
    # Nothing to do
    return

  if not netutils.IPAddress.Own(master_ip):
    _Fail("The master IP address is not up, not attempting to change its"
          " netmask")

  label = "%s:0" % master_netdev

  # First add the address with the new netmask...
  add_cmd = [constants.IP_COMMAND_PATH, "address", "add",
             "%s/%s" % (master_ip, netmask),
             "dev", master_netdev, "label", label]
  if utils.RunCmd(add_cmd).failed:
    _Fail("Could not set the new netmask on the master IP address")

  # ...then drop the old one
  del_cmd = [constants.IP_COMMAND_PATH, "address", "del",
             "%s/%s" % (master_ip, old_netmask),
             "dev", master_netdev, "label", label]
  if utils.RunCmd(del_cmd).failed:
    _Fail("Could not bring down the master IP address with the old netmask")
506
def EtcHostsModify(mode, host, ip):
  """Modify a host entry in /etc/hosts.

  @param mode: The mode to operate. Either add or remove entry
  @param host: The host to operate on
  @param ip: The ip associated with the entry
  @raise RPCFail: if the mode is unknown or the mode/parameter
      combination is invalid

  """
  if mode == constants.ETC_HOSTS_ADD:
    if not ip:
      # BUG FIX: the original code did "RPCFail(...)" here (and in the two
      # branches below), instantiating the exception without raising it, so
      # invalid requests fell through silently; _Fail logs and raises.
      _Fail("Mode 'add' needs 'ip' parameter, but parameter not"
            " present")
    utils.AddHostToEtcHosts(host, ip)
  elif mode == constants.ETC_HOSTS_REMOVE:
    if ip:
      _Fail("Mode 'remove' does not allow 'ip' parameter, but"
            " parameter is present")
    utils.RemoveHostFromEtcHosts(host)
  else:
    _Fail("Mode not supported")
528
def LeaveCluster(modify_ssh_setup):
  """Cleans up and remove the current node.

  This function cleans up and prepares the current node to be removed
  from the cluster.

  If processing is successful, then it raises an
  L{errors.QuitGanetiException} which is used as a special case to
  shutdown the node daemon.

  @type modify_ssh_setup: boolean
  @param modify_ssh_setup: whether to also remove this node's cluster
      SSH key files

  """
  _CleanDirectory(pathutils.DATA_DIR)
  _CleanDirectory(pathutils.CRYPTO_KEYS_DIR)
  JobQueuePurge()

  if modify_ssh_setup:
    try:
      priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.SSH_LOGIN_USER)

      utils.RemoveAuthorizedKey(auth_keys, utils.ReadFile(pub_key))

      utils.RemoveFile(priv_key)
      utils.RemoveFile(pub_key)
    except errors.OpExecError:
      # Best effort: log and continue with the rest of the cleanup
      logging.exception("Error while processing ssh files")

  try:
    utils.RemoveFile(pathutils.CONFD_HMAC_KEY)
    utils.RemoveFile(pathutils.RAPI_CERT_FILE)
    utils.RemoveFile(pathutils.SPICE_CERT_FILE)
    utils.RemoveFile(pathutils.SPICE_CACERT_FILE)
    utils.RemoveFile(pathutils.NODED_CERT_FILE)
  except: # pylint: disable=W0702
    # Deliberate catch-all: secret removal must not abort the node removal
    logging.exception("Error while removing cluster secrets")

  utils.StopDaemon(constants.CONFD)
  utils.StopDaemon(constants.MOND)

  # Raise a custom exception (handled in ganeti-noded)
  raise errors.QuitGanetiException(True, "Shutdown scheduled")
572
573 574 -def _CheckStorageParams(params, num_params):
575 """Performs sanity checks for storage parameters. 576 577 @type params: list 578 @param params: list of storage parameters 579 @type num_params: int 580 @param num_params: expected number of parameters 581 582 """ 583 if params is None: 584 raise errors.ProgrammerError("No storage parameters for storage" 585 " reporting is provided.") 586 if not isinstance(params, list): 587 raise errors.ProgrammerError("The storage parameters are not of type" 588 " list: '%s'" % params) 589 if not len(params) == num_params: 590 raise errors.ProgrammerError("Did not receive the expected number of" 591 "storage parameters: expected %s," 592 " received '%s'" % (num_params, len(params)))
593
def _CheckLvmStorageParams(params):
  """Performs sanity check for the 'exclusive storage' flag.

  @see: C{_CheckStorageParams}

  """
  # Exactly one parameter is expected: the exclusive storage flag
  _CheckStorageParams(params, 1)
  (excl_stor,) = params
  if not isinstance(excl_stor, bool):
    raise errors.ProgrammerError("Exclusive storage parameter is not"
                                 " boolean: '%s'." % excl_stor)
  return excl_stor
607
def _GetLvmVgSpaceInfo(name, params):
  """Wrapper around C{_GetVgInfo} which checks the storage parameters.

  @type name: string
  @param name: name of the volume group
  @type params: list
  @param params: list of storage parameters, which in this case should be
    containing only one for exclusive storage

  """
  return _GetVgInfo(name, _CheckLvmStorageParams(params))
621
def _GetVgInfo(
    name, excl_stor, info_fn=bdev.LogicalVolume.GetVGInfo):
  """Retrieves information about a LVM volume group.

  """
  # TODO: GetVGInfo supports returning information for multiple VGs at once
  vginfo = info_fn([name], excl_stor)
  if not vginfo:
    vg_free = vg_size = None
  else:
    # First tuple element is free space, second is total size
    vg_free = int(round(vginfo[0][0], 0))
    vg_size = int(round(vginfo[0][1], 0))

  return {
    "type": constants.ST_LVM_VG,
    "name": name,
    "storage_free": vg_free,
    "storage_size": vg_size,
    }
643
def _GetLvmPvSpaceInfo(name, params):
  """Wrapper around C{_GetVgSpindlesInfo} with sanity checks.

  @see: C{_GetLvmVgSpaceInfo}

  """
  return _GetVgSpindlesInfo(name, _CheckLvmStorageParams(params))
653
def _GetVgSpindlesInfo(
    name, excl_stor, info_fn=bdev.LogicalVolume.GetVgSpindlesInfo):
  """Retrieves information about spindles in an LVM volume group.

  @type name: string
  @param name: VG name
  @type excl_stor: bool
  @param excl_stor: exclusive storage
  @rtype: dict
  @return: dictionary whose keys are "name", "vg_free", "vg_size" for VG name,
    free spindles, total spindles respectively

  """
  if excl_stor:
    (vg_free, vg_size) = info_fn(name)
  else:
    # Without exclusive storage, spindle counts are reported as zero
    vg_free = vg_size = 0
  return {
    "type": constants.ST_LVM_PV,
    "name": name,
    "storage_free": vg_free,
    "storage_size": vg_size,
    }
679
def _GetHvInfo(name, hvparams, get_hv_fn=hypervisor.GetHypervisor):
  """Retrieves node information from a hypervisor.

  The information returned depends on the hypervisor. Common items:

    - vg_size is the size of the configured volume group in MiB
    - vg_free is the free size of the volume group in MiB
    - memory_dom0 is the memory allocated for domain0 in MiB
    - memory_free is the currently available (free) ram in MiB
    - memory_total is the total number of ram in MiB
    - hv_version: the hypervisor version, if available

  @type hvparams: dict of string
  @param hvparams: the hypervisor's hvparams

  """
  hv = get_hv_fn(name)
  return hv.GetNodeInfo(hvparams=hvparams)
698
def _GetHvInfoAll(hv_specs, get_hv_fn=hypervisor.GetHypervisor):
  """Retrieves node information for all hypervisors.

  See C{_GetHvInfo} for information on the output.

  @type hv_specs: list of pairs (string, dict of strings)
  @param hv_specs: list of pairs of a hypervisor's name and its hvparams
  @rtype: list of dicts or None
  @return: one node-info dict per requested hypervisor, in the same order
      as C{hv_specs}, or None if C{hv_specs} is None

  """
  if hv_specs is None:
    return None

  # Idiomatic list comprehension instead of manual append loop
  return [_GetHvInfo(hvname, hvparams, get_hv_fn)
          for (hvname, hvparams) in hv_specs]
716
def _GetNamedNodeInfo(names, fn):
  """Calls C{fn} for all names in C{names} and returns the results.

  @rtype: None or list
  @return: None if C{names} is None, otherwise the result of applying
      C{fn} to every element of C{names}

  """
  if names is None:
    return None
  else:
    return map(fn, names)
728
def GetNodeInfo(storage_units, hv_specs):
  """Gives back a hash with different information about the node.

  @type storage_units: list of tuples (string, string, list)
  @param storage_units: List of tuples (storage unit, identifier, parameters) to
    ask for disk space information. In case of lvm-vg, the identifier is
    the VG name. The parameters can contain additional, storage-type-specific
    parameters, for example exclusive storage for lvm storage.
  @type hv_specs: list of pairs (string, dict of strings)
  @param hv_specs: list of pairs of a hypervisor's name and its hvparams
  @rtype: tuple; (string, None/dict, None/dict)
  @return: Tuple containing boot ID, volume group information and hypervisor
    information

  """
  def _ReportStorageUnit(unit):
    # Each unit is a (storage_type, storage_key, storage_params) tuple;
    # a named inner function replaces the tuple-unpacking lambda
    (storage_type, storage_key, storage_params) = unit
    return _ApplyStorageInfoFunction(storage_type, storage_key, storage_params)

  bootid = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
  storage_info = _GetNamedNodeInfo(storage_units, _ReportStorageUnit)
  hv_info = _GetHvInfoAll(hv_specs)
  return (bootid, storage_info, hv_info)
752
def _GetFileStorageSpaceInfo(path, params):
  """Wrapper around filestorage.GetSpaceInfo.

  The purpose of this wrapper is to call filestorage.GetFileStorageSpaceInfo
  and ignore the *args parameter to not leak it into the filestorage
  module's code.

  @see: C{filestorage.GetFileStorageSpaceInfo} for description of the
    parameters.

  """
  # File storage expects no extra storage parameters
  _CheckStorageParams(params, 0)
  return filestorage.GetFileStorageSpaceInfo(path)


# FIXME: implement storage reporting for all missing storage types.
#: Maps each storage type to its space-reporting function, or None when
#: reporting is not yet implemented for that type
_STORAGE_TYPE_INFO_FN = {
  constants.ST_BLOCK: None,
  constants.ST_DISKLESS: None,
  constants.ST_EXT: None,
  constants.ST_FILE: _GetFileStorageSpaceInfo,
  constants.ST_LVM_PV: _GetLvmPvSpaceInfo,
  constants.ST_LVM_VG: _GetLvmVgSpaceInfo,
  constants.ST_RADOS: None,
  }
def _ApplyStorageInfoFunction(storage_type, storage_key, *args):
  """Looks up and applies the correct function to calculate free and total
  storage for the given storage type.

  @type storage_type: string
  @param storage_type: the storage type for which the storage shall be reported.
  @type storage_key: string
  @param storage_key: identifier of a storage unit, e.g. the volume group name
    of an LVM storage unit
  @type args: any
  @param args: various parameters that can be used for storage reporting. These
    parameters and their semantics vary from storage type to storage type and
    are just propagated in this function.
  @return: the results of the application of the storage space function (see
    _STORAGE_TYPE_INFO_FN) if storage space reporting is implemented for that
    storage type
  @raises NotImplementedError: for storage types who don't support space
    reporting yet

  """
  info_fn = _STORAGE_TYPE_INFO_FN[storage_type]
  if info_fn is None:
    # Storage type is known but has no reporting function yet
    raise NotImplementedError
  return info_fn(storage_key, *args)
805
806 807 -def _CheckExclusivePvs(pvi_list):
808 """Check that PVs are not shared among LVs 809 810 @type pvi_list: list of L{objects.LvmPvInfo} objects 811 @param pvi_list: information about the PVs 812 813 @rtype: list of tuples (string, list of strings) 814 @return: offending volumes, as tuples: (pv_name, [lv1_name, lv2_name...]) 815 816 """ 817 res = [] 818 for pvi in pvi_list: 819 if len(pvi.lv_list) > 1: 820 res.append((pvi.name, pvi.lv_list)) 821 return res
822
def _VerifyHypervisors(what, vm_capable, result, all_hvparams,
                       get_hv_fn=hypervisor.GetHypervisor):
  """Verifies the hypervisor. Appends the results to the 'results' list.

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here
  @type all_hvparams: dict of dict of string
  @param all_hvparams: dictionary mapping hypervisor names to hvparams
  @type get_hv_fn: function
  @param get_hv_fn: function to retrieve the hypervisor, to improve testability

  """
  # Hypervisor checks only make sense on vm-capable nodes
  if not vm_capable:
    return

  if constants.NV_HYPERVISOR in what:
    result[constants.NV_HYPERVISOR] = {}
    for hv_name in what[constants.NV_HYPERVISOR]:
      hvparams = all_hvparams[hv_name]
      try:
        val = get_hv_fn(hv_name).Verify(hvparams=hvparams)
      except errors.HypervisorError, err:
        # Report the error text as the check's value instead of failing
        val = "Error while checking hypervisor: %s" % str(err)
      result[constants.NV_HYPERVISOR][hv_name] = val
853
def _VerifyHvparams(what, vm_capable, result,
                    get_hv_fn=hypervisor.GetHypervisor):
  """Verifies the hvparams. Appends the results to the 'results' list.

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here
  @type get_hv_fn: function
  @param get_hv_fn: function to retrieve the hypervisor, to improve testability

  """
  # Hvparams checks only make sense on vm-capable nodes
  if not vm_capable:
    return

  if constants.NV_HVPARAMS in what:
    result[constants.NV_HVPARAMS] = []
    for source, hv_name, hvparms in what[constants.NV_HVPARAMS]:
      try:
        logging.info("Validating hv %s, %s", hv_name, hvparms)
        get_hv_fn(hv_name).ValidateParameters(hvparms)
      except errors.HypervisorError, err:
        # Only failed validations are recorded in the result
        result[constants.NV_HVPARAMS].append((source, hv_name, str(err)))
881
def _VerifyInstanceList(what, vm_capable, result, all_hvparams):
  """Verifies the instance list.

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here
  @type all_hvparams: dict of dict of string
  @param all_hvparams: dictionary mapping hypervisor names to hvparams

  """
  if constants.NV_INSTANCELIST in what and vm_capable:
    # GetInstanceList can fail
    try:
      val = GetInstanceList(what[constants.NV_INSTANCELIST],
                            all_hvparams=all_hvparams)
    except RPCFail, err:
      # The failure text becomes the check's result value
      val = str(err)
    result[constants.NV_INSTANCELIST] = val
905
def _VerifyNodeInfo(what, vm_capable, result, all_hvparams,
                    get_hv_fn=hypervisor.GetHypervisor):
  """Verifies the node info.

  @type what: C{dict}
  @param what: a dictionary of things to check
  @type vm_capable: boolean
  @param vm_capable: whether or not this node is vm capable
  @type result: dict
  @param result: dictionary of verification results; results of the
    verifications in this function will be added here
  @type all_hvparams: dict of dict of string
  @param all_hvparams: dictionary mapping hypervisor names to hvparams
  @type get_hv_fn: function
  @param get_hv_fn: function to retrieve the hypervisor, to improve
      testability; added for consistency with C{_VerifyHypervisors} and
      C{_VerifyHvparams} (defaults to the previous hard-coded behavior)

  """
  if constants.NV_HVINFO in what and vm_capable:
    hvname = what[constants.NV_HVINFO]
    hyper = get_hv_fn(hvname)
    hvparams = all_hvparams[hvname]
    result[constants.NV_HVINFO] = hyper.GetNodeInfo(hvparams=hvparams)
926
927 928 -def VerifyNode(what, cluster_name, all_hvparams):
929 """Verify the status of the local node. 930 931 Based on the input L{what} parameter, various checks are done on the 932 local node. 933 934 If the I{filelist} key is present, this list of 935 files is checksummed and the file/checksum pairs are returned. 936 937 If the I{nodelist} key is present, we check that we have 938 connectivity via ssh with the target nodes (and check the hostname 939 report). 940 941 If the I{node-net-test} key is present, we check that we have 942 connectivity to the given nodes via both primary IP and, if 943 applicable, secondary IPs. 944 945 @type what: C{dict} 946 @param what: a dictionary of things to check: 947 - filelist: list of files for which to compute checksums 948 - nodelist: list of nodes we should check ssh communication with 949 - node-net-test: list of nodes we should check node daemon port 950 connectivity with 951 - hypervisor: list with hypervisors to run the verify for 952 @type cluster_name: string 953 @param cluster_name: the cluster's name 954 @type all_hvparams: dict of dict of strings 955 @param all_hvparams: a dictionary mapping hypervisor names to hvparams 956 @rtype: dict 957 @return: a dictionary with the same keys as the input dict, and 958 values representing the result of the checks 959 960 """ 961 result = {} 962 my_name = netutils.Hostname.GetSysName() 963 port = netutils.GetDaemonPort(constants.NODED) 964 vm_capable = my_name not in what.get(constants.NV_VMNODES, []) 965 966 _VerifyHypervisors(what, vm_capable, result, all_hvparams) 967 _VerifyHvparams(what, vm_capable, result) 968 969 if constants.NV_FILELIST in what: 970 fingerprints = utils.FingerprintFiles(map(vcluster.LocalizeVirtualPath, 971 what[constants.NV_FILELIST])) 972 result[constants.NV_FILELIST] = \ 973 dict((vcluster.MakeVirtualPath(key), value) 974 for (key, value) in fingerprints.items()) 975 976 if constants.NV_NODELIST in what: 977 (nodes, bynode) = what[constants.NV_NODELIST] 978 979 # Add nodes from other groups (different for 
each node) 980 try: 981 nodes.extend(bynode[my_name]) 982 except KeyError: 983 pass 984 985 # Use a random order 986 random.shuffle(nodes) 987 988 # Try to contact all nodes 989 val = {} 990 for node in nodes: 991 success, message = _GetSshRunner(cluster_name).VerifyNodeHostname(node) 992 if not success: 993 val[node] = message 994 995 result[constants.NV_NODELIST] = val 996 997 if constants.NV_NODENETTEST in what: 998 result[constants.NV_NODENETTEST] = tmp = {} 999 my_pip = my_sip = None 1000 for name, pip, sip in what[constants.NV_NODENETTEST]: 1001 if name == my_name: 1002 my_pip = pip 1003 my_sip = sip 1004 break 1005 if not my_pip: 1006 tmp[my_name] = ("Can't find my own primary/secondary IP" 1007 " in the node list") 1008 else: 1009 for name, pip, sip in what[constants.NV_NODENETTEST]: 1010 fail = [] 1011 if not netutils.TcpPing(pip, port, source=my_pip): 1012 fail.append("primary") 1013 if sip != pip: 1014 if not netutils.TcpPing(sip, port, source=my_sip): 1015 fail.append("secondary") 1016 if fail: 1017 tmp[name] = ("failure using the %s interface(s)" % 1018 " and ".join(fail)) 1019 1020 if constants.NV_MASTERIP in what: 1021 # FIXME: add checks on incoming data structures (here and in the 1022 # rest of the function) 1023 master_name, master_ip = what[constants.NV_MASTERIP] 1024 if master_name == my_name: 1025 source = constants.IP4_ADDRESS_LOCALHOST 1026 else: 1027 source = None 1028 result[constants.NV_MASTERIP] = netutils.TcpPing(master_ip, port, 1029 source=source) 1030 1031 if constants.NV_USERSCRIPTS in what: 1032 result[constants.NV_USERSCRIPTS] = \ 1033 [script for script in what[constants.NV_USERSCRIPTS] 1034 if not utils.IsExecutable(script)] 1035 1036 if constants.NV_OOB_PATHS in what: 1037 result[constants.NV_OOB_PATHS] = tmp = [] 1038 for path in what[constants.NV_OOB_PATHS]: 1039 try: 1040 st = os.stat(path) 1041 except OSError, err: 1042 tmp.append("error stating out of band helper: %s" % err) 1043 else: 1044 if stat.S_ISREG(st.st_mode): 
1045 if stat.S_IMODE(st.st_mode) & stat.S_IXUSR: 1046 tmp.append(None) 1047 else: 1048 tmp.append("out of band helper %s is not executable" % path) 1049 else: 1050 tmp.append("out of band helper %s is not a file" % path) 1051 1052 if constants.NV_LVLIST in what and vm_capable: 1053 try: 1054 val = GetVolumeList(utils.ListVolumeGroups().keys()) 1055 except RPCFail, err: 1056 val = str(err) 1057 result[constants.NV_LVLIST] = val 1058 1059 _VerifyInstanceList(what, vm_capable, result, all_hvparams) 1060 1061 if constants.NV_VGLIST in what and vm_capable: 1062 result[constants.NV_VGLIST] = utils.ListVolumeGroups() 1063 1064 if constants.NV_PVLIST in what and vm_capable: 1065 check_exclusive_pvs = constants.NV_EXCLUSIVEPVS in what 1066 val = bdev.LogicalVolume.GetPVInfo(what[constants.NV_PVLIST], 1067 filter_allocatable=False, 1068 include_lvs=check_exclusive_pvs) 1069 if check_exclusive_pvs: 1070 result[constants.NV_EXCLUSIVEPVS] = _CheckExclusivePvs(val) 1071 for pvi in val: 1072 # Avoid sending useless data on the wire 1073 pvi.lv_list = [] 1074 result[constants.NV_PVLIST] = map(objects.LvmPvInfo.ToDict, val) 1075 1076 if constants.NV_VERSION in what: 1077 result[constants.NV_VERSION] = (constants.PROTOCOL_VERSION, 1078 constants.RELEASE_VERSION) 1079 1080 _VerifyNodeInfo(what, vm_capable, result, all_hvparams) 1081 1082 if constants.NV_DRBDVERSION in what and vm_capable: 1083 try: 1084 drbd_version = DRBD8.GetProcInfo().GetVersionString() 1085 except errors.BlockDeviceError, err: 1086 logging.warning("Can't get DRBD version", exc_info=True) 1087 drbd_version = str(err) 1088 result[constants.NV_DRBDVERSION] = drbd_version 1089 1090 if constants.NV_DRBDLIST in what and vm_capable: 1091 try: 1092 used_minors = drbd.DRBD8.GetUsedDevs() 1093 except errors.BlockDeviceError, err: 1094 logging.warning("Can't get used minors list", exc_info=True) 1095 used_minors = str(err) 1096 result[constants.NV_DRBDLIST] = used_minors 1097 1098 if constants.NV_DRBDHELPER in what and 
vm_capable: 1099 status = True 1100 try: 1101 payload = drbd.DRBD8.GetUsermodeHelper() 1102 except errors.BlockDeviceError, err: 1103 logging.error("Can't get DRBD usermode helper: %s", str(err)) 1104 status = False 1105 payload = str(err) 1106 result[constants.NV_DRBDHELPER] = (status, payload) 1107 1108 if constants.NV_NODESETUP in what: 1109 result[constants.NV_NODESETUP] = tmpr = [] 1110 if not os.path.isdir("/sys/block") or not os.path.isdir("/sys/class/net"): 1111 tmpr.append("The sysfs filesytem doesn't seem to be mounted" 1112 " under /sys, missing required directories /sys/block" 1113 " and /sys/class/net") 1114 if (not os.path.isdir("/proc/sys") or 1115 not os.path.isfile("/proc/sysrq-trigger")): 1116 tmpr.append("The procfs filesystem doesn't seem to be mounted" 1117 " under /proc, missing required directory /proc/sys and" 1118 " the file /proc/sysrq-trigger") 1119 1120 if constants.NV_TIME in what: 1121 result[constants.NV_TIME] = utils.SplitTime(time.time()) 1122 1123 if constants.NV_OSLIST in what and vm_capable: 1124 result[constants.NV_OSLIST] = DiagnoseOS() 1125 1126 if constants.NV_BRIDGES in what and vm_capable: 1127 result[constants.NV_BRIDGES] = [bridge 1128 for bridge in what[constants.NV_BRIDGES] 1129 if not utils.BridgeExists(bridge)] 1130 1131 if what.get(constants.NV_ACCEPTED_STORAGE_PATHS) == my_name: 1132 result[constants.NV_ACCEPTED_STORAGE_PATHS] = \ 1133 filestorage.ComputeWrongFileStoragePaths() 1134 1135 if what.get(constants.NV_FILE_STORAGE_PATH): 1136 pathresult = filestorage.CheckFileStoragePath( 1137 what[constants.NV_FILE_STORAGE_PATH]) 1138 if pathresult: 1139 result[constants.NV_FILE_STORAGE_PATH] = pathresult 1140 1141 if what.get(constants.NV_SHARED_FILE_STORAGE_PATH): 1142 pathresult = filestorage.CheckFileStoragePath( 1143 what[constants.NV_SHARED_FILE_STORAGE_PATH]) 1144 if pathresult: 1145 result[constants.NV_SHARED_FILE_STORAGE_PATH] = pathresult 1146 1147 return result
1148
1149 1150 -def GetBlockDevSizes(devices):
1151 """Return the size of the given block devices 1152 1153 @type devices: list 1154 @param devices: list of block device nodes to query 1155 @rtype: dict 1156 @return: 1157 dictionary of all block devices under /dev (key). The value is their 1158 size in MiB. 1159 1160 {'/dev/disk/by-uuid/123456-12321231-312312-312': 124} 1161 1162 """ 1163 DEV_PREFIX = "/dev/" 1164 blockdevs = {} 1165 1166 for devpath in devices: 1167 if not utils.IsBelowDir(DEV_PREFIX, devpath): 1168 continue 1169 1170 try: 1171 st = os.stat(devpath) 1172 except EnvironmentError, err: 1173 logging.warning("Error stat()'ing device %s: %s", devpath, str(err)) 1174 continue 1175 1176 if stat.S_ISBLK(st.st_mode): 1177 result = utils.RunCmd(["blockdev", "--getsize64", devpath]) 1178 if result.failed: 1179 # We don't want to fail, just do not list this device as available 1180 logging.warning("Cannot get size for block device %s", devpath) 1181 continue 1182 1183 size = int(result.stdout) / (1024 * 1024) 1184 blockdevs[devpath] = size 1185 return blockdevs
1186
def GetVolumeList(vg_names):
  """Compute list of logical volumes and their size.

  @type vg_names: list
  @param vg_names: the volume groups whose LVs we should list, or
      empty for all volume groups
  @rtype: dict
  @return:
      dictionary of all partitions (key) with value being a tuple of
      their size (in MiB), inactive and online status::

        {'xenvg/test1': ('20.06', True, True)}

      in case of errors, a string is returned with the error
      details.

  """
  lvs = {}
  sep = "|"
  if not vg_names:
    vg_names = []
  result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
                         "--separator=%s" % sep,
                         "-ovg_name,lv_name,lv_size,lv_attr"] + vg_names)
  if result.failed:
    _Fail("Failed to list logical volumes, lvs output: %s", result.output)

  for line in result.stdout.splitlines():
    line = line.strip()
    match = _LVSLINE_REGEX.match(line)
    if not match:
      logging.error("Invalid line returned from lvs output: '%s'", line)
      continue
    vg_name, name, size, attr = match.groups()
    # lv_attr positions: [0] volume type ("v" = virtual), [4] active
    # state ("-" = inactive), [5] open state ("o" = open)
    inactive = attr[4] == "-"
    online = attr[5] == "o"
    virtual = attr[0] == "v"
    if virtual:
      # we don't want to report such volumes as existing, since they
      # don't really hold data
      continue
    lvs[vg_name + "/" + name] = (size, inactive, online)

  return lvs
1232
def ListVolumeGroups():
  """List the volume groups and their size.

  Thin backend wrapper around L{utils.ListVolumeGroups}.

  @rtype: dict
  @return: dictionary with keys volume name and values the
      size of the volume

  """
  vgs = utils.ListVolumeGroups()
  return vgs
1243
def NodeVolumes():
  """List all volumes on this node.

  @rtype: list
  @return:
    A list of dictionaries, each having four keys:
      - name: the logical volume name,
      - size: the size of the logical volume
      - dev: the physical device on which the LV lives
      - vg: the volume group to which it belongs

    In case of errors, we return an empty list and log the
    error.

    Note that since a logical volume can live on multiple physical
    volumes, the resulting list might include a logical volume
    multiple times.

  """
  result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
                         "--separator=|",
                         "--options=lv_name,lv_size,devices,vg_name"])
  if result.failed:
    _Fail("Failed to list logical volumes, lvs output: %s",
          result.output)

  def _StripExtentInfo(dev):
    # "devices" entries look like "/dev/sda1(0)"; keep only the device path
    return dev.split("(")[0]

  volumes = []
  for line in result.stdout.splitlines():
    if line.count("|") < 3:
      logging.warning("Strange line in the output from lvs: '%s'", line)
      continue
    fields = [field.strip() for field in line.split("|")]
    # One entry per physical device backing the LV
    for dev in fields[2].split(","):
      volumes.append({
        "name": fields[0],
        "size": fields[1],
        "dev": _StripExtentInfo(dev),
        "vg": fields[3],
        })

  return volumes
def BridgesExist(bridges_list):
  """Check that all the bridges in a list exist on the current node.

  Note: despite its name, this function does not return a boolean; it
  raises on failure and returns nothing on success (the previous
  docstring advertising a C{True}/C{False} return value was wrong).

  @type bridges_list: list
  @param bridges_list: names of the bridges to check
  @rtype: None
  @raise RPCFail: if one or more of the bridges are missing; the
      message lists all missing bridges

  """
  missing = [bridge for bridge in bridges_list
             if not utils.BridgeExists(bridge)]

  if missing:
    _Fail("Missing bridges %s", utils.CommaJoin(missing))
1305
1306 1307 -def GetInstanceListForHypervisor(hname, hvparams=None, 1308 get_hv_fn=hypervisor.GetHypervisor):
1309 """Provides a list of instances of the given hypervisor. 1310 1311 @type hname: string 1312 @param hname: name of the hypervisor 1313 @type hvparams: dict of strings 1314 @param hvparams: hypervisor parameters for the given hypervisor 1315 @type get_hv_fn: function 1316 @param get_hv_fn: function that returns a hypervisor for the given hypervisor 1317 name; optional parameter to increase testability 1318 1319 @rtype: list 1320 @return: a list of all running instances on the current node 1321 - instance1.example.com 1322 - instance2.example.com 1323 1324 """ 1325 results = [] 1326 try: 1327 hv = get_hv_fn(hname) 1328 names = hv.ListInstances(hvparams=hvparams) 1329 results.extend(names) 1330 except errors.HypervisorError, err: 1331 _Fail("Error enumerating instances (hypervisor %s): %s", 1332 hname, err, exc=True) 1333 return results
1334
def GetInstanceList(hypervisor_list, all_hvparams=None,
                    get_hv_fn=hypervisor.GetHypervisor):
  """Provides a list of instances.

  @type hypervisor_list: list
  @param hypervisor_list: the list of hypervisors to query information
  @type all_hvparams: dict of dict of strings
  @param all_hvparams: a dictionary mapping hypervisor types to respective
    cluster-wide hypervisor parameters; if C{None}, the per-hypervisor
    defaults are used
  @type get_hv_fn: function
  @param get_hv_fn: function that returns a hypervisor for the given hypervisor
    name; optional parameter to increase testability

  @rtype: list
  @return: a list of all running instances on the current node
    - instance1.example.com
    - instance2.example.com

  """
  results = []
  for hname in hypervisor_list:
    # The declared default all_hvparams=None used to crash with a
    # TypeError on subscription; fall back to hvparams=None in that
    # case (GetInstanceListForHypervisor accepts it as its own default)
    if all_hvparams is None:
      hvparams = None
    else:
      hvparams = all_hvparams[hname]
    results.extend(GetInstanceListForHypervisor(hname, hvparams=hvparams,
                                                get_hv_fn=get_hv_fn))
  return results
1361
def GetInstanceInfo(instance, hname, hvparams=None):
  """Gives back the information about an instance as a dictionary.

  @type instance: string
  @param instance: the instance name
  @type hname: string
  @param hname: the hypervisor type of the instance
  @type hvparams: dict of strings
  @param hvparams: the instance's hvparams

  @rtype: dict
  @return: dictionary with the following keys:
      - memory: memory size of instance (int)
      - state: xen state of instance (string)
      - time: cpu time of instance (float)
      - vcpus: the number of vcpus (int)

  """
  iinfo = hypervisor.GetHypervisor(hname).GetInstanceInfo(instance,
                                                          hvparams=hvparams)
  # An unknown instance yields an empty dictionary
  if iinfo is None:
    return {}

  return {
    "memory": iinfo[2],
    "vcpus": iinfo[3],
    "state": iinfo[4],
    "time": iinfo[5],
    }
1392
def GetInstanceMigratable(instance):
  """Checks whether an instance can be migrated.

  Note: the old docstring advertised a C{(result, description)} tuple
  return value, but the function has no return value: it raises if the
  instance cannot be migrated and merely logs warnings for missing
  disk symlinks.

  @type instance: L{objects.Instance}
  @param instance: object representing the instance to be checked.
  @rtype: None
  @raise RPCFail: if the instance is not running on this node

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  iname = instance.name
  if iname not in hyper.ListInstances(instance.hvparams):
    _Fail("Instance %s is not running", iname)

  for idx in range(len(instance.disks)):
    link_name = _GetBlockDevSymlinkPath(iname, idx)
    if not os.path.islink(link_name):
      # A missing symlink is only logged; it does not make the instance
      # non-migratable
      logging.warning("Instance %s is missing symlink %s for disk %d",
                      iname, link_name, idx)
1416
def GetAllInstancesInfo(hypervisor_list, all_hvparams):
  """Gather data about all instances.

  This is the equivalent of L{GetInstanceInfo}, except that it
  computes data for all instances at once, thus being faster if one
  needs data about more than one instance.

  @type hypervisor_list: list
  @param hypervisor_list: list of hypervisors to query for instance data
  @type all_hvparams: dict of dict of strings
  @param all_hvparams: mapping of hypervisor names to hvparams

  @rtype: dict
  @return: dictionary of instance: data, with data having the following keys:
      - memory: memory size of instance (int)
      - state: xen state of instance (string)
      - time: cpu time of instance (float)
      - vcpus: the number of vcpus

  """
  output = {}

  for hname in hypervisor_list:
    hvparams = all_hvparams[hname]
    iinfo = hypervisor.GetHypervisor(hname).GetAllInstancesInfo(hvparams)
    if not iinfo:
      continue
    for (name, _, memory, vcpus, state, times) in iinfo:
      data = {
        "memory": memory,
        "vcpus": vcpus,
        "state": state,
        "time": times,
        }
      previous = output.get(name, None)
      if previous is not None:
        # we only check static parameters, like memory and vcpus,
        # and not state and time which can change between the
        # invocations of the different hypervisors
        for key in ("memory", "vcpus"):
          if data[key] != previous[key]:
            _Fail("Instance %s is running twice"
                  " with different parameters", name)
      output[name] = data

  return output
1462
def _InstanceLogName(kind, os_name, instance, component):
  """Compute the OS log filename for a given instance and operation.

  The instance name and os name are passed in as strings since not all
  operations have these as part of an instance object.

  @type kind: string
  @param kind: the operation type (e.g. add, import, etc.)
  @type os_name: string
  @param os_name: the os name
  @type instance: string
  @param instance: the name of the instance being imported/added/etc.
  @type component: string or None
  @param component: the name of the component of the instance being
      transferred

  """
  # TODO: Use tempfile.mkstemp to create unique filename
  if component:
    # Components must not introduce extra path elements
    assert "/" not in component
    suffix = "-%s" % component
  else:
    suffix = ""

  basename = "%s-%s-%s%s-%s.log" % (kind, os_name, instance, suffix,
                                    utils.TimestampForFilename())
  return utils.PathJoin(pathutils.LOG_OS_DIR, basename)
1490
def InstanceOsAdd(instance, reinstall, debug):
  """Add an OS to an instance.

  @type instance: L{objects.Instance}
  @param instance: Instance whose OS is to be installed
  @type reinstall: boolean
  @param reinstall: whether this is an instance reinstall
  @type debug: integer
  @param debug: debug level, passed to the OS scripts
  @rtype: None

  """
  inst_os = OSFromDisk(instance.os)

  create_env = OSEnvironment(instance, inst_os, debug)
  if reinstall:
    create_env["INSTANCE_REINSTALL"] = "1"

  logfile = _InstanceLogName("add", instance.os, instance.name, None)

  result = utils.RunCmd([inst_os.create_script], env=create_env,
                        cwd=inst_os.path, output=logfile, reset_env=True)
  if not result.failed:
    return

  # Script failed: log the details and report the tail of the logfile
  logging.error("os create command '%s' returned error: %s, logfile: %s,"
                " output: %s", result.cmd, result.fail_reason, logfile,
                result.output)
  lines = [utils.SafeEncode(val)
           for val in utils.TailFile(logfile, lines=20)]
  _Fail("OS create script failed (%s), last lines in the"
        " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)
1522
def RunRenameInstance(instance, old_name, debug):
  """Run the OS rename script for an instance.

  @type instance: L{objects.Instance}
  @param instance: Instance whose OS is to be installed
  @type old_name: string
  @param old_name: previous instance name
  @type debug: integer
  @param debug: debug level, passed to the OS scripts
  @rtype: None
  @raise RPCFail: if the rename script fails

  """
  inst_os = OSFromDisk(instance.os)

  rename_env = OSEnvironment(instance, inst_os, debug)
  rename_env["OLD_INSTANCE_NAME"] = old_name

  logfile = _InstanceLogName("rename", instance.os,
                             "%s-%s" % (old_name, instance.name), None)

  result = utils.RunCmd([inst_os.rename_script], env=rename_env,
                        cwd=inst_os.path, output=logfile, reset_env=True)

  if result.failed:
    # The log message used to say "os create command" (copy-paste from
    # InstanceOsAdd); this is the rename script
    logging.error("os rename command '%s' returned error: %s output: %s",
                  result.cmd, result.fail_reason, result.output)
    lines = [utils.SafeEncode(val)
             for val in utils.TailFile(logfile, lines=20)]
    _Fail("OS rename script failed (%s), last lines in the"
          " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)
1555
def _GetBlockDevSymlinkPath(instance_name, idx, _dir=None):
  """Returns symlink path for block device.

  The link name is built from the instance name, the disk separator
  and the disk index; C{_dir} defaults to the cluster's disk links
  directory and exists for testability.

  """
  if _dir is None:
    _dir = pathutils.DISK_LINKS_DIR

  basename = "%s%s%s" % (instance_name, constants.DISK_SEPARATOR, idx)
  return utils.PathJoin(_dir, basename)
1567
1568 1569 -def _SymlinkBlockDev(instance_name, device_path, idx):
1570 """Set up symlinks to a instance's block device. 1571 1572 This is an auxiliary function run when an instance is start (on the primary 1573 node) or when an instance is migrated (on the target node). 1574 1575 1576 @param instance_name: the name of the target instance 1577 @param device_path: path of the physical block device, on the node 1578 @param idx: the disk index 1579 @return: absolute path to the disk's symlink 1580 1581 """ 1582 link_name = _GetBlockDevSymlinkPath(instance_name, idx) 1583 try: 1584 os.symlink(device_path, link_name) 1585 except OSError, err: 1586 if err.errno == errno.EEXIST: 1587 if (not os.path.islink(link_name) or 1588 os.readlink(link_name) != device_path): 1589 os.remove(link_name) 1590 os.symlink(device_path, link_name) 1591 else: 1592 raise 1593 1594 return link_name
1595 1608
1609 1610 -def _GatherAndLinkBlockDevs(instance):
1611 """Set up an instance's block device(s). 1612 1613 This is run on the primary node at instance startup. The block 1614 devices must be already assembled. 1615 1616 @type instance: L{objects.Instance} 1617 @param instance: the instance whose disks we shoul assemble 1618 @rtype: list 1619 @return: list of (disk_object, device_path) 1620 1621 """ 1622 block_devices = [] 1623 for idx, disk in enumerate(instance.disks): 1624 device = _RecursiveFindBD(disk) 1625 if device is None: 1626 raise errors.BlockDeviceError("Block device '%s' is not set up." % 1627 str(disk)) 1628 device.Open() 1629 try: 1630 link_name = _SymlinkBlockDev(instance.name, device.dev_path, idx) 1631 except OSError, e: 1632 raise errors.BlockDeviceError("Cannot create block device symlink: %s" % 1633 e.strerror) 1634 1635 block_devices.append((disk, link_name)) 1636 1637 return block_devices
1638
1639 1640 -def StartInstance(instance, startup_paused, reason, store_reason=True):
1641 """Start an instance. 1642 1643 @type instance: L{objects.Instance} 1644 @param instance: the instance object 1645 @type startup_paused: bool 1646 @param instance: pause instance at startup? 1647 @type reason: list of reasons 1648 @param reason: the reason trail for this startup 1649 @type store_reason: boolean 1650 @param store_reason: whether to store the shutdown reason trail on file 1651 @rtype: None 1652 1653 """ 1654 running_instances = GetInstanceListForHypervisor(instance.hypervisor, 1655 instance.hvparams) 1656 1657 if instance.name in running_instances: 1658 logging.info("Instance %s already running, not starting", instance.name) 1659 return 1660 1661 try: 1662 block_devices = _GatherAndLinkBlockDevs(instance) 1663 hyper = hypervisor.GetHypervisor(instance.hypervisor) 1664 hyper.StartInstance(instance, block_devices, startup_paused) 1665 if store_reason: 1666 _StoreInstReasonTrail(instance.name, reason) 1667 except errors.BlockDeviceError, err: 1668 _Fail("Block device error: %s", err, exc=True) 1669 except errors.HypervisorError, err: 1670 _RemoveBlockDevLinks(instance.name, instance.disks) 1671 _Fail("Hypervisor error: %s", err, exc=True)
1672
def InstanceShutdown(instance, timeout, reason, store_reason=True):
  """Shut an instance down.

  @note: this functions uses polling with a hardcoded timeout.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @type timeout: integer
  @param timeout: maximum timeout for soft shutdown
  @type reason: list of reasons
  @param reason: the reason trail for this shutdown
  @type store_reason: boolean
  @param store_reason: whether to store the shutdown reason trail on file
  @rtype: None

  """
  hv_name = instance.hypervisor
  hyper = hypervisor.GetHypervisor(hv_name)
  iname = instance.name

  if instance.name not in hyper.ListInstances(instance.hvparams):
    logging.info("Instance %s not running, doing nothing", iname)
    return

  # Callable passed to utils.Retry below: attempts a soft shutdown and
  # raises RetryAgain while the instance is still listed by the
  # hypervisor; "tried_once" lets the hypervisor distinguish the first
  # attempt from retries
  class _TryShutdown:
    def __init__(self):
      self.tried_once = False

    def __call__(self):
      # Already gone: success, nothing more to do
      if iname not in hyper.ListInstances(instance.hvparams):
        return

      try:
        hyper.StopInstance(instance, retry=self.tried_once, timeout=timeout)
        if store_reason:
          _StoreInstReasonTrail(instance.name, reason)
      except errors.HypervisorError, err:
        if iname not in hyper.ListInstances(instance.hvparams):
          # if the instance is no longer existing, consider this a
          # success and go to cleanup
          return

        _Fail("Failed to stop instance %s: %s", iname, err)

      self.tried_once = True

      # Still running; ask utils.Retry to call us again
      raise utils.RetryAgain()

  try:
    utils.Retry(_TryShutdown(), 5, timeout)
  except utils.RetryTimeout:
    # the shutdown did not succeed
    logging.error("Shutdown of '%s' unsuccessful, forcing", iname)

    try:
      hyper.StopInstance(instance, force=True)
    except errors.HypervisorError, err:
      if iname in hyper.ListInstances(instance.hvparams):
        # only raise an error if the instance still exists, otherwise
        # the error could simply be "instance ... unknown"!
        _Fail("Failed to force stop instance %s: %s", iname, err)

    # Give the hypervisor a moment to act on the forced stop before the
    # final check
    time.sleep(1)

    if iname in hyper.ListInstances(instance.hvparams):
      _Fail("Could not shutdown instance %s even by destroy", iname)

  # Cleanup failures are non-fatal: the instance is already down
  try:
    hyper.CleanupInstance(instance.name)
  except errors.HypervisorError, err:
    logging.warning("Failed to execute post-shutdown cleanup step: %s", err)

  _RemoveBlockDevLinks(iname, instance.disks)
1748 1749 -def InstanceReboot(instance, reboot_type, shutdown_timeout, reason):
1750 """Reboot an instance. 1751 1752 @type instance: L{objects.Instance} 1753 @param instance: the instance object to reboot 1754 @type reboot_type: str 1755 @param reboot_type: the type of reboot, one the following 1756 constants: 1757 - L{constants.INSTANCE_REBOOT_SOFT}: only reboot the 1758 instance OS, do not recreate the VM 1759 - L{constants.INSTANCE_REBOOT_HARD}: tear down and 1760 restart the VM (at the hypervisor level) 1761 - the other reboot type (L{constants.INSTANCE_REBOOT_FULL}) is 1762 not accepted here, since that mode is handled differently, in 1763 cmdlib, and translates into full stop and start of the 1764 instance (instead of a call_instance_reboot RPC) 1765 @type shutdown_timeout: integer 1766 @param shutdown_timeout: maximum timeout for soft shutdown 1767 @type reason: list of reasons 1768 @param reason: the reason trail for this reboot 1769 @rtype: None 1770 1771 """ 1772 running_instances = GetInstanceListForHypervisor(instance.hypervisor, 1773 instance.hvparams) 1774 1775 if instance.name not in running_instances: 1776 _Fail("Cannot reboot instance %s that is not running", instance.name) 1777 1778 hyper = hypervisor.GetHypervisor(instance.hypervisor) 1779 if reboot_type == constants.INSTANCE_REBOOT_SOFT: 1780 try: 1781 hyper.RebootInstance(instance) 1782 except errors.HypervisorError, err: 1783 _Fail("Failed to soft reboot instance %s: %s", instance.name, err) 1784 elif reboot_type == constants.INSTANCE_REBOOT_HARD: 1785 try: 1786 InstanceShutdown(instance, shutdown_timeout, reason, store_reason=False) 1787 result = StartInstance(instance, False, reason, store_reason=False) 1788 _StoreInstReasonTrail(instance.name, reason) 1789 return result 1790 except errors.HypervisorError, err: 1791 _Fail("Failed to hard reboot instance %s: %s", instance.name, err) 1792 else: 1793 _Fail("Invalid reboot_type received: %s", reboot_type)
1794
1795 1796 -def InstanceBalloonMemory(instance, memory):
1797 """Resize an instance's memory. 1798 1799 @type instance: L{objects.Instance} 1800 @param instance: the instance object 1801 @type memory: int 1802 @param memory: new memory amount in MB 1803 @rtype: None 1804 1805 """ 1806 hyper = hypervisor.GetHypervisor(instance.hypervisor) 1807 running = hyper.ListInstances(instance.hvparams) 1808 if instance.name not in running: 1809 logging.info("Instance %s is not running, cannot balloon", instance.name) 1810 return 1811 try: 1812 hyper.BalloonInstanceMemory(instance, memory) 1813 except errors.HypervisorError, err: 1814 _Fail("Failed to balloon instance memory: %s", err, exc=True)
1815
1816 1817 -def MigrationInfo(instance):
1818 """Gather information about an instance to be migrated. 1819 1820 @type instance: L{objects.Instance} 1821 @param instance: the instance definition 1822 1823 """ 1824 hyper = hypervisor.GetHypervisor(instance.hypervisor) 1825 try: 1826 info = hyper.MigrationInfo(instance) 1827 except errors.HypervisorError, err: 1828 _Fail("Failed to fetch migration information: %s", err, exc=True) 1829 return info
1830
1831 1832 -def AcceptInstance(instance, info, target):
1833 """Prepare the node to accept an instance. 1834 1835 @type instance: L{objects.Instance} 1836 @param instance: the instance definition 1837 @type info: string/data (opaque) 1838 @param info: migration information, from the source node 1839 @type target: string 1840 @param target: target host (usually ip), on this node 1841 1842 """ 1843 # TODO: why is this required only for DTS_EXT_MIRROR? 1844 if instance.disk_template in constants.DTS_EXT_MIRROR: 1845 # Create the symlinks, as the disks are not active 1846 # in any way 1847 try: 1848 _GatherAndLinkBlockDevs(instance) 1849 except errors.BlockDeviceError, err: 1850 _Fail("Block device error: %s", err, exc=True) 1851 1852 hyper = hypervisor.GetHypervisor(instance.hypervisor) 1853 try: 1854 hyper.AcceptInstance(instance, info, target) 1855 except errors.HypervisorError, err: 1856 if instance.disk_template in constants.DTS_EXT_MIRROR: 1857 _RemoveBlockDevLinks(instance.name, instance.disks) 1858 _Fail("Failed to accept instance: %s", err, exc=True)
1859
1860 1861 -def FinalizeMigrationDst(instance, info, success):
1862 """Finalize any preparation to accept an instance. 1863 1864 @type instance: L{objects.Instance} 1865 @param instance: the instance definition 1866 @type info: string/data (opaque) 1867 @param info: migration information, from the source node 1868 @type success: boolean 1869 @param success: whether the migration was a success or a failure 1870 1871 """ 1872 hyper = hypervisor.GetHypervisor(instance.hypervisor) 1873 try: 1874 hyper.FinalizeMigrationDst(instance, info, success) 1875 except errors.HypervisorError, err: 1876 _Fail("Failed to finalize migration on the target node: %s", err, exc=True)
1877
1878 1879 -def MigrateInstance(cluster_name, instance, target, live):
1880 """Migrates an instance to another node. 1881 1882 @type cluster_name: string 1883 @param cluster_name: name of the cluster 1884 @type instance: L{objects.Instance} 1885 @param instance: the instance definition 1886 @type target: string 1887 @param target: the target node name 1888 @type live: boolean 1889 @param live: whether the migration should be done live or not (the 1890 interpretation of this parameter is left to the hypervisor) 1891 @raise RPCFail: if migration fails for some reason 1892 1893 """ 1894 hyper = hypervisor.GetHypervisor(instance.hypervisor) 1895 1896 try: 1897 hyper.MigrateInstance(cluster_name, instance, target, live) 1898 except errors.HypervisorError, err: 1899 _Fail("Failed to migrate instance: %s", err, exc=True)
1900
1901 1902 -def FinalizeMigrationSource(instance, success, live):
1903 """Finalize the instance migration on the source node. 1904 1905 @type instance: L{objects.Instance} 1906 @param instance: the instance definition of the migrated instance 1907 @type success: bool 1908 @param success: whether the migration succeeded or not 1909 @type live: bool 1910 @param live: whether the user requested a live migration or not 1911 @raise RPCFail: If the execution fails for some reason 1912 1913 """ 1914 hyper = hypervisor.GetHypervisor(instance.hypervisor) 1915 1916 try: 1917 hyper.FinalizeMigrationSource(instance, success, live) 1918 except Exception, err: # pylint: disable=W0703 1919 _Fail("Failed to finalize the migration on the source node: %s", err, 1920 exc=True)
1921
1922 1923 -def GetMigrationStatus(instance):
1924 """Get the migration status 1925 1926 @type instance: L{objects.Instance} 1927 @param instance: the instance that is being migrated 1928 @rtype: L{objects.MigrationStatus} 1929 @return: the status of the current migration (one of 1930 L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional 1931 progress info that can be retrieved from the hypervisor 1932 @raise RPCFail: If the migration status cannot be retrieved 1933 1934 """ 1935 hyper = hypervisor.GetHypervisor(instance.hypervisor) 1936 try: 1937 return hyper.GetMigrationStatus(instance) 1938 except Exception, err: # pylint: disable=W0703 1939 _Fail("Failed to get migration status: %s", err, exc=True)
1940
def BlockdevCreate(disk, size, owner, on_primary, info, excl_stor):
  """Creates a block device for an instance.

  @type disk: L{objects.Disk}
  @param disk: the object describing the disk we should create
  @type size: int
  @param size: the size of the physical underlying device, in MiB
  @type owner: str
  @param owner: the name of the instance for which disk is created,
      used for device cache data
  @type on_primary: boolean
  @param on_primary: indicates if it is the primary node or not
  @type info: string
  @param info: string that will be sent to the physical device
      creation, used for example to set (LVM) tags on LVs
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active

  @return: the new unique_id of the device (this can sometime be
      computed only after creation), or None. On secondary nodes,
      it's not required to return anything.

  """
  # TODO: remove the obsolete "size" argument
  # pylint: disable=W0613
  clist = []
  if disk.children:
    # assemble all children first so the new device can be created on
    # top of them; a failing child aborts the whole creation via _Fail
    for child in disk.children:
      try:
        crdev = _RecursiveAssembleBD(child, owner, on_primary)
      except errors.BlockDeviceError, err:
        _Fail("Can't assemble device %s: %s", child, err)
      if on_primary or disk.AssembleOnSecondary():
        # we need the children open in case the device itself has to
        # be assembled
        try:
          # pylint: disable=E1103
          crdev.Open()
        except errors.BlockDeviceError, err:
          _Fail("Can't make child '%s' read-write: %s", child, err)
      clist.append(crdev)

  try:
    device = bdev.Create(disk, clist, excl_stor)
  except errors.BlockDeviceError, err:
    _Fail("Can't create block device: %s", err)

  if on_primary or disk.AssembleOnSecondary():
    try:
      device.Assemble()
    except errors.BlockDeviceError, err:
      _Fail("Can't assemble device after creation, unusual event: %s", err)
    if on_primary or disk.OpenOnSecondary():
      try:
        device.Open(force=True)
      except errors.BlockDeviceError, err:
        _Fail("Can't make device r/w after creation, unusual event: %s", err)
    # record the newly assembled device in the node-level device cache
    DevCacheManager.UpdateCache(device.dev_path, owner,
                                on_primary, disk.iv_name)

  device.SetInfo(info)

  return device.unique_id
2005
def _WipeDevice(path, offset, size):
  """Overwrite a region of a block device with zeros via "dd".

  @param path: The path to the device to wipe
  @param offset: The offset in MiB in the file
  @param size: The size in MiB to write

  """
  # Ganeti sizes are always in Mebibytes, so "dd" must run with a 1 MiB
  # block size for the seek/count values below to be interpreted
  # correctly; a different block size would require rescaling them.
  block_size = 1024 * 1024

  wipe_cmd = [
    constants.DD_CMD,
    "if=/dev/zero",
    "seek=%d" % offset,
    "bs=%s" % block_size,
    "oflag=direct",
    "of=%s" % path,
    "count=%d" % size,
    ]
  result = utils.RunCmd(wipe_cmd)

  if result.failed:
    _Fail("Wipe command '%s' exited with error: %s; output: %s", result.cmd,
          result.fail_reason, result.output)
2028
def BlockdevWipe(disk, offset, size):
  """Wipes (zeroes) a region of a block device.

  @type disk: L{objects.Disk}
  @param disk: the disk object we want to wipe
  @type offset: int
  @param offset: The offset in MiB in the file
  @type size: int
  @param size: The size in MiB to write

  """
  bd = None
  try:
    bd = _RecursiveFindBD(disk)
  except errors.BlockDeviceError:
    pass

  if not bd:
    _Fail("Cannot execute wipe for device %s: device not found", disk.iv_name)

  # Cross-check the requested region against the actual device size
  if offset < 0:
    _Fail("Negative offset")
  if size < 0:
    _Fail("Negative size")
  if offset > bd.size:
    _Fail("Offset is bigger than device size")
  if (offset + size) > bd.size:
    _Fail("The provided offset and size to wipe is bigger than device size")

  _WipeDevice(bd.dev_path, offset, size)
2060
def BlockdevPauseResumeSync(disks, pause):
  """Pause or resume the sync of the block devices.

  @type disks: list of L{objects.Disk}
  @param disks: the disks object we want to pause/resume
  @type pause: bool
  @param pause: Whether to pause or resume the sync

  @return: list of (status, error-message-or-None) pairs, one per disk

  """
  outcome = []
  for disk in disks:
    bd = None
    try:
      bd = _RecursiveFindBD(disk)
    except errors.BlockDeviceError:
      pass

    if not bd:
      outcome.append((False, ("Cannot change sync for device %s:"
                              " device not found" % disk.iv_name)))
      continue

    result = bd.PauseResumeSync(pause)
    if result:
      outcome.append((result, None))
    else:
      verb = "Pause" if pause else "Resume"
      outcome.append((result, "%s for device %s failed" % (verb,
                                                           disk.iv_name)))

  return outcome
2095
def BlockdevRemove(disk):
  """Remove a block device.

  @note: This is intended to be called recursively.

  @type disk: L{objects.Disk}
  @param disk: the disk object we should remove
  @rtype: boolean
  @return: the success of the operation

  """
  errs = []
  try:
    rdev = _RecursiveFindBD(disk)
  except errors.BlockDeviceError as err:
    # probably can't attach
    logging.info("Can't attach to device %s in remove", disk)
    rdev = None

  if rdev is not None:
    r_path = rdev.dev_path
    try:
      rdev.Remove()
    except errors.BlockDeviceError as err:
      errs.append(str(err))
    if not errs:
      # only drop the cache entry if the removal actually succeeded
      DevCacheManager.RemoveCache(r_path)

  # recurse into the children, collecting (not aborting on) errors
  for child in (disk.children or []):
    try:
      BlockdevRemove(child)
    except RPCFail as err:
      errs.append(str(err))

  if errs:
    _Fail("; ".join(errs))
2133
def _RecursiveAssembleBD(disk, owner, as_primary):
  """Activate a block device for an instance.

  This is run on the primary and secondary nodes for an instance.

  @note: this function is called recursively.

  @type disk: L{objects.Disk}
  @param disk: the disk we try to assemble
  @type owner: str
  @param owner: the name of the instance which owns the disk
  @type as_primary: boolean
  @param as_primary: if we should make the block device
      read/write

  @return: the assembled device or None (in case no device
      was assembled)
  @raise errors.BlockDeviceError: in case there is an error
      during the activation of the children or the device
      itself

  """
  children = []
  if disk.children:
    # compute how many children are allowed to fail activation:
    # ChildrenNeeded() == -1 means every child is required, otherwise up
    # to len(children) - needed children may be missing (None entries)
    mcn = disk.ChildrenNeeded()
    if mcn == -1:
      mcn = 0 # max number of Nones allowed
    else:
      mcn = len(disk.children) - mcn # max number of Nones
    for chld_disk in disk.children:
      try:
        cdev = _RecursiveAssembleBD(chld_disk, owner, as_primary)
      except errors.BlockDeviceError, err:
        if children.count(None) >= mcn:
          # too many children have already failed, give up
          raise
        cdev = None
        logging.error("Error in child activation (but continuing): %s",
                      str(err))
      children.append(cdev)

  if as_primary or disk.AssembleOnSecondary():
    r_dev = bdev.Assemble(disk, children)
    result = r_dev
    if as_primary or disk.OpenOnSecondary():
      r_dev.Open()
    # remember the assembled device in the node-level cache
    DevCacheManager.UpdateCache(r_dev.dev_path, owner,
                                as_primary, disk.iv_name)

  else:
    result = True
  return result
2186
def BlockdevAssemble(disk, owner, as_primary, idx):
  """Activate a block device for an instance.

  This is a wrapper over _RecursiveAssembleBD.

  @rtype: str or boolean
  @return: a C{/dev/...} path for primary nodes, and
      C{True} for secondary nodes

  """
  try:
    device = _RecursiveAssembleBD(disk, owner, as_primary)
    if isinstance(device, BlockDev):
      # pylint: disable=E1103
      device = device.dev_path
    if as_primary:
      # on the primary, publish the device under a stable symlink
      _SymlinkBlockDev(owner, device, idx)
  except errors.BlockDeviceError as err:
    _Fail("Error while assembling disk: %s", err, exc=True)
  except OSError as err:
    _Fail("Error while symlinking disk: %s", err, exc=True)

  return device
2211
def BlockdevShutdown(disk):
  """Shut down a block device.

  First, if the device is assembled (Attach() is successful), then
  the device is shutdown. Then the children of the device are
  shutdown.

  This function is called recursively. Note that we don't cache the
  children or such, as opposed to assemble, shutdown of different
  devices doesn't require that the upper device was active.

  @type disk: L{objects.Disk}
  @param disk: the description of the disk we should
      shutdown
  @rtype: None

  """
  errs = []
  rdev = _RecursiveFindBD(disk)
  if rdev is not None:
    r_path = rdev.dev_path
    try:
      rdev.Shutdown()
      DevCacheManager.RemoveCache(r_path)
    except errors.BlockDeviceError as err:
      errs.append(str(err))

  # shut down children as well, collecting (not aborting on) errors
  for child in (disk.children or []):
    try:
      BlockdevShutdown(child)
    except RPCFail as err:
      errs.append(str(err))

  if errs:
    _Fail("; ".join(errs))
2249
def BlockdevAddchildren(parent_cdev, new_cdevs):
  """Extend a mirrored block device.

  @type parent_cdev: L{objects.Disk}
  @param parent_cdev: the disk to which we should add children
  @type new_cdevs: list of L{objects.Disk}
  @param new_cdevs: the list of children which we should add
  @rtype: None

  """
  parent_bdev = _RecursiveFindBD(parent_cdev)
  if parent_bdev is None:
    _Fail("Can't find parent device '%s' in add children", parent_cdev)
  new_bdevs = [_RecursiveFindBD(disk) for disk in new_cdevs]
  if None in new_bdevs:
    # at least one child device could not be found on this node
    _Fail("Can't find new device(s) to add: %s:%s", new_bdevs, new_cdevs)
  parent_bdev.AddChildren(new_bdevs)
2268
def BlockdevRemovechildren(parent_cdev, new_cdevs):
  """Shrink a mirrored block device.

  @type parent_cdev: L{objects.Disk}
  @param parent_cdev: the disk from which we should remove children
  @type new_cdevs: list of L{objects.Disk}
  @param new_cdevs: the list of children which we should remove
  @rtype: None

  """
  parent_bdev = _RecursiveFindBD(parent_cdev)
  if parent_bdev is None:
    _Fail("Can't find parent device '%s' in remove children", parent_cdev)
  dev_paths = []
  for child in new_cdevs:
    static_path = child.StaticDevPath()
    if static_path is None:
      # no static path: the device must be looked up on this node
      child_bdev = _RecursiveFindBD(child)
      if child_bdev is None:
        _Fail("Can't find device %s while removing children", child)
      dev_paths.append(child_bdev.dev_path)
    else:
      if not utils.IsNormAbsPath(static_path):
        _Fail("Strange path returned from StaticDevPath: '%s'", static_path)
      dev_paths.append(static_path)
  parent_bdev.RemoveChildren(dev_paths)
2297
def BlockdevGetmirrorstatus(disks):
  """Get the mirroring status of a list of devices.

  @type disks: list of L{objects.Disk}
  @param disks: the list of disks which we should query
  @rtype: list
  @return: List of L{objects.BlockDevStatus}, one for each disk
  @raise errors.BlockDeviceError: if any of the disks cannot be
      found

  """
  def _OneStatus(dsk):
    # a missing device aborts the whole query via _Fail
    rbd = _RecursiveFindBD(dsk)
    if rbd is None:
      _Fail("Can't find device %s", dsk)
    return rbd.CombinedSyncStatus()

  return [_OneStatus(dsk) for dsk in disks]
2319
def BlockdevGetmirrorstatusMulti(disks):
  """Get the mirroring status of a list of devices.

  @type disks: list of L{objects.Disk}
  @param disks: the list of disks which we should query
  @rtype: list
  @return: List of tuples, (bool, status), one for each disk; bool denotes
      success/failure, status is L{objects.BlockDevStatus} on success, string
      otherwise

  """
  result = []
  for disk in disks:
    try:
      rbd = _RecursiveFindBD(disk)
      if rbd is None:
        result.append((False, "Can't find device %s" % disk))
        continue
      status = rbd.CombinedSyncStatus()
    except errors.BlockDeviceError as err:
      # per-disk failures are reported, not propagated
      logging.exception("Error while getting disk status")
      result.append((False, str(err)))
    else:
      result.append((True, status))

  assert len(disks) == len(result)

  return result
2350
def _RecursiveFindBD(disk):
  """Check if a device is activated.

  If so, return information about the real device.

  @type disk: L{objects.Disk}
  @param disk: the disk object we need to find

  @return: None if the device can't be found,
      otherwise the device instance

  """
  # resolve all children first (depth-first), then the device itself
  children = [_RecursiveFindBD(chdisk) for chdisk in (disk.children or [])]
  return bdev.FindDevice(disk, children)
2370
def _OpenRealBD(disk):
  """Opens the underlying block device of a disk.

  @type disk: L{objects.Disk}
  @param disk: the disk object we want to open
  @return: the opened device instance
  @raise RPCFail: if the device is not set up on this node

  """
  rbd = _RecursiveFindBD(disk)
  if rbd is None:
    _Fail("Block device '%s' is not set up", disk)

  rbd.Open()

  return rbd
2386
def BlockdevFind(disk):
  """Check if a device is activated.

  If it is, return information about the real device.

  @type disk: L{objects.Disk}
  @param disk: the disk to find
  @rtype: None or objects.BlockDevStatus
  @return: None if the disk cannot be found, otherwise the current
      sync status information

  """
  try:
    rbd = _RecursiveFindBD(disk)
  except errors.BlockDeviceError as err:
    _Fail("Failed to find device: %s", err, exc=True)

  if rbd is None:
    return None

  return rbd.GetSyncStatus()
2409
def BlockdevGetdimensions(disks):
  """Computes the size of the given disks.

  If a disk is not found, returns None instead.

  @type disks: list of L{objects.Disk}
  @param disks: the list of disk to compute the size for
  @rtype: list
  @return: list with elements None if the disk cannot be found,
      otherwise the pair (size, spindles), where spindles is None if the
      device doesn't support that

  """
  dimensions = []
  for disk in disks:
    try:
      rbd = _RecursiveFindBD(disk)
    except errors.BlockDeviceError:
      # lookup errors are reported as a missing device
      rbd = None
    if rbd is None:
      dimensions.append(None)
    else:
      dimensions.append(rbd.GetActualDimensions())
  return dimensions
2436
def BlockdevExport(disk, dest_node_ip, dest_path, cluster_name):
  """Export a block device to a remote node.

  The device contents are piped through "dd" over SSH to a "dd" process
  writing into C{dest_path} on the destination node.

  @type disk: L{objects.Disk}
  @param disk: the description of the disk to export
  @type dest_node_ip: str
  @param dest_node_ip: the destination node IP to export to
  @type dest_path: str
  @param dest_path: the destination path on the target node
  @type cluster_name: str
  @param cluster_name: the cluster name, needed for SSH hostalias
  @rtype: None

  """
  real_disk = _OpenRealBD(disk)

  # the block size on the read dd is 1MiB to match our units
  expcmd = utils.BuildShellCmd("set -e; set -o pipefail; "
                               "dd if=%s bs=1048576 count=%s",
                               real_disk.dev_path, str(disk.size))

  # we set here a smaller block size as, due to ssh buffering, more
  # than 64-128k will mostly be ignored; we use nocreat to fail if the
  # device is not already there or we pass a wrong path; we use
  # notrunc to not attempt to truncate an LV device; we use oflag=dsync
  # to not buffer too much memory; this means that at best, we flush
  # every 64k, which will not be very fast
  destcmd = utils.BuildShellCmd("dd of=%s conv=nocreat,notrunc bs=65536"
                                " oflag=dsync", dest_path)

  remotecmd = _GetSshRunner(cluster_name).BuildCmd(dest_node_ip,
                                                   constants.SSH_LOGIN_USER,
                                                   destcmd)

  # all commands have been checked, so we're safe to combine them
  command = "|".join([expcmd, utils.ShellQuoteArgs(remotecmd)])

  result = utils.RunCmd(["bash", "-c", command])

  if result.failed:
    _Fail("Disk copy command '%s' returned error: %s"
          " output: %s", command, result.fail_reason, result.output)
2480
def UploadFile(file_name, data, mode, uid, gid, atime, mtime):
  """Write a file to the filesystem.

  This allows the master to overwrite(!) a file. It will only perform
  the operation if the file belongs to a list of configuration files.

  @type file_name: str
  @param file_name: the target file name
  @type data: str
  @param data: the new contents of the file
  @type mode: int
  @param mode: the mode to give the file (can be None)
  @type uid: string
  @param uid: the owner of the file
  @type gid: string
  @param gid: the group of the file
  @type atime: float
  @param atime: the atime to set on the file (can be None)
  @type mtime: float
  @param mtime: the mtime to set on the file (can be None)
  @rtype: None

  """
  file_name = vcluster.LocalizeVirtualPath(file_name)

  if not os.path.isabs(file_name):
    _Fail("Filename passed to UploadFile is not absolute: '%s'", file_name)

  # refuse to touch anything outside the whitelist of config files
  if file_name not in _ALLOWED_UPLOAD_FILES:
    _Fail("Filename passed to UploadFile not in allowed upload targets: '%s'",
          file_name)

  contents = _Decompress(data)

  if not (isinstance(uid, basestring) and isinstance(gid, basestring)):
    _Fail("Invalid username/groupname type")

  # resolve the symbolic user/group names into numeric ids
  getents = runtime.GetEnts()
  utils.SafeWriteFile(file_name, None,
                      data=contents, mode=mode,
                      uid=getents.LookupUser(uid),
                      gid=getents.LookupGroup(gid),
                      atime=atime, mtime=mtime)
2526
def RunOob(oob_program, command, node, timeout):
  """Executes oob_program with given command on given node.

  @param oob_program: The path to the executable oob_program
  @param command: The command to invoke on oob_program
  @param node: The node given as an argument to the program
  @param timeout: Timeout after which we kill the oob program

  @return: stdout
  @raise RPCFail: If execution fails for some reason

  """
  res = utils.RunCmd([oob_program, command, node], timeout=timeout)

  if res.failed:
    _Fail("'%s' failed with reason '%s'; output: %s", res.cmd,
          res.fail_reason, res.output)

  return res.stdout
2547
def _OSOndiskAPIVersion(os_dir):
  """Compute and return the API version of a given OS.

  This function will try to read the API version of the OS residing in
  the 'os_dir' directory.

  @type os_dir: str
  @param os_dir: the directory in which we should look for the OS
  @rtype: tuple
  @return: tuple (status, data) with status denoting the validity and
      data holding either the valid versions or an error message

  """
  api_file = utils.PathJoin(os_dir, constants.OS_API_FILE)

  try:
    st_info = os.stat(api_file)
  except EnvironmentError as err:
    return False, ("Required file '%s' not found under path %s: %s" %
                   (constants.OS_API_FILE, os_dir, utils.ErrnoOrStr(err)))

  # the version file must be a plain regular file
  if not stat.S_ISREG(stat.S_IFMT(st_info.st_mode)):
    return False, ("File '%s' in %s is not a regular file" %
                   (constants.OS_API_FILE, os_dir))

  try:
    raw_lines = utils.ReadFile(api_file).splitlines()
  except EnvironmentError as err:
    return False, ("Error while reading the API version file at %s: %s" %
                   (api_file, utils.ErrnoOrStr(err)))

  # one integer version per line
  try:
    api_versions = [int(line.strip()) for line in raw_lines]
  except (TypeError, ValueError) as err:
    return False, ("API version(s) can't be converted to integer: %s" %
                   str(err))

  return True, api_versions
2587
def DiagnoseOS(top_dirs=None):
  """Compute the validity for all OSes.

  @type top_dirs: list
  @param top_dirs: the list of directories in which to
      search (if not given defaults to
      L{pathutils.OS_SEARCH_PATH})
  @rtype: list of L{objects.OS}
  @return: a list of tuples (name, path, status, diagnose, variants,
      parameters, api_version) for all (potential) OSes under all
      search paths, where:
          - name is the (potential) OS name
          - path is the full path to the OS
          - status True/False is the validity of the OS
          - diagnose is the error message for an invalid OS, otherwise empty
          - variants is a list of supported OS variants, if any
          - parameters is a list of (name, help) parameters, if any
          - api_version is a list of support OS API versions

  """
  if top_dirs is None:
    top_dirs = pathutils.OS_SEARCH_PATH

  result = []
  for dir_name in top_dirs:
    if os.path.isdir(dir_name):
      try:
        f_names = utils.ListVisibleFiles(dir_name)
      except EnvironmentError, err:
        logging.exception("Can't list the OS directory %s: %s", dir_name, err)
        # NOTE(review): this "break" aborts the scan of all remaining
        # search directories, not just the failing one -- confirm intended
        break
      for name in f_names:
        os_path = utils.PathJoin(dir_name, name)
        status, os_inst = _TryOSFromDisk(name, base_dir=dir_name)
        if status:
          diagnose = ""
          variants = os_inst.supported_variants
          parameters = os_inst.supported_parameters
          api_versions = os_inst.api_versions
        else:
          # on failure, _TryOSFromDisk returns the error message instead
          # of an OS object
          diagnose = os_inst
          variants = parameters = api_versions = []
        result.append((name, os_path, status, diagnose, variants,
                       parameters, api_versions))

  return result
2635
def _TryOSFromDisk(name, base_dir=None):
  """Create an OS instance from disk.

  This function will return an OS instance if the given name is a
  valid OS name.

  @type name: str
  @param name: the OS name to search for
  @type base_dir: string
  @keyword base_dir: Base directory containing OS installations.
      Defaults to a search in all the OS_SEARCH_PATH dirs.
  @rtype: tuple
  @return: success and either the OS instance if we find a valid one,
      or error message

  """
  if base_dir is None:
    os_dir = utils.FindFile(name, pathutils.OS_SEARCH_PATH, os.path.isdir)
  else:
    os_dir = utils.FindFile(name, [base_dir], os.path.isdir)

  if os_dir is None:
    return False, "Directory for OS %s not found in search path" % name

  status, api_versions = _OSOndiskAPIVersion(os_dir)
  if not status:
    # push the error up
    return status, api_versions

  # the OS must support at least one API version that we also support
  if not constants.OS_API_VERSIONS.intersection(api_versions):
    return False, ("API version mismatch for path '%s': found %s, want %s." %
                   (os_dir, api_versions, constants.OS_API_VERSIONS))

  # OS Files dictionary, we will populate it with the absolute path
  # names; if the value is True, then it is a required file, otherwise
  # an optional one
  os_files = dict.fromkeys(constants.OS_SCRIPTS, True)

  # the variants file is optional (False) starting with API version 15
  if max(api_versions) >= constants.OS_API_V15:
    os_files[constants.OS_VARIANTS_FILE] = False

  # the parameters file and the verify script exist only from API
  # version 20 onwards
  if max(api_versions) >= constants.OS_API_V20:
    os_files[constants.OS_PARAMETERS_FILE] = True
  else:
    del os_files[constants.OS_SCRIPT_VERIFY]

  for (filename, required) in os_files.items():
    # replace the plain name with the absolute path, in-place
    os_files[filename] = utils.PathJoin(os_dir, filename)

    try:
      st = os.stat(os_files[filename])
    except EnvironmentError, err:
      if err.errno == errno.ENOENT and not required:
        # a missing optional file is simply dropped from the dict
        del os_files[filename]
        continue
      return False, ("File '%s' under path '%s' is missing (%s)" %
                     (filename, os_dir, utils.ErrnoOrStr(err)))

    if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
      return False, ("File '%s' under path '%s' is not a regular file" %
                     (filename, os_dir))

    if filename in constants.OS_SCRIPTS:
      # OS scripts must carry the owner-executable bit
      if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
        return False, ("File '%s' under path '%s' is not executable" %
                       (filename, os_dir))

  variants = []
  if constants.OS_VARIANTS_FILE in os_files:
    variants_file = os_files[constants.OS_VARIANTS_FILE]
    try:
      variants = \
        utils.FilterEmptyLinesAndComments(utils.ReadFile(variants_file))
    except EnvironmentError, err:
      # we accept missing files, but not other errors
      if err.errno != errno.ENOENT:
        return False, ("Error while reading the OS variants file at %s: %s" %
                       (variants_file, utils.ErrnoOrStr(err)))

  parameters = []
  if constants.OS_PARAMETERS_FILE in os_files:
    parameters_file = os_files[constants.OS_PARAMETERS_FILE]
    try:
      parameters = utils.ReadFile(parameters_file).splitlines()
    except EnvironmentError, err:
      return False, ("Error while reading the OS parameters file at %s: %s" %
                     (parameters_file, utils.ErrnoOrStr(err)))
    # each line is split into at most (name, help-text)
    parameters = [v.split(None, 1) for v in parameters]

  os_obj = objects.OS(name=name, path=os_dir,
                      create_script=os_files[constants.OS_SCRIPT_CREATE],
                      export_script=os_files[constants.OS_SCRIPT_EXPORT],
                      import_script=os_files[constants.OS_SCRIPT_IMPORT],
                      rename_script=os_files[constants.OS_SCRIPT_RENAME],
                      verify_script=os_files.get(constants.OS_SCRIPT_VERIFY,
                                                 None),
                      supported_variants=variants,
                      supported_parameters=parameters,
                      api_versions=api_versions)
  return True, os_obj
2735
def OSFromDisk(name, base_dir=None):
  """Create an OS instance from disk.

  This function will return an OS instance if the given name is a
  valid OS name. Otherwise, it will raise an appropriate
  L{RPCFail} exception, detailing why this is not a valid OS.

  This is just a wrapper over L{_TryOSFromDisk}, which doesn't raise
  an exception but returns true/false status data.

  @type base_dir: string
  @keyword base_dir: Base directory containing OS installations.
      Defaults to a search in all the OS_SEARCH_PATH dirs.
  @rtype: L{objects.OS}
  @return: the OS instance if we find a valid one
  @raise RPCFail: if we don't find a valid OS

  """
  # strip an eventual "+variant" suffix from the name
  plain_name = objects.OS.GetName(name)
  ok, payload = _TryOSFromDisk(plain_name, base_dir)

  if not ok:
    # payload is the error message in this case
    _Fail(payload)

  return payload
2762
def OSCoreEnv(os_name, inst_os, os_params, debug=0):
  """Calculate the basic environment for an os script.

  @type os_name: str
  @param os_name: full operating system name (including variant)
  @type inst_os: L{objects.OS}
  @param inst_os: operating system for which the environment is being built
  @type os_params: dict
  @param os_params: the OS parameters
  @type debug: integer
  @param debug: debug level (0 or 1, for OS Api 10)
  @rtype: dict
  @return: dict of environment variables
  @raise errors.BlockDeviceError: if the block device
      cannot be found

  """
  api_version = \
    max(constants.OS_API_VERSIONS.intersection(inst_os.api_versions))

  env = {
    "OS_API_VERSION": "%d" % api_version,
    "OS_NAME": inst_os.name,
    "DEBUG_LEVEL": "%d" % debug,
    }

  # OS variants: use the one given in os_name, falling back to the
  # first supported variant
  if api_version >= constants.OS_API_V15 and inst_os.supported_variants:
    variant = objects.OS.GetVariant(os_name)
    if not variant:
      variant = inst_os.supported_variants[0]
  else:
    variant = ""
  env["OS_VARIANT"] = variant

  # OS params
  for pname, pvalue in os_params.items():
    env["OSP_%s" % pname.upper()] = pvalue

  # Set a default path otherwise programs called by OS scripts (or
  # even hooks called from OS scripts) might break, and we don't want
  # to have each script require setting a PATH variable
  env["PATH"] = constants.HOOKS_PATH

  return env
2807
def OSEnvironment(instance, inst_os, debug=0):
  """Calculate the environment for an os script.

  Builds on top of L{OSCoreEnv}, adding instance-specific variables
  (identity, disks, NICs, hypervisor and backend parameters).

  @type instance: L{objects.Instance}
  @param instance: target instance for the os script run
  @type inst_os: L{objects.OS}
  @param inst_os: operating system for which the environment is being built
  @type debug: integer
  @param debug: debug level (0 or 1, for OS Api 10)
  @rtype: dict
  @return: dict of environment variables
  @raise errors.BlockDeviceError: if the block device
      cannot be found

  """
  result = OSCoreEnv(instance.os, inst_os, instance.osparams, debug=debug)

  # plain instance attributes, exported as INSTANCE_<ATTR>
  for attr in ["name", "os", "uuid", "ctime", "mtime", "primary_node"]:
    result["INSTANCE_%s" % attr.upper()] = str(getattr(instance, attr))

  result["HYPERVISOR"] = instance.hypervisor
  result["DISK_COUNT"] = "%d" % len(instance.disks)
  result["NIC_COUNT"] = "%d" % len(instance.nics)
  result["INSTANCE_SECONDARY_NODES"] = \
      ("%s" % " ".join(instance.secondary_nodes))

  # Disks
  for idx, disk in enumerate(instance.disks):
    # note: this opens (and requires) the underlying block device
    real_disk = _OpenRealBD(disk)
    result["DISK_%d_PATH" % idx] = real_disk.dev_path
    result["DISK_%d_ACCESS" % idx] = disk.mode
    result["DISK_%d_UUID" % idx] = disk.uuid
    if disk.name:
      result["DISK_%d_NAME" % idx] = disk.name
    if constants.HV_DISK_TYPE in instance.hvparams:
      result["DISK_%d_FRONTEND_TYPE" % idx] = \
        instance.hvparams[constants.HV_DISK_TYPE]
    if disk.dev_type in constants.DTS_BLOCK:
      result["DISK_%d_BACKEND_TYPE" % idx] = "block"
    elif disk.dev_type in [constants.DT_FILE, constants.DT_SHARED_FILE]:
      result["DISK_%d_BACKEND_TYPE" % idx] = \
        "file:%s" % disk.physical_id[0]

  # NICs
  for idx, nic in enumerate(instance.nics):
    result["NIC_%d_MAC" % idx] = nic.mac
    result["NIC_%d_UUID" % idx] = nic.uuid
    if nic.name:
      result["NIC_%d_NAME" % idx] = nic.name
    if nic.ip:
      result["NIC_%d_IP" % idx] = nic.ip
    result["NIC_%d_MODE" % idx] = nic.nicparams[constants.NIC_MODE]
    if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
      result["NIC_%d_BRIDGE" % idx] = nic.nicparams[constants.NIC_LINK]
    if nic.nicparams[constants.NIC_LINK]:
      result["NIC_%d_LINK" % idx] = nic.nicparams[constants.NIC_LINK]
    if nic.netinfo:
      # network details are exported via the network object's hooks dict
      nobj = objects.Network.FromDict(nic.netinfo)
      result.update(nobj.HooksDict("NIC_%d_" % idx))
    if constants.HV_NIC_TYPE in instance.hvparams:
      result["NIC_%d_FRONTEND_TYPE" % idx] = \
        instance.hvparams[constants.HV_NIC_TYPE]

  # HV/BE params
  for source, kind in [(instance.beparams, "BE"), (instance.hvparams, "HV")]:
    for key, value in source.items():
      result["INSTANCE_%s_%s" % (kind, key)] = str(value)

  return result
2878
def DiagnoseExtStorage(top_dirs=None):
  """Compute the validity for all ExtStorage Providers.

  @type top_dirs: list
  @param top_dirs: the list of directories in which to
      search (if not given defaults to
      L{pathutils.ES_SEARCH_PATH})
  @rtype: list of L{objects.ExtStorage}
  @return: a list of tuples (name, path, status, diagnose, parameters)
      for all (potential) ExtStorage Providers under all
      search paths, where:
          - name is the (potential) ExtStorage Provider
          - path is the full path to the ExtStorage Provider
          - status True/False is the validity of the ExtStorage Provider
          - diagnose is the error message for an invalid ExtStorage Provider,
            otherwise empty
          - parameters is a list of (name, help) parameters, if any

  """
  if top_dirs is None:
    top_dirs = pathutils.ES_SEARCH_PATH

  result = []
  for dir_name in top_dirs:
    if not os.path.isdir(dir_name):
      continue
    try:
      candidates = utils.ListVisibleFiles(dir_name)
    except EnvironmentError as err:
      logging.exception("Can't list the ExtStorage directory %s: %s",
                        dir_name, err)
      break
    for name in candidates:
      es_path = utils.PathJoin(dir_name, name)
      status, es_inst = bdev.ExtStorageFromDisk(name, base_dir=dir_name)
      if status:
        diagnose = ""
        parameters = es_inst.supported_parameters
      else:
        # on failure es_inst is the error message
        diagnose = es_inst
        parameters = []
      result.append((name, es_path, status, diagnose, parameters))

  return result
2923
def BlockdevGrow(disk, amount, dryrun, backingstore, excl_stor):
  """Grow a stack of block devices.

  Callers invoke this recursively, resizing the children before their
  parents; this function only resizes the device described by C{disk}.

  @type disk: L{objects.Disk}
  @param disk: the disk to be grown
  @type amount: integer
  @param amount: the amount (in mebibytes) to grow with
  @type dryrun: boolean
  @param dryrun: whether to execute the operation in simulation mode
      only, without actually increasing the size
  @param backingstore: whether to execute the operation on backing storage
      only, or on "logical" storage only; e.g. DRBD is logical storage,
      whereas LVM, file, RBD are backing storage
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active
  @rtype: (status, result)
  @return: a tuple with the status of the operation (True/False), and
      the errors message if status is False

  """
  dev = _RecursiveFindBD(disk)
  if dev is not None:
    try:
      dev.Grow(amount, dryrun, backingstore, excl_stor)
    except errors.BlockDeviceError as err:
      _Fail("Failed to grow block device: %s", err, exc=True)
  else:
    _Fail("Cannot find block device %s", disk)
def BlockdevSnapshot(disk):
  """Create a snapshot copy of a block device.

  For DRBD the call recurses into the backing device; the snapshot
  itself is only ever taken on the leaf LVM (plain) device.

  @type disk: L{objects.Disk}
  @param disk: the disk to be snapshotted
  @rtype: string
  @return: snapshot disk ID as (vg, lv)

  """
  dev_type = disk.dev_type

  if dev_type == constants.DT_PLAIN:
    r_dev = _RecursiveFindBD(disk)
    if r_dev is None:
      _Fail("Cannot find block device %s", disk)
    # FIXME: choose a saner value for the snapshot size
    # let's stay on the safe side and ask for the full size, for now
    return r_dev.Snapshot(disk.size)

  if dev_type == constants.DT_DRBD8:
    if not disk.children:
      _Fail("DRBD device '%s' without backing storage cannot be snapshotted",
            disk.unique_id)
    # snapshot the backing device instead
    return BlockdevSnapshot(disk.children[0])

  _Fail("Cannot snapshot non-lvm block device '%s' of type '%s'",
        disk.unique_id, disk.dev_type)
def BlockdevSetInfo(disk, info):
  """Sets 'metadata' information on block devices.

  Initial information is set at device creation; this function should
  be used for example after renames.

  @type disk: L{objects.Disk}
  @param disk: the disk to be grown
  @type info: string
  @param info: new 'info' metadata
  @rtype: (status, result)
  @return: a tuple with the status of the operation (True/False), and
      the errors message if status is False

  """
  dev = _RecursiveFindBD(disk)
  if dev is not None:
    try:
      dev.SetInfo(info)
    except errors.BlockDeviceError as err:
      _Fail("Failed to set information on block device: %s", err, exc=True)
  else:
    _Fail("Cannot find block device %s", disk)
def FinalizeExport(instance, snap_disks):
  """Write out the export configuration information.

  The configuration is first written to a staging directory
  (C{<name>.new}) and then moved over the final destination, so a
  partially-written export never replaces a complete one.

  @type instance: L{objects.Instance}
  @param instance: the instance which we export, used for
      saving configuration
  @type snap_disks: list of L{objects.Disk}
  @param snap_disks: list of snapshot block devices, which
      will be used to get the actual name of the dump file

  @rtype: None

  """
  # staging dir; moved over finaldestdir at the end
  destdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name + ".new")
  finaldestdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name)

  config = objects.SerializableConfigParser()

  # export-level metadata
  config.add_section(constants.INISECT_EXP)
  config.set(constants.INISECT_EXP, "version", "0")
  config.set(constants.INISECT_EXP, "timestamp", "%d" % int(time.time()))
  config.set(constants.INISECT_EXP, "source", instance.primary_node)
  config.set(constants.INISECT_EXP, "os", instance.os)
  config.set(constants.INISECT_EXP, "compression", "none")

  # instance-level data
  config.add_section(constants.INISECT_INS)
  config.set(constants.INISECT_INS, "name", instance.name)
  config.set(constants.INISECT_INS, "maxmem", "%d" %
             instance.beparams[constants.BE_MAXMEM])
  config.set(constants.INISECT_INS, "minmem", "%d" %
             instance.beparams[constants.BE_MINMEM])
  # "memory" is deprecated, but useful for exporting to old ganeti versions
  config.set(constants.INISECT_INS, "memory", "%d" %
             instance.beparams[constants.BE_MAXMEM])
  config.set(constants.INISECT_INS, "vcpus", "%d" %
             instance.beparams[constants.BE_VCPUS])
  config.set(constants.INISECT_INS, "disk_template", instance.disk_template)
  config.set(constants.INISECT_INS, "hypervisor", instance.hypervisor)
  config.set(constants.INISECT_INS, "tags", " ".join(instance.GetTags()))

  # per-NIC data, keyed by position (nic0_..., nic1_..., ...)
  nic_total = 0
  for nic_count, nic in enumerate(instance.nics):
    nic_total += 1
    config.set(constants.INISECT_INS, "nic%d_mac" %
               nic_count, "%s" % nic.mac)
    config.set(constants.INISECT_INS, "nic%d_ip" % nic_count, "%s" % nic.ip)
    config.set(constants.INISECT_INS, "nic%d_network" % nic_count,
               "%s" % nic.network)
    config.set(constants.INISECT_INS, "nic%d_name" % nic_count,
               "%s" % nic.name)
    for param in constants.NICS_PARAMETER_TYPES:
      config.set(constants.INISECT_INS, "nic%d_%s" % (nic_count, param),
                 "%s" % nic.nicparams.get(param, None))
  # TODO: redundant: on load can read nics until it doesn't exist
  config.set(constants.INISECT_INS, "nic_count", "%d" % nic_total)

  # per-disk data; entries in snap_disks that are falsy are skipped,
  # so disk indexes may have gaps while disk_count holds the real total
  disk_total = 0
  for disk_count, disk in enumerate(snap_disks):
    if disk:
      disk_total += 1
      config.set(constants.INISECT_INS, "disk%d_ivname" % disk_count,
                 ("%s" % disk.iv_name))
      config.set(constants.INISECT_INS, "disk%d_dump" % disk_count,
                 ("%s" % disk.physical_id[1]))
      config.set(constants.INISECT_INS, "disk%d_size" % disk_count,
                 ("%d" % disk.size))
      config.set(constants.INISECT_INS, "disk%d_name" % disk_count,
                 "%s" % disk.name)

  config.set(constants.INISECT_INS, "disk_count", "%d" % disk_total)

  # New-style hypervisor/backend parameters

  config.add_section(constants.INISECT_HYP)
  for name, value in instance.hvparams.items():
    # globals are cluster-level, not meaningful in an export
    if name not in constants.HVC_GLOBALS:
      config.set(constants.INISECT_HYP, name, str(value))

  config.add_section(constants.INISECT_BEP)
  for name, value in instance.beparams.items():
    config.set(constants.INISECT_BEP, name, str(value))

  config.add_section(constants.INISECT_OSP)
  for name, value in instance.osparams.items():
    config.set(constants.INISECT_OSP, name, str(value))

  utils.WriteFile(utils.PathJoin(destdir, constants.EXPORT_CONF_FILE),
                  data=config.Dumps())
  # replace any pre-existing export of the same name with the new one
  shutil.rmtree(finaldestdir, ignore_errors=True)
  shutil.move(destdir, finaldestdir)
def ExportInfo(dest):
  """Get export configuration information.

  @type dest: str
  @param dest: directory containing the export

  @rtype: L{objects.SerializableConfigParser}
  @return: a serializable config file containing the
      export info

  """
  config = objects.SerializableConfigParser()
  config.read(utils.PathJoin(dest, constants.EXPORT_CONF_FILE))

  required_sections = (constants.INISECT_EXP, constants.INISECT_INS)
  if not all(config.has_section(section) for section in required_sections):
    _Fail("Export info file doesn't have the required fields")

  return config.Dumps()
def ListExports():
  """Return a list of exports currently available on this machine.

  @rtype: list
  @return: list of the exports

  """
  if not os.path.isdir(pathutils.EXPORT_DIR):
    _Fail("No exports directory")
  return sorted(utils.ListVisibleFiles(pathutils.EXPORT_DIR))
def RemoveExport(export):
  """Remove an existing export from the node.

  @type export: str
  @param export: the name of the export to remove
  @rtype: None

  """
  export_path = utils.PathJoin(pathutils.EXPORT_DIR, export)

  try:
    shutil.rmtree(export_path)
  except EnvironmentError as err:
    _Fail("Error while removing the export: %s", err, exc=True)
def BlockdevRename(devlist):
  """Rename a list of block devices.

  @type devlist: list of tuples
  @param devlist: list of tuples of the form (disk, new_unique_id);
      disk is an L{objects.Disk} object describing the current disk,
      and new_unique_id is the name we rename it to
  @rtype: None
  @raises RPCFail: if any rename failed; the message aggregates the
      individual per-device error messages

  """
  msgs = []
  result = True
  for disk, unique_id in devlist:
    dev = _RecursiveFindBD(disk)
    if dev is None:
      msgs.append("Can't find device %s in rename" % str(disk))
      result = False
      continue
    try:
      old_rpath = dev.dev_path
      dev.Rename(unique_id)
      new_rpath = dev.dev_path
      # the device path may change with the name; drop the stale cache entry
      if old_rpath != new_rpath:
        DevCacheManager.RemoveCache(old_rpath)
        # FIXME: we should add the new cache information here, like:
        # DevCacheManager.UpdateCache(new_rpath, owner, ...)
        # but we don't have the owner here - maybe parse from existing
        # cache? for now, we only lose lvm data when we rename, which
        # is less critical than DRBD or MD
    except errors.BlockDeviceError, err:
      msgs.append("Can't rename device '%s' to '%s': %s" %
                  (dev, unique_id, err))
      logging.exception("Can't rename device '%s' to '%s'", dev, unique_id)
      result = False
  if not result:
    _Fail("; ".join(msgs))
def _TransformFileStorageDir(fs_dir):
  """Checks whether given file_storage_dir is valid.

  Checks whether the given fs_dir is within the cluster-wide default
  file_storage_dir or the shared_file_storage_dir, which are stored in
  SimpleStore. Only paths under those directories are allowed.

  @type fs_dir: str
  @param fs_dir: the path to check

  @return: the normalized path; invalid paths are reported by
      L{filestorage.CheckFileStoragePath}

  """
  # validation happens inside CheckFileStoragePath
  filestorage.CheckFileStoragePath(fs_dir)
  return os.path.normpath(fs_dir)
def CreateFileStorageDir(file_storage_dir):
  """Create file storage directory.

  @type file_storage_dir: str
  @param file_storage_dir: directory to create

  @rtype: None

  """
  file_storage_dir = _TransformFileStorageDir(file_storage_dir)
  if not os.path.exists(file_storage_dir):
    try:
      os.makedirs(file_storage_dir, 0o750)
    except OSError as err:
      _Fail("Cannot create file storage directory '%s': %s",
            file_storage_dir, err, exc=True)
  elif not os.path.isdir(file_storage_dir):
    # something already exists at the path, but it's not a directory
    _Fail("Specified storage dir '%s' is not a directory",
          file_storage_dir)
def RemoveFileStorageDir(file_storage_dir):
  """Remove file storage directory.

  Remove it only if it's empty. If not log an error and return.

  @type file_storage_dir: str
  @param file_storage_dir: the directory we should cleanup
  @rtype: None

  """
  file_storage_dir = _TransformFileStorageDir(file_storage_dir)
  if not os.path.exists(file_storage_dir):
    # nothing to do
    return
  if not os.path.isdir(file_storage_dir):
    _Fail("Specified Storage directory '%s' is not a directory",
          file_storage_dir)
  # os.rmdir only removes empty directories; a non-empty directory
  # makes the RPC call fail, which is the intended behaviour
  try:
    os.rmdir(file_storage_dir)
  except OSError as err:
    _Fail("Cannot remove file storage directory '%s': %s",
          file_storage_dir, err)
def RenameFileStorageDir(old_file_storage_dir, new_file_storage_dir):
  """Rename the file storage directory.

  @type old_file_storage_dir: str
  @param old_file_storage_dir: the current path
  @type new_file_storage_dir: str
  @param new_file_storage_dir: the name we should rename to
  @rtype: None

  """
  old_file_storage_dir = _TransformFileStorageDir(old_file_storage_dir)
  new_file_storage_dir = _TransformFileStorageDir(new_file_storage_dir)

  if os.path.exists(new_file_storage_dir):
    # the destination exists; only an error if the source also exists
    if os.path.exists(old_file_storage_dir):
      _Fail("Cannot rename '%s' to '%s': both locations exist",
            old_file_storage_dir, new_file_storage_dir)
    return

  if not os.path.isdir(old_file_storage_dir):
    _Fail("Specified storage dir '%s' is not a directory",
          old_file_storage_dir)

  try:
    os.rename(old_file_storage_dir, new_file_storage_dir)
  except OSError as err:
    _Fail("Cannot rename '%s' to '%s': %s",
          old_file_storage_dir, new_file_storage_dir, err)
def _EnsureJobQueueFile(file_name):
  """Checks whether the given filename is in the queue directory.

  @type file_name: str
  @param file_name: the file name we should check
  @rtype: None
  @raises RPCFail: if the file is not valid

  """
  inside_queue = utils.IsBelowDir(pathutils.QUEUE_DIR, file_name)
  if not inside_queue:
    _Fail("Passed job queue file '%s' does not belong to"
          " the queue directory '%s'", file_name, pathutils.QUEUE_DIR)
def JobQueueUpdate(file_name, content):
  """Updates a file in the queue directory.

  This is just a wrapper over L{utils.io.WriteFile}, with proper
  checking.

  @type file_name: str
  @param file_name: the job file name
  @type content: str
  @param content: the new job contents
  @rtype: None

  """
  local_name = vcluster.LocalizeVirtualPath(file_name)

  _EnsureJobQueueFile(local_name)
  ents = runtime.GetEnts()

  # Write and replace the file atomically
  utils.WriteFile(local_name, data=_Decompress(content),
                  uid=ents.masterd_uid, gid=ents.daemons_gid,
                  mode=constants.JOB_QUEUE_FILES_PERMS)
def JobQueueRename(old, new):
  """Renames a job queue file.

  This is just a wrapper over os.rename with proper checking.

  @type old: str
  @param old: the old (actual) file name
  @type new: str
  @param new: the desired file name
  @rtype: None

  """
  old_path = vcluster.LocalizeVirtualPath(old)
  new_path = vcluster.LocalizeVirtualPath(new)

  # both source and destination must live inside the queue directory
  for path in (old_path, new_path):
    _EnsureJobQueueFile(path)

  ents = runtime.GetEnts()

  utils.RenameFile(old_path, new_path, mkdir=True, mkdir_mode=0o750,
                   dir_uid=ents.masterd_uid, dir_gid=ents.daemons_gid)
def BlockdevClose(instance_name, disks):
  """Closes the given block devices.

  This means they will be switched to secondary mode (in case of
  DRBD).

  @param instance_name: if the argument is not empty, the symlinks
      of this instance will be removed
  @type disks: list of L{objects.Disk}
  @param disks: the list of disks to be closed
  @rtype: tuple (success, message)
  @return: a tuple of success and message, where success
      indicates the succes of the operation, and message
      which will contain the error details in case we
      failed

  """
  # resolve every disk first, so we fail early on a missing device
  bdevs = []
  for disk in disks:
    found = _RecursiveFindBD(disk)
    if found is None:
      _Fail("Can't find device %s", disk)
    bdevs.append(found)

  failures = []
  for dev in bdevs:
    try:
      dev.Close()
    except errors.BlockDeviceError as err:
      failures.append(str(err))

  if failures:
    _Fail("Can't make devices secondary: %s", ",".join(failures))
  if instance_name:
    _RemoveBlockDevLinks(instance_name, disks)
def ValidateHVParams(hvname, hvparams):
  """Validates the given hypervisor parameters.

  @type hvname: string
  @param hvname: the hypervisor name
  @type hvparams: dict
  @param hvparams: the hypervisor parameters to be validated
  @rtype: None

  """
  try:
    hypervisor.GetHypervisor(hvname).ValidateParameters(hvparams)
  except errors.HypervisorError as err:
    # the hypervisor has already produced a user-readable message
    _Fail(str(err), log=False)
3412 3413 -def _CheckOSPList(os_obj, parameters):
3414 """Check whether a list of parameters is supported by the OS. 3415 3416 @type os_obj: L{objects.OS} 3417 @param os_obj: OS object to check 3418 @type parameters: list 3419 @param parameters: the list of parameters to check 3420 3421 """ 3422 supported = [v[0] for v in os_obj.supported_parameters] 3423 delta = frozenset(parameters).difference(supported) 3424 if delta: 3425 _Fail("The following parameters are not supported" 3426 " by the OS %s: %s" % (os_obj.name, utils.CommaJoin(delta)))
3427
def ValidateOS(required, osname, checks, osparams):
  """Validate the given OS' parameters.

  @type required: boolean
  @param required: whether absence of the OS should translate into
      failure or not
  @type osname: string
  @param osname: the OS to be validated
  @type checks: list
  @param checks: list of the checks to run (currently only 'parameters')
  @type osparams: dict
  @param osparams: dictionary with OS parameters
  @rtype: boolean
  @return: True if the validation passed, or False if the OS was not
      found and L{required} was false

  """
  # reject check names we don't know how to run
  if not constants.OS_VALIDATE_CALLS.issuperset(checks):
    _Fail("Unknown checks required for OS %s: %s", osname,
          set(checks).difference(constants.OS_VALIDATE_CALLS))

  # NOTE(review): GetName presumably reduces "os+variant" style names to
  # the base OS name before the disk lookup — confirm against objects.OS
  name_only = objects.OS.GetName(osname)
  status, tbv = _TryOSFromDisk(name_only, None)

  if not status:
    # _TryOSFromDisk returns (False, error-string) on failure
    if required:
      _Fail(tbv)
    else:
      return False

  # OSes below API version 20 predate validation support; accept as-is
  if max(tbv.api_versions) < constants.OS_API_V20:
    return True

  if constants.OS_VALIDATE_PARAMETERS in checks:
    _CheckOSPList(tbv, osparams.keys())

  # run the OS-provided verify script in a clean, OS-specific environment
  validate_env = OSCoreEnv(osname, tbv, osparams)
  result = utils.RunCmd([tbv.verify_script] + checks, env=validate_env,
                        cwd=tbv.path, reset_env=True)
  if result.failed:
    logging.error("os validate command '%s' returned error: %s output: %s",
                  result.cmd, result.fail_reason, result.output)
    _Fail("OS validation script failed (%s), output: %s",
          result.fail_reason, result.output, log=False)

  return True
def DemoteFromMC():
  """Demotes the current node from master candidate role.

  """
  # refuse to demote if ssconf claims we are the master
  (master, myself) = ssconf.GetMasterAndMyself()
  if master == myself:
    _Fail("ssconf status shows I'm the master node, will not demote")

  # also refuse if the master daemon is (still) running locally
  check_result = utils.RunCmd([pathutils.DAEMON_UTIL, "check",
                               constants.MASTERD])
  if not check_result.failed:
    _Fail("The master daemon is running, will not demote")

  try:
    if os.path.isfile(pathutils.CLUSTER_CONF_FILE):
      utils.CreateBackup(pathutils.CLUSTER_CONF_FILE)
  except EnvironmentError as err:
    # a vanished file is fine; anything else is a real error
    if err.errno != errno.ENOENT:
      _Fail("Error while backing up cluster file: %s", err, exc=True)

  utils.RemoveFile(pathutils.CLUSTER_CONF_FILE)
def _GetX509Filenames(cryptodir, name):
  """Returns the full paths for the private key and certificate.

  @return: tuple of (certificate directory, key file path,
      certificate file path)

  """
  cert_dir = utils.PathJoin(cryptodir, name)
  return (cert_dir,
          utils.PathJoin(cert_dir, _X509_KEY_FILE),
          utils.PathJoin(cert_dir, _X509_CERT_FILE))
3508 3509 -def CreateX509Certificate(validity, cryptodir=pathutils.CRYPTO_KEYS_DIR):
3510 """Creates a new X509 certificate for SSL/TLS. 3511 3512 @type validity: int 3513 @param validity: Validity in seconds 3514 @rtype: tuple; (string, string) 3515 @return: Certificate name and public part 3516 3517 """ 3518 (key_pem, cert_pem) = \ 3519 utils.GenerateSelfSignedX509Cert(netutils.Hostname.GetSysName(),