Package ganeti :: Module cli

Source Code for Module ganeti.cli

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. 
   5  # 
   6  # This program is free software; you can redistribute it and/or modify 
   7  # it under the terms of the GNU General Public License as published by 
   8  # the Free Software Foundation; either version 2 of the License, or 
   9  # (at your option) any later version. 
  10  # 
  11  # This program is distributed in the hope that it will be useful, but 
  12  # WITHOUT ANY WARRANTY; without even the implied warranty of 
  13  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
  14  # General Public License for more details. 
  15  # 
  16  # You should have received a copy of the GNU General Public License 
  17  # along with this program; if not, write to the Free Software 
  18  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
  19  # 02110-1301, USA. 
  20   
  21   
  22  """Module dealing with command line parsing""" 
  23   
  24   
  25  import sys 
  26  import textwrap 
  27  import os.path 
  28  import time 
  29  import logging 
  30  import errno 
  31  import itertools 
  32  import shlex 
  33  from cStringIO import StringIO 
  34   
  35  from ganeti import utils 
  36  from ganeti import errors 
  37  from ganeti import constants 
  38  from ganeti import opcodes 
  39  from ganeti import luxi 
  40  from ganeti import ssconf 
  41  from ganeti import rpc 
  42  from ganeti import ssh 
  43  from ganeti import compat 
  44  from ganeti import netutils 
  45  from ganeti import qlang 
  46  from ganeti import objects 
  47  from ganeti import pathutils 
  48   
  49  from optparse import (OptionParser, TitledHelpFormatter, 
  50                        Option, OptionValueError) 
  51   
  52   
  53  __all__ = [ 
  54    # Command line options 
  55    "ABSOLUTE_OPT", 
  56    "ADD_UIDS_OPT", 
  57    "ADD_RESERVED_IPS_OPT", 
  58    "ALLOCATABLE_OPT", 
  59    "ALLOC_POLICY_OPT", 
  60    "ALL_OPT", 
  61    "ALLOW_FAILOVER_OPT", 
  62    "AUTO_PROMOTE_OPT", 
  63    "AUTO_REPLACE_OPT", 
  64    "BACKEND_OPT", 
  65    "BLK_OS_OPT", 
  66    "CAPAB_MASTER_OPT", 
  67    "CAPAB_VM_OPT", 
  68    "CLEANUP_OPT", 
  69    "CLUSTER_DOMAIN_SECRET_OPT", 
  70    "CONFIRM_OPT", 
  71    "CP_SIZE_OPT", 
  72    "DEBUG_OPT", 
  73    "DEBUG_SIMERR_OPT", 
  74    "DISKIDX_OPT", 
  75    "DISK_OPT", 
  76    "DISK_PARAMS_OPT", 
  77    "DISK_TEMPLATE_OPT", 
  78    "DRAINED_OPT", 
  79    "DRY_RUN_OPT", 
  80    "DRBD_HELPER_OPT", 
  81    "DST_NODE_OPT", 
  82    "EARLY_RELEASE_OPT", 
  83    "ENABLED_HV_OPT", 
  84    "ERROR_CODES_OPT", 
  85    "FAILURE_ONLY_OPT", 
  86    "FIELDS_OPT", 
  87    "FILESTORE_DIR_OPT", 
  88    "FILESTORE_DRIVER_OPT", 
  89    "FORCE_FILTER_OPT", 
  90    "FORCE_OPT", 
  91    "FORCE_VARIANT_OPT", 
  92    "GATEWAY_OPT", 
  93    "GATEWAY6_OPT", 
  94    "GLOBAL_FILEDIR_OPT", 
  95    "HID_OS_OPT", 
  96    "GLOBAL_SHARED_FILEDIR_OPT", 
  97    "HVLIST_OPT", 
  98    "HVOPTS_OPT", 
  99    "HYPERVISOR_OPT", 
 100    "IALLOCATOR_OPT", 
 101    "DEFAULT_IALLOCATOR_OPT", 
 102    "IDENTIFY_DEFAULTS_OPT", 
 103    "IGNORE_CONSIST_OPT", 
 104    "IGNORE_ERRORS_OPT", 
 105    "IGNORE_FAILURES_OPT", 
 106    "IGNORE_OFFLINE_OPT", 
 107    "IGNORE_REMOVE_FAILURES_OPT", 
 108    "IGNORE_SECONDARIES_OPT", 
 109    "IGNORE_SIZE_OPT", 
 110    "INTERVAL_OPT", 
 111    "MAC_PREFIX_OPT", 
 112    "MAINTAIN_NODE_HEALTH_OPT", 
 113    "MASTER_NETDEV_OPT", 
 114    "MASTER_NETMASK_OPT", 
 115    "MC_OPT", 
 116    "MIGRATION_MODE_OPT", 
 117    "NET_OPT", 
 118    "NETWORK_OPT", 
 119    "NETWORK6_OPT", 
 120    "NEW_CLUSTER_CERT_OPT", 
 121    "NEW_CLUSTER_DOMAIN_SECRET_OPT", 
 122    "NEW_CONFD_HMAC_KEY_OPT", 
 123    "NEW_RAPI_CERT_OPT", 
 124    "NEW_SECONDARY_OPT", 
 125    "NEW_SPICE_CERT_OPT", 
 126    "NIC_PARAMS_OPT", 
 127    "NOCONFLICTSCHECK_OPT", 
 128    "NODE_FORCE_JOIN_OPT", 
 129    "NODE_LIST_OPT", 
 130    "NODE_PLACEMENT_OPT", 
 131    "NODEGROUP_OPT", 
 132    "NODE_PARAMS_OPT", 
 133    "NODE_POWERED_OPT", 
 134    "NODRBD_STORAGE_OPT", 
 135    "NOHDR_OPT", 
 136    "NOIPCHECK_OPT", 
 137    "NO_INSTALL_OPT", 
 138    "NONAMECHECK_OPT", 
 139    "NOLVM_STORAGE_OPT", 
 140    "NOMODIFY_ETCHOSTS_OPT", 
 141    "NOMODIFY_SSH_SETUP_OPT", 
 142    "NONICS_OPT", 
 143    "NONLIVE_OPT", 
 144    "NONPLUS1_OPT", 
 145    "NORUNTIME_CHGS_OPT", 
 146    "NOSHUTDOWN_OPT", 
 147    "NOSTART_OPT", 
 148    "NOSSH_KEYCHECK_OPT", 
 149    "NOVOTING_OPT", 
 150    "NO_REMEMBER_OPT", 
 151    "NWSYNC_OPT", 
 152    "OFFLINE_INST_OPT", 
 153    "ONLINE_INST_OPT", 
 154    "ON_PRIMARY_OPT", 
 155    "ON_SECONDARY_OPT", 
 156    "OFFLINE_OPT", 
 157    "OSPARAMS_OPT", 
 158    "OS_OPT", 
 159    "OS_SIZE_OPT", 
 160    "OOB_TIMEOUT_OPT", 
 161    "POWER_DELAY_OPT", 
 162    "PREALLOC_WIPE_DISKS_OPT", 
 163    "PRIMARY_IP_VERSION_OPT", 
 164    "PRIMARY_ONLY_OPT", 
 165    "PRIORITY_OPT", 
 166    "RAPI_CERT_OPT", 
 167    "READD_OPT", 
 168    "REBOOT_TYPE_OPT", 
 169    "REMOVE_INSTANCE_OPT", 
 170    "REMOVE_RESERVED_IPS_OPT", 
 171    "REMOVE_UIDS_OPT", 
 172    "RESERVED_LVS_OPT", 
 173    "RUNTIME_MEM_OPT", 
 174    "ROMAN_OPT", 
 175    "SECONDARY_IP_OPT", 
 176    "SECONDARY_ONLY_OPT", 
 177    "SELECT_OS_OPT", 
 178    "SEP_OPT", 
 179    "SHOWCMD_OPT", 
 180    "SHOW_MACHINE_OPT", 
 181    "SHUTDOWN_TIMEOUT_OPT", 
 182    "SINGLE_NODE_OPT", 
 183    "SPECS_CPU_COUNT_OPT", 
 184    "SPECS_DISK_COUNT_OPT", 
 185    "SPECS_DISK_SIZE_OPT", 
 186    "SPECS_MEM_SIZE_OPT", 
 187    "SPECS_NIC_COUNT_OPT", 
 188    "IPOLICY_DISK_TEMPLATES", 
 189    "IPOLICY_VCPU_RATIO", 
 190    "SPICE_CACERT_OPT", 
 191    "SPICE_CERT_OPT", 
 192    "SRC_DIR_OPT", 
 193    "SRC_NODE_OPT", 
 194    "SUBMIT_OPT", 
 195    "STARTUP_PAUSED_OPT", 
 196    "STATIC_OPT", 
 197    "SYNC_OPT", 
 198    "TAG_ADD_OPT", 
 199    "TAG_SRC_OPT", 
 200    "TIMEOUT_OPT", 
 201    "TO_GROUP_OPT", 
 202    "UIDPOOL_OPT", 
 203    "USEUNITS_OPT", 
 204    "USE_EXTERNAL_MIP_SCRIPT", 
 205    "USE_REPL_NET_OPT", 
 206    "VERBOSE_OPT", 
 207    "VG_NAME_OPT", 
 208    "WFSYNC_OPT", 
 209    "YES_DOIT_OPT", 
 210    "DISK_STATE_OPT", 
 211    "HV_STATE_OPT", 
 212    "IGNORE_IPOLICY_OPT", 
 213    "INSTANCE_POLICY_OPTS", 
 214    # Generic functions for CLI programs 
 215    "ConfirmOperation", 
 216    "CreateIPolicyFromOpts", 
 217    "GenericMain", 
 218    "GenericInstanceCreate", 
 219    "GenericList", 
 220    "GenericListFields", 
 221    "GetClient", 
 222    "GetOnlineNodes", 
 223    "JobExecutor", 
 224    "JobSubmittedException", 
 225    "ParseTimespec", 
 226    "RunWhileClusterStopped", 
 227    "SubmitOpCode", 
 228    "SubmitOrSend", 
 229    "UsesRPC", 
 230    # Formatting functions 
 231    "ToStderr", "ToStdout", 
 232    "FormatError", 
 233    "FormatQueryResult", 
 234    "FormatParameterDict", 
 235    "GenerateTable", 
 236    "AskUser", 
 237    "FormatTimestamp", 
 238    "FormatLogMessage", 
 239    # Tags functions 
 240    "ListTags", 
 241    "AddTags", 
 242    "RemoveTags", 
 243    # command line options support infrastructure 
 244    "ARGS_MANY_INSTANCES", 
 245    "ARGS_MANY_NODES", 
 246    "ARGS_MANY_GROUPS", 
 247    "ARGS_MANY_NETWORKS", 
 248    "ARGS_NONE", 
 249    "ARGS_ONE_INSTANCE", 
 250    "ARGS_ONE_NODE", 
 251    "ARGS_ONE_GROUP", 
 252    "ARGS_ONE_OS", 
 253    "ARGS_ONE_NETWORK", 
 254    "ArgChoice", 
 255    "ArgCommand", 
 256    "ArgFile", 
 257    "ArgGroup", 
 258    "ArgHost", 
 259    "ArgInstance", 
 260    "ArgJobId", 
 261    "ArgNetwork", 
 262    "ArgNode", 
 263    "ArgOs", 
 264    "ArgExtStorage", 
 265    "ArgSuggest", 
 266    "ArgUnknown", 
 267    "OPT_COMPL_INST_ADD_NODES", 
 268    "OPT_COMPL_MANY_NODES", 
 269    "OPT_COMPL_ONE_IALLOCATOR", 
 270    "OPT_COMPL_ONE_INSTANCE", 
 271    "OPT_COMPL_ONE_NODE", 
 272    "OPT_COMPL_ONE_NODEGROUP", 
 273    "OPT_COMPL_ONE_NETWORK", 
 274    "OPT_COMPL_ONE_OS", 
 275    "OPT_COMPL_ONE_EXTSTORAGE", 
 276    "cli_option", 
 277    "SplitNodeOption", 
 278    "CalculateOSNames", 
 279    "ParseFields", 
 280    "COMMON_CREATE_OPTS", 
 281    ] 
 282   
 283  NO_PREFIX = "no_" 
 284  UN_PREFIX = "-" 
 285   
 286  #: Priorities (sorted) 
 287  _PRIORITY_NAMES = [ 
 288    ("low", constants.OP_PRIO_LOW), 
 289    ("normal", constants.OP_PRIO_NORMAL), 
 290    ("high", constants.OP_PRIO_HIGH), 
 291    ] 
 292   
 293  #: Priority dictionary for easier lookup 
 294  # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once 
 295  # we migrate to Python 2.6 
 296  _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES) 
 297   
 298  # Query result status for clients 
 299  (QR_NORMAL, 
 300   QR_UNKNOWN, 
 301   QR_INCOMPLETE) = range(3) 
 302   
 303  #: Maximum batch size for ChooseJob 
 304  _CHOOSE_BATCH = 25 
 305   
 306   
 307  # constants used to create InstancePolicy dictionary 
 308  TISPECS_GROUP_TYPES = { 
 309    constants.ISPECS_MIN: constants.VTYPE_INT, 
 310    constants.ISPECS_MAX: constants.VTYPE_INT, 
 311    } 
 312   
 313  TISPECS_CLUSTER_TYPES = { 
 314    constants.ISPECS_MIN: constants.VTYPE_INT, 
 315    constants.ISPECS_MAX: constants.VTYPE_INT, 
 316    constants.ISPECS_STD: constants.VTYPE_INT, 
 317    } 
 318   
 319  #: User-friendly names for query2 field types 
 320  _QFT_NAMES = { 
 321    constants.QFT_UNKNOWN: "Unknown", 
 322    constants.QFT_TEXT: "Text", 
 323    constants.QFT_BOOL: "Boolean", 
 324    constants.QFT_NUMBER: "Number", 
 325    constants.QFT_UNIT: "Storage size", 
 326    constants.QFT_TIMESTAMP: "Timestamp", 
 327    constants.QFT_OTHER: "Custom", 
 328    } 
  329
  330
  331  class _Argument:
  332    def __init__(self, min=0, max=None): # pylint: disable=W0622
  333      self.min = min
  334      self.max = max
  335
  336    def __repr__(self):
  337      return ("<%s min=%s max=%s>" %
  338              (self.__class__.__name__, self.min, self.max))
  339
  340
  341  class ArgSuggest(_Argument):
  342    """Suggesting argument.
  343
  344    Value can be any of the ones passed to the constructor.
  345
  346    """
  347    # pylint: disable=W0622
  348    def __init__(self, min=0, max=None, choices=None):
  349      _Argument.__init__(self, min=min, max=max)
  350      self.choices = choices
  351
  352    def __repr__(self):
  353      return ("<%s min=%s max=%s choices=%r>" %
  354              (self.__class__.__name__, self.min, self.max, self.choices))
  355
  356
  357  class ArgChoice(ArgSuggest):
  358    """Choice argument.
  359
  360    Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  361    but value must be one of the choices.
  362
  363    """
  364
  365
  366  class ArgUnknown(_Argument):
  367    """Unknown argument to program (e.g. determined at runtime).
  368
  369    """
  370
  371
  372  class ArgInstance(_Argument):
  373    """Instances argument.
  374
  375    """
  376
  377
  378  class ArgNode(_Argument):
  379    """Node argument.
  380
  381    """
  382
  383
  384  class ArgNetwork(_Argument):
  385    """Network argument.
  386
  387    """
  388
  389
  390  class ArgGroup(_Argument):
  391    """Node group argument.
  392
  393    """
  394
  395
  396  class ArgJobId(_Argument):
  397    """Job ID argument.
  398
  399    """
  400
  401
  402  class ArgFile(_Argument):
  403    """File path argument.
  404
  405    """
  406
  407
  408  class ArgCommand(_Argument):
  409    """Command argument.
  410
  411    """
  412
  413
  414  class ArgHost(_Argument):
  415    """Host argument.
  416
  417    """
  418
  419
  420  class ArgOs(_Argument):
  421    """OS argument.
  422
  423    """
  424
  425
  426  class ArgExtStorage(_Argument):
  427    """ExtStorage argument.
  428
  429    """
  430
  431
  432  ARGS_NONE = []
  433  ARGS_MANY_INSTANCES = [ArgInstance()]
  434  ARGS_MANY_NETWORKS = [ArgNetwork()]
  435  ARGS_MANY_NODES = [ArgNode()]
  436  ARGS_MANY_GROUPS = [ArgGroup()]
  437  ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
  438  ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
  439  ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
  440  # TODO
  441  ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
  442  ARGS_ONE_OS = [ArgOs(min=1, max=1)]
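The ARGS_* lists above are the positional-argument specifications that command definitions attach to each command; _CheckArguments (further down in this module) enforces the min/max counts they declare. A purely illustrative combination, not taken from this module (the name EXAMPLE_ARGS is made up):

# Illustrative only: a hypothetical command taking exactly one node followed
# by one or more instance names would declare its positional arguments as
EXAMPLE_ARGS = [ArgNode(min=1, max=1), ArgInstance(min=1)]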
  443
  444
  445  def _ExtractTagsObject(opts, args):
  446    """Extract the tag type object.
  447
  448    Note that this function will modify its args parameter.
  449
  450    """
  451    if not hasattr(opts, "tag_type"):
  452      raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  453    kind = opts.tag_type
  454    if kind == constants.TAG_CLUSTER:
  455      retval = kind, None
  456    elif kind in (constants.TAG_NODEGROUP,
  457                  constants.TAG_NODE,
  458                  constants.TAG_NETWORK,
  459                  constants.TAG_INSTANCE):
  460      if not args:
  461        raise errors.OpPrereqError("no arguments passed to the command",
  462                                   errors.ECODE_INVAL)
  463      name = args.pop(0)
  464      retval = kind, name
  465    else:
  466      raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  467    return retval
  468
  469
  470  def _ExtendTags(opts, args):
  471    """Extend the args if a source file has been given.
  472
  473    This function will extend the tags with the contents of the file
  474    passed in the 'tags_source' attribute of the opts parameter. A file
  475    named '-' will be replaced by stdin.
  476
  477    """
  478    fname = opts.tags_source
  479    if fname is None:
  480      return
  481    if fname == "-":
  482      new_fh = sys.stdin
  483    else:
  484      new_fh = open(fname, "r")
  485    new_data = []
  486    try:
  487      # we don't use the nice 'new_data = [line.strip() for line in fh]'
  488      # because of python bug 1633941
  489      while True:
  490        line = new_fh.readline()
  491        if not line:
  492          break
  493        new_data.append(line.strip())
  494    finally:
  495      new_fh.close()
  496    args.extend(new_data)
  497
  498
  499  def ListTags(opts, args):
  500    """List the tags on a given object.
  501
  502    This is a generic implementation that knows how to deal with all
  503    three cases of tag objects (cluster, node, instance). The opts
  504    argument is expected to contain a tag_type field denoting what
  505    object type we work on.
  506
  507    """
  508    kind, name = _ExtractTagsObject(opts, args)
  509    cl = GetClient(query=True)
  510    result = cl.QueryTags(kind, name)
  511    result = list(result)
  512    result.sort()
  513    for tag in result:
  514      ToStdout(tag)
  515
  516
  517  def AddTags(opts, args):
  518    """Add tags on a given object.
  519
  520    This is a generic implementation that knows how to deal with all
  521    three cases of tag objects (cluster, node, instance). The opts
  522    argument is expected to contain a tag_type field denoting what
  523    object type we work on.
  524
  525    """
  526    kind, name = _ExtractTagsObject(opts, args)
  527    _ExtendTags(opts, args)
  528    if not args:
  529      raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  530    op = opcodes.OpTagsSet(kind=kind, name=name, tags=args)
  531    SubmitOrSend(op, opts)
  532
  533
  534  def RemoveTags(opts, args):
  535    """Remove tags from a given object.
  536
  537    This is a generic implementation that knows how to deal with all
  538    three cases of tag objects (cluster, node, instance). The opts
  539    argument is expected to contain a tag_type field denoting what
  540    object type we work on.
  541
  542    """
  543    kind, name = _ExtractTagsObject(opts, args)
  544    _ExtendTags(opts, args)
  545    if not args:
  546      raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  547    op = opcodes.OpTagsDel(kind=kind, name=name, tags=args)
  548    SubmitOrSend(op, opts)
  549
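The tag handlers above all follow the same calling convention: an optparse options object that must carry a tag_type attribute (and tags_source for the add/remove variants), plus the remaining positional arguments. A minimal sketch of that contract using a hand-built options object (illustrative only; real commands get opts from the option parser, and the node name and tags here are made up):

import optparse

opts = optparse.Values({"tag_type": constants.TAG_NODE, "tags_source": None})
args = ["node1.example.com", "important", "billing"]
kind, name = _ExtractTagsObject(opts, args)
# kind == constants.TAG_NODE, name == "node1.example.com", and args has been
# reduced in place to the tag names ["important", "billing"]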
  550
  551  def check_unit(option, opt, value): # pylint: disable=W0613
  552    """OptParsers custom converter for units.
  553
  554    """
  555    try:
  556      return utils.ParseUnit(value)
  557    except errors.UnitParseError, err:
  558      raise OptionValueError("option %s: %s" % (opt, err))
  559
  560
  561  def _SplitKeyVal(opt, data):
  562    """Convert a KeyVal string into a dict.
  563
  564    This function will convert a key=val[,...] string into a dict. Empty
  565    values will be converted specially: keys which have the prefix 'no_'
  566    will have the value=False and the prefix stripped, the others will
  567    have value=True.
  568
  569    @type opt: string
  570    @param opt: a string holding the option name for which we process the
  571        data, used in building error messages
  572    @type data: string
  573    @param data: a string of the format key=val,key=val,...
  574    @rtype: dict
  575    @return: {key=val, key=val}
  576    @raises errors.ParameterError: if there are duplicate keys
  577
  578    """
  579    kv_dict = {}
  580    if data:
  581      for elem in utils.UnescapeAndSplit(data, sep=","):
  582        if "=" in elem:
  583          key, val = elem.split("=", 1)
  584        else:
  585          if elem.startswith(NO_PREFIX):
  586            key, val = elem[len(NO_PREFIX):], False
  587          elif elem.startswith(UN_PREFIX):
  588            key, val = elem[len(UN_PREFIX):], None
  589          else:
  590            key, val = elem, True
  591        if key in kv_dict:
  592          raise errors.ParameterError("Duplicate key '%s' in option %s" %
  593                                      (key, opt))
  594        kv_dict[key] = val
  595    return kv_dict
  596
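For reference, a sketch of the conversions performed above, with made-up hypervisor-style keys:

# Illustrative only:
#   _SplitKeyVal("-H", "mem=512,no_acpi,-kernel_path,serial_console")
# returns
#   {"mem": "512", "acpi": False, "kernel_path": None, "serial_console": True}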
  597
  598  def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  599    """Custom parser for ident:key=val,key=val options.
  600
  601    This will store the parsed values as a tuple (ident, {key: val}). As such,
  602    multiple uses of this option via action=append is possible.
  603
  604    """
  605    if ":" not in value:
  606      ident, rest = value, ""
  607    else:
  608      ident, rest = value.split(":", 1)
  609
  610    if ident.startswith(NO_PREFIX):
  611      if rest:
  612        msg = "Cannot pass options when removing parameter groups: %s" % value
  613        raise errors.ParameterError(msg)
  614      retval = (ident[len(NO_PREFIX):], False)
  615    elif (ident.startswith(UN_PREFIX) and
  616          (len(ident) <= len(UN_PREFIX) or
  617           not ident[len(UN_PREFIX)][0].isdigit())):
  618      if rest:
  619        msg = "Cannot pass options when removing parameter groups: %s" % value
  620        raise errors.ParameterError(msg)
  621      retval = (ident[len(UN_PREFIX):], None)
  622    else:
  623      kv_dict = _SplitKeyVal(opt, rest)
  624      retval = (ident, kv_dict)
  625    return retval
  626
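A sketch of the ident:key=val format this parser accepts (the input values are made up):

# Illustrative only:
#   check_ident_key_val(None, "--net", "0:ip=192.0.2.10,mode=bridged")
#     -> ("0", {"ip": "192.0.2.10", "mode": "bridged"})
#   check_ident_key_val(None, "--net", "no_0")
#     -> ("0", False), i.e. "remove parameter group 0"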
  627
  628  def check_key_val(option, opt, value): # pylint: disable=W0613
  629    """Custom parser class for key=val,key=val options.
  630
  631    This will store the parsed values as a dict {key: val}.
  632
  633    """
  634    return _SplitKeyVal(opt, value)
  635
  636
  637  def check_bool(option, opt, value): # pylint: disable=W0613
  638    """Custom parser for yes/no options.
  639
  640    This will store the parsed value as either True or False.
  641
  642    """
  643    value = value.lower()
  644    if value == constants.VALUE_FALSE or value == "no":
  645      return False
  646    elif value == constants.VALUE_TRUE or value == "yes":
  647      return True
  648    else:
  649      raise errors.ParameterError("Invalid boolean value '%s'" % value)
  650
  651
  652  def check_list(option, opt, value): # pylint: disable=W0613
  653    """Custom parser for comma-separated lists.
  654
  655    """
  656    # we have to make this explicit check since "".split(",") is [""],
  657    # not an empty list :(
  658    if not value:
  659      return []
  660    else:
  661      return utils.UnescapeAndSplit(value)
  662
  663
  664  def check_maybefloat(option, opt, value): # pylint: disable=W0613
  665    """Custom parser for float numbers which might be also defaults.
  666
  667    """
  668    value = value.lower()
  669
  670    if value == constants.VALUE_DEFAULT:
  671      return value
  672    else:
  673      return float(value)
  674
  675
  676  # completion_suggestion is normally a list. Using numeric values not evaluating
  677  # to False for dynamic completion.
  678  (OPT_COMPL_MANY_NODES,
  679   OPT_COMPL_ONE_NODE,
  680   OPT_COMPL_ONE_INSTANCE,
  681   OPT_COMPL_ONE_OS,
  682   OPT_COMPL_ONE_EXTSTORAGE,
  683   OPT_COMPL_ONE_IALLOCATOR,
  684   OPT_COMPL_ONE_NETWORK,
  685   OPT_COMPL_INST_ADD_NODES,
  686   OPT_COMPL_ONE_NODEGROUP) = range(100, 109)
  687
  688  OPT_COMPL_ALL = compat.UniqueFrozenset([
  689    OPT_COMPL_MANY_NODES,
  690    OPT_COMPL_ONE_NODE,
  691    OPT_COMPL_ONE_INSTANCE,
  692    OPT_COMPL_ONE_OS,
  693    OPT_COMPL_ONE_EXTSTORAGE,
  694    OPT_COMPL_ONE_IALLOCATOR,
  695    OPT_COMPL_ONE_NETWORK,
  696    OPT_COMPL_INST_ADD_NODES,
  697    OPT_COMPL_ONE_NODEGROUP,
  698    ])
  699
  700
  701  class CliOption(Option):
  702    """Custom option class for optparse.
  703
  704    """
  705    ATTRS = Option.ATTRS + [
  706      "completion_suggest",
  707      ]
  708    TYPES = Option.TYPES + (
  709      "identkeyval",
  710      "keyval",
  711      "unit",
  712      "bool",
  713      "list",
  714      "maybefloat",
  715      )
  716    TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  717    TYPE_CHECKER["identkeyval"] = check_ident_key_val
  718    TYPE_CHECKER["keyval"] = check_key_val
  719    TYPE_CHECKER["unit"] = check_unit
  720    TYPE_CHECKER["bool"] = check_bool
  721    TYPE_CHECKER["list"] = check_list
  722    TYPE_CHECKER["maybefloat"] = check_maybefloat
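The extra types registered here are what let the option definitions below use type="keyval", type="unit", type="bool" and so on. A minimal sketch of the wiring, using the OptionParser already imported at the top of this module and a made-up option name:

# Illustrative only -- not one of the module's real options:
_example_parser = OptionParser(option_list=[
  CliOption("-X", "--example-params", dest="example_params",
            type="keyval", default={}, help="key=value parameters"),
  ])
_opts, _ = _example_parser.parse_args(["-X", "mem=512,no_acpi"])
# _opts.example_params == {"mem": "512", "acpi": False}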
723 724 725 # optparse.py sets make_option, so we do it for our own option class, too 726 cli_option = CliOption 727 728 729 _YORNO = "yes|no" 730 731 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count", 732 help="Increase debugging level") 733 734 NOHDR_OPT = cli_option("--no-headers", default=False, 735 action="store_true", dest="no_headers", 736 help="Don't display column headers") 737 738 SEP_OPT = cli_option("--separator", default=None, 739 action="store", dest="separator", 740 help=("Separator between output fields" 741 " (defaults to one space)")) 742 743 USEUNITS_OPT = cli_option("--units", default=None, 744 dest="units", choices=("h", "m", "g", "t"), 745 help="Specify units for output (one of h/m/g/t)") 746 747 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store", 748 type="string", metavar="FIELDS", 749 help="Comma separated list of output fields") 750 751 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true", 752 default=False, help="Force the operation") 753 754 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true", 755 default=False, help="Do not require confirmation") 756 757 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline", 758 action="store_true", default=False, 759 help=("Ignore offline nodes and do as much" 760 " as possible")) 761 762 TAG_ADD_OPT = cli_option("--tags", dest="tags", 763 default=None, help="Comma-separated list of instance" 764 " tags") 765 766 TAG_SRC_OPT = cli_option("--from", dest="tags_source", 767 default=None, help="File with tag names") 768 769 SUBMIT_OPT = cli_option("--submit", dest="submit_only", 770 default=False, action="store_true", 771 help=("Submit the job and return the job ID, but" 772 " don't wait for the job to finish")) 773 774 SYNC_OPT = cli_option("--sync", dest="do_locking", 775 default=False, action="store_true", 776 help=("Grab locks while doing the queries" 777 " in order to ensure more consistent results")) 778 779 DRY_RUN_OPT = cli_option("--dry-run", default=False, 780 action="store_true", 781 help=("Do not execute the operation, just run the" 782 " check steps and verify if it could be" 783 " executed")) 784 785 VERBOSE_OPT = cli_option("-v", "--verbose", default=False, 786 action="store_true", 787 help="Increase the verbosity of the operation") 788 789 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False, 790 action="store_true", dest="simulate_errors", 791 help="Debugging option that makes the operation" 792 " treat most runtime checks as failed") 793 794 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync", 795 default=True, action="store_false", 796 help="Don't wait for sync (DANGEROUS!)") 797 798 WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync", 799 default=False, action="store_true", 800 help="Wait for disks to sync") 801 802 ONLINE_INST_OPT = cli_option("--online", dest="online_inst", 803 action="store_true", default=False, 804 help="Enable offline instance") 805 806 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst", 807 action="store_true", default=False, 808 help="Disable down instance") 809 810 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template", 811 help=("Custom disk setup (%s)" % 812 utils.CommaJoin(constants.DISK_TEMPLATES)), 813 default=None, metavar="TEMPL", 814 choices=list(constants.DISK_TEMPLATES)) 815 816 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true", 817 help="Do not create any network cards for" 818 " 
the instance") 819 820 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir", 821 help="Relative path under default cluster-wide" 822 " file storage dir to store file-based disks", 823 default=None, metavar="<DIR>") 824 825 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver", 826 help="Driver to use for image files", 827 default="loop", metavar="<DRIVER>", 828 choices=list(constants.FILE_DRIVER)) 829 830 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>", 831 help="Select nodes for the instance automatically" 832 " using the <NAME> iallocator plugin", 833 default=None, type="string", 834 completion_suggest=OPT_COMPL_ONE_IALLOCATOR) 835 836 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator", 837 metavar="<NAME>", 838 help="Set the default instance" 839 " allocator plugin", 840 default=None, type="string", 841 completion_suggest=OPT_COMPL_ONE_IALLOCATOR) 842 843 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run", 844 metavar="<os>", 845 completion_suggest=OPT_COMPL_ONE_OS) 846 847 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams", 848 type="keyval", default={}, 849 help="OS parameters") 850 851 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant", 852 action="store_true", default=False, 853 help="Force an unknown variant") 854 855 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install", 856 action="store_true", default=False, 857 help="Do not install the OS (will" 858 " enable no-start)") 859 860 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes", 861 dest="allow_runtime_chgs", 862 default=True, action="store_false", 863 help="Don't allow runtime changes") 864 865 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams", 866 type="keyval", default={}, 867 help="Backend parameters") 868 869 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval", 870 default={}, dest="hvparams", 871 help="Hypervisor parameters") 872 873 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams", 874 help="Disk template parameters, in the format" 875 " template:option=value,option=value,...", 876 type="identkeyval", action="append", default=[]) 877 878 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size", 879 type="keyval", default={}, 880 help="Memory size specs: list of key=value," 881 " where key is one of min, max, std" 882 " (in MB or using a unit)") 883 884 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count", 885 type="keyval", default={}, 886 help="CPU count specs: list of key=value," 887 " where key is one of min, max, std") 888 889 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count", 890 dest="ispecs_disk_count", 891 type="keyval", default={}, 892 help="Disk count specs: list of key=value," 893 " where key is one of min, max, std") 894 895 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size", 896 type="keyval", default={}, 897 help="Disk size specs: list of key=value," 898 " where key is one of min, max, std" 899 " (in MB or using a unit)") 900 901 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count", 902 type="keyval", default={}, 903 help="NIC count specs: list of key=value," 904 " where key is one of min, max, std") 905 906 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates", 907 dest="ipolicy_disk_templates", 908 type="list", default=None, 909 help="Comma-separated list of" 910 " enabled disk templates") 911 
912 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio", 913 dest="ipolicy_vcpu_ratio", 914 type="maybefloat", default=None, 915 help="The maximum allowed vcpu-to-cpu ratio") 916 917 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio", 918 dest="ipolicy_spindle_ratio", 919 type="maybefloat", default=None, 920 help=("The maximum allowed instances to" 921 " spindle ratio")) 922 923 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor", 924 help="Hypervisor and hypervisor options, in the" 925 " format hypervisor:option=value,option=value,...", 926 default=None, type="identkeyval") 927 928 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams", 929 help="Hypervisor and hypervisor options, in the" 930 " format hypervisor:option=value,option=value,...", 931 default=[], action="append", type="identkeyval") 932 933 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True, 934 action="store_false", 935 help="Don't check that the instance's IP" 936 " is alive") 937 938 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check", 939 default=True, action="store_false", 940 help="Don't check that the instance's name" 941 " is resolvable") 942 943 NET_OPT = cli_option("--net", 944 help="NIC parameters", default=[], 945 dest="nics", action="append", type="identkeyval") 946 947 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[], 948 dest="disks", action="append", type="identkeyval") 949 950 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None, 951 help="Comma-separated list of disks" 952 " indices to act on (e.g. 0,2) (optional," 953 " defaults to all disks)") 954 955 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size", 956 help="Enforces a single-disk configuration using the" 957 " given disk size, in MiB unless a suffix is used", 958 default=None, type="unit", metavar="<size>") 959 960 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency", 961 dest="ignore_consistency", 962 action="store_true", default=False, 963 help="Ignore the consistency of the disks on" 964 " the secondary") 965 966 ALLOW_FAILOVER_OPT = cli_option("--allow-failover", 967 dest="allow_failover", 968 action="store_true", default=False, 969 help="If migration is not possible fallback to" 970 " failover") 971 972 NONLIVE_OPT = cli_option("--non-live", dest="live", 973 default=True, action="store_false", 974 help="Do a non-live migration (this usually means" 975 " freeze the instance, save the state, transfer and" 976 " only then resume running on the secondary node)") 977 978 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode", 979 default=None, 980 choices=list(constants.HT_MIGRATION_MODES), 981 help="Override default migration mode (choose" 982 " either live or non-live") 983 984 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node", 985 help="Target node and optional secondary node", 986 metavar="<pnode>[:<snode>]", 987 completion_suggest=OPT_COMPL_INST_ADD_NODES) 988 989 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[], 990 action="append", metavar="<node>", 991 help="Use only this node (can be used multiple" 992 " times, if not given defaults to all nodes)", 993 completion_suggest=OPT_COMPL_ONE_NODE) 994 995 NODEGROUP_OPT_NAME = "--node-group" 996 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME, 997 dest="nodegroup", 998 help="Node group (name or uuid)", 999 metavar="<nodegroup>", 1000 default=None, type="string", 1001 completion_suggest=OPT_COMPL_ONE_NODEGROUP) 1002 
1003 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node", 1004 metavar="<node>", 1005 completion_suggest=OPT_COMPL_ONE_NODE) 1006 1007 NOSTART_OPT = cli_option("--no-start", dest="start", default=True, 1008 action="store_false", 1009 help="Don't start the instance after creation") 1010 1011 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command", 1012 action="store_true", default=False, 1013 help="Show command instead of executing it") 1014 1015 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup", 1016 default=False, action="store_true", 1017 help="Instead of performing the migration, try to" 1018 " recover from a failed cleanup. This is safe" 1019 " to run even if the instance is healthy, but it" 1020 " will create extra replication traffic and " 1021 " disrupt briefly the replication (like during the" 1022 " migration") 1023 1024 STATIC_OPT = cli_option("-s", "--static", dest="static", 1025 action="store_true", default=False, 1026 help="Only show configuration data, not runtime data") 1027 1028 ALL_OPT = cli_option("--all", dest="show_all", 1029 default=False, action="store_true", 1030 help="Show info on all instances on the cluster." 1031 " This can take a long time to run, use wisely") 1032 1033 SELECT_OS_OPT = cli_option("--select-os", dest="select_os", 1034 action="store_true", default=False, 1035 help="Interactive OS reinstall, lists available" 1036 " OS templates for selection") 1037 1038 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures", 1039 action="store_true", default=False, 1040 help="Remove the instance from the cluster" 1041 " configuration even if there are failures" 1042 " during the removal process") 1043 1044 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures", 1045 dest="ignore_remove_failures", 1046 action="store_true", default=False, 1047 help="Remove the instance from the" 1048 " cluster configuration even if there" 1049 " are failures during the removal" 1050 " process") 1051 1052 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance", 1053 action="store_true", default=False, 1054 help="Remove the instance from the cluster") 1055 1056 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node", 1057 help="Specifies the new node for the instance", 1058 metavar="NODE", default=None, 1059 completion_suggest=OPT_COMPL_ONE_NODE) 1060 1061 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node", 1062 help="Specifies the new secondary node", 1063 metavar="NODE", default=None, 1064 completion_suggest=OPT_COMPL_ONE_NODE) 1065 1066 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary", 1067 default=False, action="store_true", 1068 help="Replace the disk(s) on the primary" 1069 " node (applies only to internally mirrored" 1070 " disk templates, e.g. %s)" % 1071 utils.CommaJoin(constants.DTS_INT_MIRROR)) 1072 1073 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary", 1074 default=False, action="store_true", 1075 help="Replace the disk(s) on the secondary" 1076 " node (applies only to internally mirrored" 1077 " disk templates, e.g. 
%s)" % 1078 utils.CommaJoin(constants.DTS_INT_MIRROR)) 1079 1080 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote", 1081 default=False, action="store_true", 1082 help="Lock all nodes and auto-promote as needed" 1083 " to MC status") 1084 1085 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto", 1086 default=False, action="store_true", 1087 help="Automatically replace faulty disks" 1088 " (applies only to internally mirrored" 1089 " disk templates, e.g. %s)" % 1090 utils.CommaJoin(constants.DTS_INT_MIRROR)) 1091 1092 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size", 1093 default=False, action="store_true", 1094 help="Ignore current recorded size" 1095 " (useful for forcing activation when" 1096 " the recorded size is wrong)") 1097 1098 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node", 1099 metavar="<node>", 1100 completion_suggest=OPT_COMPL_ONE_NODE) 1101 1102 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory", 1103 metavar="<dir>") 1104 1105 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip", 1106 help="Specify the secondary ip for the node", 1107 metavar="ADDRESS", default=None) 1108 1109 READD_OPT = cli_option("--readd", dest="readd", 1110 default=False, action="store_true", 1111 help="Readd old node after replacing it") 1112 1113 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check", 1114 default=True, action="store_false", 1115 help="Disable SSH key fingerprint checking") 1116 1117 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join", 1118 default=False, action="store_true", 1119 help="Force the joining of a node") 1120 1121 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate", 1122 type="bool", default=None, metavar=_YORNO, 1123 help="Set the master_candidate flag on the node") 1124 1125 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO, 1126 type="bool", default=None, 1127 help=("Set the offline flag on the node" 1128 " (cluster does not communicate with offline" 1129 " nodes)")) 1130 1131 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO, 1132 type="bool", default=None, 1133 help=("Set the drained flag on the node" 1134 " (excluded from allocation operations)")) 1135 1136 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable", 1137 type="bool", default=None, metavar=_YORNO, 1138 help="Set the master_capable flag on the node") 1139 1140 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable", 1141 type="bool", default=None, metavar=_YORNO, 1142 help="Set the vm_capable flag on the node") 1143 1144 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable", 1145 type="bool", default=None, metavar=_YORNO, 1146 help="Set the allocatable flag on a volume") 1147 1148 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage", 1149 help="Disable support for lvm based instances" 1150 " (cluster-wide)", 1151 action="store_false", default=True) 1152 1153 ENABLED_HV_OPT = cli_option("--enabled-hypervisors", 1154 dest="enabled_hypervisors", 1155 help="Comma-separated list of hypervisors", 1156 type="string", default=None) 1157 1158 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams", 1159 type="keyval", default={}, 1160 help="NIC parameters") 1161 1162 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None, 1163 dest="candidate_pool_size", type="int", 1164 help="Set the candidate pool size") 1165 1166 
VG_NAME_OPT = cli_option("--vg-name", dest="vg_name", 1167 help=("Enables LVM and specifies the volume group" 1168 " name (cluster-wide) for disk allocation" 1169 " [%s]" % constants.DEFAULT_VG), 1170 metavar="VG", default=None) 1171 1172 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it", 1173 help="Destroy cluster", action="store_true") 1174 1175 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting", 1176 help="Skip node agreement check (dangerous)", 1177 action="store_true", default=False) 1178 1179 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix", 1180 help="Specify the mac prefix for the instance IP" 1181 " addresses, in the format XX:XX:XX", 1182 metavar="PREFIX", 1183 default=None) 1184 1185 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev", 1186 help="Specify the node interface (cluster-wide)" 1187 " on which the master IP address will be added" 1188 " (cluster init default: %s)" % 1189 constants.DEFAULT_BRIDGE, 1190 metavar="NETDEV", 1191 default=None) 1192 1193 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask", 1194 help="Specify the netmask of the master IP", 1195 metavar="NETMASK", 1196 default=None) 1197 1198 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script", 1199 dest="use_external_mip_script", 1200 help="Specify whether to run a" 1201 " user-provided script for the master" 1202 " IP address turnup and" 1203 " turndown operations", 1204 type="bool", metavar=_YORNO, default=None) 1205 1206 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir", 1207 help="Specify the default directory (cluster-" 1208 "wide) for storing the file-based disks [%s]" % 1209 pathutils.DEFAULT_FILE_STORAGE_DIR, 1210 metavar="DIR", 1211 default=pathutils.DEFAULT_FILE_STORAGE_DIR) 1212 1213 GLOBAL_SHARED_FILEDIR_OPT = cli_option( 1214 "--shared-file-storage-dir", 1215 dest="shared_file_storage_dir", 1216 help="Specify the default directory (cluster-wide) for storing the" 1217 " shared file-based disks [%s]" % 1218 pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR, 1219 metavar="SHAREDDIR", default=pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR) 1220 1221 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts", 1222 help="Don't modify %s" % pathutils.ETC_HOSTS, 1223 action="store_false", default=True) 1224 1225 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup", 1226 help="Don't initialize SSH keys", 1227 action="store_false", default=True) 1228 1229 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes", 1230 help="Enable parseable error messages", 1231 action="store_true", default=False) 1232 1233 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem", 1234 help="Skip N+1 memory redundancy tests", 1235 action="store_true", default=False) 1236 1237 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type", 1238 help="Type of reboot: soft/hard/full", 1239 default=constants.INSTANCE_REBOOT_HARD, 1240 metavar="<REBOOT>", 1241 choices=list(constants.REBOOT_TYPES)) 1242 1243 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries", 1244 dest="ignore_secondaries", 1245 default=False, action="store_true", 1246 help="Ignore errors from secondaries") 1247 1248 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown", 1249 action="store_false", default=True, 1250 help="Don't shutdown the instance (unsafe)") 1251 1252 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int", 1253 
default=constants.DEFAULT_SHUTDOWN_TIMEOUT, 1254 help="Maximum time to wait") 1255 1256 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout", 1257 dest="shutdown_timeout", type="int", 1258 default=constants.DEFAULT_SHUTDOWN_TIMEOUT, 1259 help="Maximum time to wait for instance" 1260 " shutdown") 1261 1262 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int", 1263 default=None, 1264 help=("Number of seconds between repetions of the" 1265 " command")) 1266 1267 EARLY_RELEASE_OPT = cli_option("--early-release", 1268 dest="early_release", default=False, 1269 action="store_true", 1270 help="Release the locks on the secondary" 1271 " node(s) early") 1272 1273 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate", 1274 dest="new_cluster_cert", 1275 default=False, action="store_true", 1276 help="Generate a new cluster certificate") 1277 1278 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert", 1279 default=None, 1280 help="File containing new RAPI certificate") 1281 1282 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert", 1283 default=None, action="store_true", 1284 help=("Generate a new self-signed RAPI" 1285 " certificate")) 1286 1287 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert", 1288 default=None, 1289 help="File containing new SPICE certificate") 1290 1291 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert", 1292 default=None, 1293 help="File containing the certificate of the CA" 1294 " which signed the SPICE certificate") 1295 1296 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate", 1297 dest="new_spice_cert", default=None, 1298 action="store_true", 1299 help=("Generate a new self-signed SPICE" 1300 " certificate")) 1301 1302 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key", 1303 dest="new_confd_hmac_key", 1304 default=False, action="store_true", 1305 help=("Create a new HMAC key for %s" % 1306 constants.CONFD)) 1307 1308 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret", 1309 dest="cluster_domain_secret", 1310 default=None, 1311 help=("Load new new cluster domain" 1312 " secret from file")) 1313 1314 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret", 1315 dest="new_cluster_domain_secret", 1316 default=False, action="store_true", 1317 help=("Create a new cluster domain" 1318 " secret")) 1319 1320 USE_REPL_NET_OPT = cli_option("--use-replication-network", 1321 dest="use_replication_network", 1322 help="Whether to use the replication network" 1323 " for talking to the nodes", 1324 action="store_true", default=False) 1325 1326 MAINTAIN_NODE_HEALTH_OPT = \ 1327 cli_option("--maintain-node-health", dest="maintain_node_health", 1328 metavar=_YORNO, default=None, type="bool", 1329 help="Configure the cluster to automatically maintain node" 1330 " health, by shutting down unknown instances, shutting down" 1331 " unknown DRBD devices, etc.") 1332 1333 IDENTIFY_DEFAULTS_OPT = \ 1334 cli_option("--identify-defaults", dest="identify_defaults", 1335 default=False, action="store_true", 1336 help="Identify which saved instance parameters are equal to" 1337 " the current cluster defaults and set them as such, instead" 1338 " of marking them as overridden") 1339 1340 UIDPOOL_OPT = cli_option("--uid-pool", default=None, 1341 action="store", dest="uid_pool", 1342 help=("A list of user-ids or user-id" 1343 " ranges separated by commas")) 1344 1345 ADD_UIDS_OPT = cli_option("--add-uids", default=None, 1346 action="store", dest="add_uids", 1347 
help=("A list of user-ids or user-id" 1348 " ranges separated by commas, to be" 1349 " added to the user-id pool")) 1350 1351 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None, 1352 action="store", dest="remove_uids", 1353 help=("A list of user-ids or user-id" 1354 " ranges separated by commas, to be" 1355 " removed from the user-id pool")) 1356 1357 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None, 1358 action="store", dest="reserved_lvs", 1359 help=("A comma-separated list of reserved" 1360 " logical volumes names, that will be" 1361 " ignored by cluster verify")) 1362 1363 ROMAN_OPT = cli_option("--roman", 1364 dest="roman_integers", default=False, 1365 action="store_true", 1366 help="Use roman numbers for positive integers") 1367 1368 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper", 1369 action="store", default=None, 1370 help="Specifies usermode helper for DRBD") 1371 1372 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage", 1373 action="store_false", default=True, 1374 help="Disable support for DRBD") 1375 1376 PRIMARY_IP_VERSION_OPT = \ 1377 cli_option("--primary-ip-version", default=constants.IP4_VERSION, 1378 action="store", dest="primary_ip_version", 1379 metavar="%d|%d" % (constants.IP4_VERSION, 1380 constants.IP6_VERSION), 1381 help="Cluster-wide IP version for primary IP") 1382 1383 SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False, 1384 action="store_true", 1385 help="Show machine name for every line in output") 1386 1387 FAILURE_ONLY_OPT = cli_option("--failure-only", default=False, 1388 action="store_true", 1389 help=("Hide successful results and show failures" 1390 " only (determined by the exit code)"))
 1391
 1392
 1393  def _PriorityOptionCb(option, _, value, parser):
 1394    """Callback for processing C{--priority} option.
 1395
 1396    """
 1397    value = _PRIONAME_TO_VALUE[value]
 1398
 1399    setattr(parser.values, option.dest, value)
1400 1401 1402 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority", 1403 metavar="|".join(name for name, _ in _PRIORITY_NAMES), 1404 choices=_PRIONAME_TO_VALUE.keys(), 1405 action="callback", type="choice", 1406 callback=_PriorityOptionCb, 1407 help="Priority for opcode processing") 1408 1409 HID_OS_OPT = cli_option("--hidden", dest="hidden", 1410 type="bool", default=None, metavar=_YORNO, 1411 help="Sets the hidden flag on the OS") 1412 1413 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted", 1414 type="bool", default=None, metavar=_YORNO, 1415 help="Sets the blacklisted flag on the OS") 1416 1417 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None, 1418 type="bool", metavar=_YORNO, 1419 dest="prealloc_wipe_disks", 1420 help=("Wipe disks prior to instance" 1421 " creation")) 1422 1423 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams", 1424 type="keyval", default=None, 1425 help="Node parameters") 1426 1427 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy", 1428 action="store", metavar="POLICY", default=None, 1429 help="Allocation policy for the node group") 1430 1431 NODE_POWERED_OPT = cli_option("--node-powered", default=None, 1432 type="bool", metavar=_YORNO, 1433 dest="node_powered", 1434 help="Specify if the SoR for node is powered") 1435 1436 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int", 1437 default=constants.OOB_TIMEOUT, 1438 help="Maximum time to wait for out-of-band helper") 1439 1440 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float", 1441 default=constants.OOB_POWER_DELAY, 1442 help="Time in seconds to wait between power-ons") 1443 1444 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter", 1445 action="store_true", default=False, 1446 help=("Whether command argument should be treated" 1447 " as filter")) 1448 1449 NO_REMEMBER_OPT = cli_option("--no-remember", 1450 dest="no_remember", 1451 action="store_true", default=False, 1452 help="Perform but do not record the change" 1453 " in the configuration") 1454 1455 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only", 1456 default=False, action="store_true", 1457 help="Evacuate primary instances only") 1458 1459 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only", 1460 default=False, action="store_true", 1461 help="Evacuate secondary instances only" 1462 " (applies only to internally mirrored" 1463 " disk templates, e.g. 
%s)" % 1464 utils.CommaJoin(constants.DTS_INT_MIRROR)) 1465 1466 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused", 1467 action="store_true", default=False, 1468 help="Pause instance at startup") 1469 1470 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>", 1471 help="Destination node group (name or uuid)", 1472 default=None, action="append", 1473 completion_suggest=OPT_COMPL_ONE_NODEGROUP) 1474 1475 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[], 1476 action="append", dest="ignore_errors", 1477 choices=list(constants.CV_ALL_ECODES_STRINGS), 1478 help="Error code to be ignored") 1479 1480 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state", 1481 action="append", 1482 help=("Specify disk state information in the" 1483 " format" 1484 " storage_type/identifier:option=value,...;" 1485 " note this is unused for now"), 1486 type="identkeyval") 1487 1488 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state", 1489 action="append", 1490 help=("Specify hypervisor state information in the" 1491 " format hypervisor:option=value,...;" 1492 " note this is unused for now"), 1493 type="identkeyval") 1494 1495 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy", 1496 action="store_true", default=False, 1497 help="Ignore instance policy violations") 1498 1499 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem", 1500 help="Sets the instance's runtime memory," 1501 " ballooning it up or down to the new value", 1502 default=None, type="unit", metavar="<size>") 1503 1504 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute", 1505 action="store_true", default=False, 1506 help="Marks the grow as absolute instead of the" 1507 " (default) relative mode") 1508 1509 NETWORK_OPT = cli_option("--network", 1510 action="store", default=None, dest="network", 1511 help="IP network in CIDR notation") 1512 1513 GATEWAY_OPT = cli_option("--gateway", 1514 action="store", default=None, dest="gateway", 1515 help="IP address of the router (gateway)") 1516 1517 ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips", 1518 action="store", default=None, 1519 dest="add_reserved_ips", 1520 help="Comma-separated list of" 1521 " reserved IPs to add") 1522 1523 REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips", 1524 action="store", default=None, 1525 dest="remove_reserved_ips", 1526 help="Comma-delimited list of" 1527 " reserved IPs to remove") 1528 1529 NETWORK6_OPT = cli_option("--network6", 1530 action="store", default=None, dest="network6", 1531 help="IP network in CIDR notation") 1532 1533 GATEWAY6_OPT = cli_option("--gateway6", 1534 action="store", default=None, dest="gateway6", 1535 help="IP6 address of the router (gateway)") 1536 1537 NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check", 1538 dest="conflicts_check", 1539 default=True, 1540 action="store_false", 1541 help="Don't check for conflicting IPs") 1542 1543 #: Options provided by all commands 1544 COMMON_OPTS = [DEBUG_OPT] 1545 1546 # common options for creating instances. add and import then add their own 1547 # specific ones. 
 1548  COMMON_CREATE_OPTS = [
 1549    BACKEND_OPT,
 1550    DISK_OPT,
 1551    DISK_TEMPLATE_OPT,
 1552    FILESTORE_DIR_OPT,
 1553    FILESTORE_DRIVER_OPT,
 1554    HYPERVISOR_OPT,
 1555    IALLOCATOR_OPT,
 1556    NET_OPT,
 1557    NODE_PLACEMENT_OPT,
 1558    NOIPCHECK_OPT,
 1559    NOCONFLICTSCHECK_OPT,
 1560    NONAMECHECK_OPT,
 1561    NONICS_OPT,
 1562    NWSYNC_OPT,
 1563    OSPARAMS_OPT,
 1564    OS_SIZE_OPT,
 1565    SUBMIT_OPT,
 1566    TAG_ADD_OPT,
 1567    DRY_RUN_OPT,
 1568    PRIORITY_OPT,
 1569    ]
 1570
 1571  # common instance policy options
 1572  INSTANCE_POLICY_OPTS = [
 1573    SPECS_CPU_COUNT_OPT,
 1574    SPECS_DISK_COUNT_OPT,
 1575    SPECS_DISK_SIZE_OPT,
 1576    SPECS_MEM_SIZE_OPT,
 1577    SPECS_NIC_COUNT_OPT,
 1578    IPOLICY_DISK_TEMPLATES,
 1579    IPOLICY_VCPU_RATIO,
 1580    IPOLICY_SPINDLE_RATIO,
 1581    ]
 1582
 1583
 1584  class _ShowUsage(Exception):
 1585    """Exception class for L{_ParseArgs}.
 1586
 1587    """
 1588    def __init__(self, exit_error):
 1589      """Initializes instances of this class.
 1590
 1591      @type exit_error: bool
 1592      @param exit_error: Whether to report failure on exit
 1593
 1594      """
 1595      Exception.__init__(self)
 1596      self.exit_error = exit_error
 1597
 1598
 1599  class _ShowVersion(Exception):
 1600    """Exception class for L{_ParseArgs}.
 1601
 1602    """
 1603
 1604
 1605  def _ParseArgs(binary, argv, commands, aliases, env_override):
 1606    """Parser for the command line arguments.
 1607
 1608    This function parses the arguments and returns the function which
 1609    must be executed together with its (modified) arguments.
 1610
 1611    @param binary: Script name
 1612    @param argv: Command line arguments
 1613    @param commands: Dictionary containing command definitions
 1614    @param aliases: dictionary with command aliases {"alias": "target", ...}
 1615    @param env_override: list of env variables allowed for default args
 1616    @raise _ShowUsage: If usage description should be shown
 1617    @raise _ShowVersion: If version should be shown
 1618
 1619    """
 1620    assert not (env_override - set(commands))
 1621    assert not (set(aliases.keys()) & set(commands.keys()))
 1622
 1623    if len(argv) > 1:
 1624      cmd = argv[1]
 1625    else:
 1626      # No option or command given
 1627      raise _ShowUsage(exit_error=True)
 1628
 1629    if cmd == "--version":
 1630      raise _ShowVersion()
 1631    elif cmd == "--help":
 1632      raise _ShowUsage(exit_error=False)
 1633    elif not (cmd in commands or cmd in aliases):
 1634      raise _ShowUsage(exit_error=True)
 1635
 1636    # get command, unalias it, and look it up in commands
 1637    if cmd in aliases:
 1638      if aliases[cmd] not in commands:
 1639        raise errors.ProgrammerError("Alias '%s' maps to non-existing"
 1640                                     " command '%s'" % (cmd, aliases[cmd]))
 1641
 1642      cmd = aliases[cmd]
 1643
 1644    if cmd in env_override:
 1645      args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
 1646      env_args = os.environ.get(args_env_name)
 1647      if env_args:
 1648        argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))
 1649
 1650    func, args_def, parser_opts, usage, description = commands[cmd]
 1651    parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
 1652                          description=description,
 1653                          formatter=TitledHelpFormatter(),
 1654                          usage="%%prog %s %s" % (cmd, usage))
 1655    parser.disable_interspersed_args()
 1656    options, args = parser.parse_args(args=argv[2:])
 1657
 1658    if not _CheckArguments(cmd, args_def, args):
 1659      return None, None, None
 1660
 1661    return func, options, args
 1662
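Each value in the commands dictionary unpacked above is a five-element tuple: the handler function, the positional-argument specification, the command-specific options, the usage suffix and the description; COMMON_OPTS is appended to the options automatically. A hypothetical entry, shown only to illustrate the shape (the command name and option list are made up):

# Illustrative only -- the shape of one commands-table entry:
#   "list-tags": (ListTags, ARGS_NONE, [NOHDR_OPT, SEP_OPT],
#                 "", "Lists the tags of the cluster"),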
 1663
 1664  def _FormatUsage(binary, commands):
 1665    """Generates a nice description of all commands.
 1666
 1667    @param binary: Script name
 1668    @param commands: Dictionary containing command definitions
 1669
 1670    """
 1671    # compute the max line length for cmd + usage
 1672    mlen = min(60, max(map(len, commands)))
 1673
 1674    yield "Usage: %s {command} [options...] [argument...]" % binary
 1675    yield "%s <command> --help to see details, or man %s" % (binary, binary)
 1676    yield ""
 1677    yield "Commands:"
 1678
 1679    # and format a nice command list
 1680    for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()):
 1681      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
 1682      yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0))
 1683      for line in help_lines:
 1684        yield " %-*s   %s" % (mlen, "", line)
 1685
 1686    yield ""
 1687
1688 1689 -def _CheckArguments(cmd, args_def, args):
1690 """Verifies the arguments using the argument definition. 1691 1692 Algorithm: 1693 1694 1. Abort with error if values specified by user but none expected. 1695 1696 1. For each argument in definition 1697 1698 1. Keep running count of minimum number of values (min_count) 1699 1. Keep running count of maximum number of values (max_count) 1700 1. If it has an unlimited number of values 1701 1702 1. Abort with error if it's not the last argument in the definition 1703 1704 1. If last argument has limited number of values 1705 1706 1. Abort with error if number of values doesn't match or is too large 1707 1708 1. Abort with error if user didn't pass enough values (min_count) 1709 1710 """ 1711 if args and not args_def: 1712 ToStderr("Error: Command %s expects no arguments", cmd) 1713 return False 1714 1715 min_count = None 1716 max_count = None 1717 check_max = None 1718 1719 last_idx = len(args_def) - 1 1720 1721 for idx, arg in enumerate(args_def): 1722 if min_count is None: 1723 min_count = arg.min 1724 elif arg.min is not None: 1725 min_count += arg.min 1726 1727 if max_count is None: 1728 max_count = arg.max 1729 elif arg.max is not None: 1730 max_count += arg.max 1731 1732 if idx == last_idx: 1733 check_max = (arg.max is not None) 1734 1735 elif arg.max is None: 1736 raise errors.ProgrammerError("Only the last argument can have max=None") 1737 1738 if check_max: 1739 # Command with exact number of arguments 1740 if (min_count is not None and max_count is not None and 1741 min_count == max_count and len(args) != min_count): 1742 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count) 1743 return False 1744 1745 # Command with limited number of arguments 1746 if max_count is not None and len(args) > max_count: 1747 ToStderr("Error: Command %s expects only %d argument(s)", 1748 cmd, max_count) 1749 return False 1750 1751 # Command with some required arguments 1752 if min_count is not None and len(args) < min_count: 1753 ToStderr("Error: Command %s expects at least %d argument(s)", 1754 cmd, min_count) 1755 return False 1756 1757 return True
1758
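As a quick illustration of the argument checking above, the following sketch calls _CheckArguments() directly with a stand-in argument definition; the _FakeArg class is hypothetical and only mimics the "min"/"max" attributes that the module's real argument definition classes expose.

  from ganeti import cli

  class _FakeArg:
    # stand-in for the module's argument definition classes;
    # _CheckArguments only looks at the "min" and "max" attributes
    def __init__(self, min=0, max=None):
      self.min = min
      self.max = max

  # a command taking exactly one argument
  args_def = [_FakeArg(min=1, max=1)]
  print cli._CheckArguments("demo", args_def, ["instance1.example.com"])  # True
  print cli._CheckArguments("demo", args_def, [])  # False, error printed to stderr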
1759 1760 -def SplitNodeOption(value):
1761 """Splits the value of a --node option. 1762 1763 """ 1764 if value and ":" in value: 1765 return value.split(":", 1) 1766 else: 1767 return (value, None)
1768
1769 1770 -def CalculateOSNames(os_name, os_variants):
1771 """Calculates all the names an OS can be called, according to its variants. 1772 1773 @type os_name: string 1774 @param os_name: base name of the os 1775 @type os_variants: list or None 1776 @param os_variants: list of supported variants 1777 @rtype: list 1778 @return: list of valid names 1779 1780 """ 1781 if os_variants: 1782 return ["%s+%s" % (os_name, v) for v in os_variants] 1783 else: 1784 return [os_name]
1785
1786 1787 -def ParseFields(selected, default):
1788 """Parses the values of "--field"-like options. 1789 1790 @type selected: string or None 1791 @param selected: User-selected options 1792 @type default: list 1793 @param default: Default fields 1794 1795 """ 1796 if selected is None: 1797 return default 1798 1799 if selected.startswith("+"): 1800 return default + selected[1:].split(",") 1801 1802 return selected.split(",")
1803 1804 1805 UsesRPC = rpc.RunWithRPC
1806 1807 1808 -def AskUser(text, choices=None):
1809 """Ask the user a question. 1810 1811 @param text: the question to ask 1812 1813 @param choices: list with elements tuples (input_char, return_value, 1814 description); if not given, it will default to: [('y', True, 1815 'Perform the operation'), ('n', False, 'Do no do the operation')]; 1816 note that the '?' char is reserved for help 1817 1818 @return: one of the return values from the choices list; if input is 1819 not possible (i.e. not running with a tty, we return the last 1820 entry from the list 1821 1822 """ 1823 if choices is None: 1824 choices = [("y", True, "Perform the operation"), 1825 ("n", False, "Do not perform the operation")] 1826 if not choices or not isinstance(choices, list): 1827 raise errors.ProgrammerError("Invalid choices argument to AskUser") 1828 for entry in choices: 1829 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?": 1830 raise errors.ProgrammerError("Invalid choices element to AskUser") 1831 1832 answer = choices[-1][1] 1833 new_text = [] 1834 for line in text.splitlines(): 1835 new_text.append(textwrap.fill(line, 70, replace_whitespace=False)) 1836 text = "\n".join(new_text) 1837 try: 1838 f = file("/dev/tty", "a+") 1839 except IOError: 1840 return answer 1841 try: 1842 chars = [entry[0] for entry in choices] 1843 chars[-1] = "[%s]" % chars[-1] 1844 chars.append("?") 1845 maps = dict([(entry[0], entry[1]) for entry in choices]) 1846 while True: 1847 f.write(text) 1848 f.write("\n") 1849 f.write("/".join(chars)) 1850 f.write(": ") 1851 line = f.readline(2).strip().lower() 1852 if line in maps: 1853 answer = maps[line] 1854 break 1855 elif line == "?": 1856 for entry in choices: 1857 f.write(" %s - %s\n" % (entry[0], entry[2])) 1858 f.write("\n") 1859 continue 1860 finally: 1861 f.close() 1862 return answer
1863
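A hedged usage sketch for AskUser() with a custom choices list; the texts and return values are illustrative. Without a controlling terminal the function silently returns the last entry's return value ("cancel" here).

  from ganeti import cli

  choice = cli.AskUser("Instance is running; how should it be migrated?",
                       choices=[("l", "live", "Live migration"),
                                ("n", "non-live", "Non-live migration"),
                                ("c", "cancel", "Cancel the operation")])
  if choice == "cancel":
    cli.ToStdout("Migration aborted")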
1864 1865 -class JobSubmittedException(Exception):
1866 """Job was submitted, client should exit. 1867 1868 This exception has one argument, the ID of the job that was 1869 submitted. The handler should print this ID. 1870 1871 This is not an error, just a structured way to exit from clients. 1872 1873 """
1874
1875 1876 -def SendJob(ops, cl=None):
1877 """Function to submit an opcode without waiting for the results. 1878 1879 @type ops: list 1880 @param ops: list of opcodes 1881 @type cl: luxi.Client 1882 @param cl: the luxi client to use for communicating with the master; 1883 if None, a new client will be created 1884 1885 """ 1886 if cl is None: 1887 cl = GetClient() 1888 1889 job_id = cl.SubmitJob(ops) 1890 1891 return job_id
1892
1893 1894 -def GenericPollJob(job_id, cbs, report_cbs):
1895 """Generic job-polling function. 1896 1897 @type job_id: number 1898 @param job_id: Job ID 1899 @type cbs: Instance of L{JobPollCbBase} 1900 @param cbs: Data callbacks 1901 @type report_cbs: Instance of L{JobPollReportCbBase} 1902 @param report_cbs: Reporting callbacks 1903 1904 """ 1905 prev_job_info = None 1906 prev_logmsg_serial = None 1907 1908 status = None 1909 1910 while True: 1911 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info, 1912 prev_logmsg_serial) 1913 if not result: 1914 # job not found, go away! 1915 raise errors.JobLost("Job with id %s lost" % job_id) 1916 1917 if result == constants.JOB_NOTCHANGED: 1918 report_cbs.ReportNotChanged(job_id, status) 1919 1920 # Wait again 1921 continue 1922 1923 # Split result, a tuple of (field values, log entries) 1924 (job_info, log_entries) = result 1925 (status, ) = job_info 1926 1927 if log_entries: 1928 for log_entry in log_entries: 1929 (serial, timestamp, log_type, message) = log_entry 1930 report_cbs.ReportLogMessage(job_id, serial, timestamp, 1931 log_type, message) 1932 prev_logmsg_serial = max(prev_logmsg_serial, serial) 1933 1934 # TODO: Handle canceled and archived jobs 1935 elif status in (constants.JOB_STATUS_SUCCESS, 1936 constants.JOB_STATUS_ERROR, 1937 constants.JOB_STATUS_CANCELING, 1938 constants.JOB_STATUS_CANCELED): 1939 break 1940 1941 prev_job_info = job_info 1942 1943 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"]) 1944 if not jobs: 1945 raise errors.JobLost("Job with id %s lost" % job_id) 1946 1947 status, opstatus, result = jobs[0] 1948 1949 if status == constants.JOB_STATUS_SUCCESS: 1950 return result 1951 1952 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED): 1953 raise errors.OpExecError("Job was canceled") 1954 1955 has_ok = False 1956 for idx, (status, msg) in enumerate(zip(opstatus, result)): 1957 if status == constants.OP_STATUS_SUCCESS: 1958 has_ok = True 1959 elif status == constants.OP_STATUS_ERROR: 1960 errors.MaybeRaise(msg) 1961 1962 if has_ok: 1963 raise errors.OpExecError("partial failure (opcode %d): %s" % 1964 (idx, msg)) 1965 1966 raise errors.OpExecError(str(msg)) 1967 1968 # default failure mode 1969 raise errors.OpExecError(result)
1970
1971 1972 -class JobPollCbBase:
1973 """Base class for L{GenericPollJob} callbacks. 1974 1975 """
1976 - def __init__(self):
1977 """Initializes this class. 1978 1979 """
1980
1981 - def WaitForJobChangeOnce(self, job_id, fields, 1982 prev_job_info, prev_log_serial):
1983 """Waits for changes on a job. 1984 1985 """ 1986 raise NotImplementedError()
1987
1988 - def QueryJobs(self, job_ids, fields):
1989 """Returns the selected fields for the selected job IDs. 1990 1991 @type job_ids: list of numbers 1992 @param job_ids: Job IDs 1993 @type fields: list of strings 1994 @param fields: Fields 1995 1996 """ 1997 raise NotImplementedError()
1998
1999 2000 -class JobPollReportCbBase:
2001 """Base class for L{GenericPollJob} reporting callbacks. 2002 2003 """
2004 - def __init__(self):
2005 """Initializes this class. 2006 2007 """
2008
2009 - def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2010 """Handles a log message. 2011 2012 """ 2013 raise NotImplementedError()
2014
2015 - def ReportNotChanged(self, job_id, status):
2016 """Called for if a job hasn't changed in a while. 2017 2018 @type job_id: number 2019 @param job_id: Job ID 2020 @type status: string or None 2021 @param status: Job status if available 2022 2023 """ 2024 raise NotImplementedError()
2025
2026 2027 -class _LuxiJobPollCb(JobPollCbBase):
2028 - def __init__(self, cl):
2029 """Initializes this class. 2030 2031 """ 2032 JobPollCbBase.__init__(self) 2033 self.cl = cl
2034
2035 - def WaitForJobChangeOnce(self, job_id, fields, 2036 prev_job_info, prev_log_serial):
2037 """Waits for changes on a job. 2038 2039 """ 2040 return self.cl.WaitForJobChangeOnce(job_id, fields, 2041 prev_job_info, prev_log_serial)
2042
2043 - def QueryJobs(self, job_ids, fields):
2044 """Returns the selected fields for the selected job IDs. 2045 2046 """ 2047 return self.cl.QueryJobs(job_ids, fields)
2048
2049 2050 -class FeedbackFnJobPollReportCb(JobPollReportCbBase):
2051 - def __init__(self, feedback_fn):
2052 """Initializes this class. 2053 2054 """ 2055 JobPollReportCbBase.__init__(self) 2056 2057 self.feedback_fn = feedback_fn 2058 2059 assert callable(feedback_fn)
2060
2061 - def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2062 """Handles a log message. 2063 2064 """ 2065 self.feedback_fn((timestamp, log_type, log_msg))
2066
2067 - def ReportNotChanged(self, job_id, status):
2068 """Called if a job hasn't changed in a while. 2069 2070 """
2071 # Ignore
2072 2073 2074 -class StdioJobPollReportCb(JobPollReportCbBase):
2075 - def __init__(self):
2076 """Initializes this class. 2077 2078 """ 2079 JobPollReportCbBase.__init__(self) 2080 2081 self.notified_queued = False 2082 self.notified_waitlock = False
2083
2084 - def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
2085 """Handles a log message. 2086 2087 """ 2088 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)), 2089 FormatLogMessage(log_type, log_msg))
2090
2091 - def ReportNotChanged(self, job_id, status):
2092 """Called if a job hasn't changed in a while. 2093 2094 """ 2095 if status is None: 2096 return 2097 2098 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued: 2099 ToStderr("Job %s is waiting in queue", job_id) 2100 self.notified_queued = True 2101 2102 elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock: 2103 ToStderr("Job %s is trying to acquire all necessary locks", job_id) 2104 self.notified_waitlock = True
2105
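The base classes above define the polling interface. The sketch below is a minimal, hypothetical reporting callback that collects log messages in a list instead of writing them to the terminal, using FormatLogMessage() (defined just below); an instance could be passed to PollJob() via its reporter argument in place of the default StdioJobPollReportCb.

  from ganeti import cli

  class ListJobPollReportCb(cli.JobPollReportCbBase):
    """Reporting callback that merely accumulates log messages."""
    def __init__(self):
      cli.JobPollReportCbBase.__init__(self)
      self.messages = []

    def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
      self.messages.append(cli.FormatLogMessage(log_type, log_msg))

    def ReportNotChanged(self, job_id, status):
      # nothing to report while the job is still queued/waiting
      pass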
2106 2107 -def FormatLogMessage(log_type, log_msg):
2108 """Formats a job message according to its type. 2109 2110 """ 2111 if log_type != constants.ELOG_MESSAGE: 2112 log_msg = str(log_msg) 2113 2114 return utils.SafeEncode(log_msg)
2115
2116 2117 -def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
2118 """Function to poll for the result of a job. 2119 2120 @type job_id: job identified 2121 @param job_id: the job to poll for results 2122 @type cl: luxi.Client 2123 @param cl: the luxi client to use for communicating with the master; 2124 if None, a new client will be created 2125 2126 """ 2127 if cl is None: 2128 cl = GetClient() 2129 2130 if reporter is None: 2131 if feedback_fn: 2132 reporter = FeedbackFnJobPollReportCb(feedback_fn) 2133 else: 2134 reporter = StdioJobPollReportCb() 2135 elif feedback_fn: 2136 raise errors.ProgrammerError("Can't specify reporter and feedback function") 2137 2138 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2139
2140 2141 -def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2142 """Legacy function to submit an opcode. 2143 2144 This is just a simple wrapper over the construction of the processor 2145 instance. It should be extended to better handle feedback and 2146 interaction functions. 2147 2148 """ 2149 if cl is None: 2150 cl = GetClient() 2151 2152 SetGenericOpcodeOpts([op], opts) 2153 2154 job_id = SendJob([op], cl=cl) 2155 2156 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn, 2157 reporter=reporter) 2158 2159 return op_results[0]
2160
2161 2162 -def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2163 """Wrapper around SubmitOpCode or SendJob. 2164 2165 This function will decide, based on the 'opts' parameter, whether to 2166 submit and wait for the result of the opcode (and return it), or 2167 whether to just send the job and print its identifier. It is used in 2168 order to simplify the implementation of the '--submit' option. 2169 2170 It will also process the opcodes if we're sending the via SendJob 2171 (otherwise SubmitOpCode does it). 2172 2173 """ 2174 if opts and opts.submit_only: 2175 job = [op] 2176 SetGenericOpcodeOpts(job, opts) 2177 job_id = SendJob(job, cl=cl) 2178 raise JobSubmittedException(job_id) 2179 else: 2180 return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
2181
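A sketch of the usual call pattern from a gnt-* command function. It needs a reachable master daemon, and the opcode class used here (OpClusterVerifyConfig) is only an example; any object from the opcodes module works. When the user passed --submit, SubmitOrSend() raises JobSubmittedException, which GenericMain() turns into printing the job ID and exiting successfully.

  from ganeti import cli
  from ganeti import opcodes

  def VerifyConfig(opts, args):
    op = opcodes.OpClusterVerifyConfig()
    # honours --submit (opts.submit_only): either waits for the result or
    # raises JobSubmittedException with the new job's ID
    result = cli.SubmitOrSend(op, opts)
    cli.ToStdout("Configuration verification result: %s", result)
    return 0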
2182 2183 -def SetGenericOpcodeOpts(opcode_list, options):
2184 """Processor for generic options. 2185 2186 This function updates the given opcodes based on generic command 2187 line options (like debug, dry-run, etc.). 2188 2189 @param opcode_list: list of opcodes 2190 @param options: command line options or None 2191 @return: None (in-place modification) 2192 2193 """ 2194 if not options: 2195 return 2196 for op in opcode_list: 2197 op.debug_level = options.debug 2198 if hasattr(options, "dry_run"): 2199 op.dry_run = options.dry_run 2200 if getattr(options, "priority", None) is not None: 2201 op.priority = options.priority
2202
2203 2204 -def GetClient(query=False):
2205 """Connects to the a luxi socket and returns a client. 2206 2207 @type query: boolean 2208 @param query: this signifies that the client will only be 2209 used for queries; if the build-time parameter 2210 enable-split-queries is enabled, then the client will be 2211 connected to the query socket instead of the masterd socket 2212 2213 """ 2214 if query and constants.ENABLE_SPLIT_QUERY: 2215 address = pathutils.QUERY_SOCKET 2216 else: 2217 address = None 2218 # TODO: Cache object? 2219 try: 2220 client = luxi.Client(address=address) 2221 except luxi.NoMasterError: 2222 ss = ssconf.SimpleStore() 2223 2224 # Try to read ssconf file 2225 try: 2226 ss.GetMasterNode() 2227 except errors.ConfigurationError: 2228 raise errors.OpPrereqError("Cluster not initialized or this machine is" 2229 " not part of a cluster", 2230 errors.ECODE_INVAL) 2231 2232 master, myself = ssconf.GetMasterAndMyself(ss=ss) 2233 if master != myself: 2234 raise errors.OpPrereqError("This is not the master node, please connect" 2235 " to node '%s' and rerun the command" % 2236 master, errors.ECODE_INVAL) 2237 raise 2238 return client
2239
2240 2241 -def FormatError(err):
2242 """Return a formatted error message for a given error. 2243 2244 This function takes an exception instance and returns a tuple 2245 consisting of two values: first, the recommended exit code, and 2246 second, a string describing the error message (not 2247 newline-terminated). 2248 2249 """ 2250 retcode = 1 2251 obuf = StringIO() 2252 msg = str(err) 2253 if isinstance(err, errors.ConfigurationError): 2254 txt = "Corrupt configuration file: %s" % msg 2255 logging.error(txt) 2256 obuf.write(txt + "\n") 2257 obuf.write("Aborting.") 2258 retcode = 2 2259 elif isinstance(err, errors.HooksAbort): 2260 obuf.write("Failure: hooks execution failed:\n") 2261 for node, script, out in err.args[0]: 2262 if out: 2263 obuf.write(" node: %s, script: %s, output: %s\n" % 2264 (node, script, out)) 2265 else: 2266 obuf.write(" node: %s, script: %s (no output)\n" % 2267 (node, script)) 2268 elif isinstance(err, errors.HooksFailure): 2269 obuf.write("Failure: hooks general failure: %s" % msg) 2270 elif isinstance(err, errors.ResolverError): 2271 this_host = netutils.Hostname.GetSysName() 2272 if err.args[0] == this_host: 2273 msg = "Failure: can't resolve my own hostname ('%s')" 2274 else: 2275 msg = "Failure: can't resolve hostname '%s'" 2276 obuf.write(msg % err.args[0]) 2277 elif isinstance(err, errors.OpPrereqError): 2278 if len(err.args) == 2: 2279 obuf.write("Failure: prerequisites not met for this" 2280 " operation:\nerror type: %s, error details:\n%s" % 2281 (err.args[1], err.args[0])) 2282 else: 2283 obuf.write("Failure: prerequisites not met for this" 2284 " operation:\n%s" % msg) 2285 elif isinstance(err, errors.OpExecError): 2286 obuf.write("Failure: command execution error:\n%s" % msg) 2287 elif isinstance(err, errors.TagError): 2288 obuf.write("Failure: invalid tag(s) given:\n%s" % msg) 2289 elif isinstance(err, errors.JobQueueDrainError): 2290 obuf.write("Failure: the job queue is marked for drain and doesn't" 2291 " accept new requests\n") 2292 elif isinstance(err, errors.JobQueueFull): 2293 obuf.write("Failure: the job queue is full and doesn't accept new" 2294 " job submissions until old jobs are archived\n") 2295 elif isinstance(err, errors.TypeEnforcementError): 2296 obuf.write("Parameter Error: %s" % msg) 2297 elif isinstance(err, errors.ParameterError): 2298 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg) 2299 elif isinstance(err, luxi.NoMasterError): 2300 if err.args[0] == pathutils.MASTER_SOCKET: 2301 daemon = "master" 2302 else: 2303 daemon = "config" 2304 obuf.write("Cannot communicate with the %s daemon.\nIs it running" 2305 " and listening for connections?" % daemon) 2306 elif isinstance(err, luxi.TimeoutError): 2307 obuf.write("Timeout while talking to the master daemon. Jobs might have" 2308 " been submitted and will continue to run even if the call" 2309 " timed out. Useful commands in this situation are \"gnt-job" 2310 " list\", \"gnt-job cancel\" and \"gnt-job watch\". 
Error:\n") 2311 obuf.write(msg) 2312 elif isinstance(err, luxi.PermissionError): 2313 obuf.write("It seems you don't have permissions to connect to the" 2314 " master daemon.\nPlease retry as a different user.") 2315 elif isinstance(err, luxi.ProtocolError): 2316 obuf.write("Unhandled protocol error while talking to the master daemon:\n" 2317 "%s" % msg) 2318 elif isinstance(err, errors.JobLost): 2319 obuf.write("Error checking job status: %s" % msg) 2320 elif isinstance(err, errors.QueryFilterParseError): 2321 obuf.write("Error while parsing query filter: %s\n" % err.args[0]) 2322 obuf.write("\n".join(err.GetDetails())) 2323 elif isinstance(err, errors.GenericError): 2324 obuf.write("Unhandled Ganeti error: %s" % msg) 2325 elif isinstance(err, JobSubmittedException): 2326 obuf.write("JobID: %s\n" % err.args[0]) 2327 retcode = 0 2328 else: 2329 obuf.write("Unhandled exception: %s" % msg) 2330 return retcode, obuf.getvalue().rstrip("\n")
2331
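FormatError() maps an exception to an (exit code, message) pair without raising; a small illustration (the error text is made up):

  from ganeti import cli
  from ganeti import errors

  retcode, msg = cli.FormatError(errors.OpPrereqError("instance is still running",
                                                      errors.ECODE_INVAL))
  # retcode == 1; msg starts with "Failure: prerequisites not met for this
  # operation:" followed by the error type and details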
2332 2333 -def GenericMain(commands, override=None, aliases=None, 2334 env_override=frozenset()):
2335 """Generic main function for all the gnt-* commands. 2336 2337 @param commands: a dictionary with a special structure, see the design doc 2338 for command line handling. 2339 @param override: if not None, we expect a dictionary with keys that will 2340 override command line options; this can be used to pass 2341 options from the scripts to generic functions 2342 @param aliases: dictionary with command aliases {'alias': 'target, ...} 2343 @param env_override: list of environment names which are allowed to submit 2344 default args for commands 2345 2346 """ 2347 # save the program name and the entire command line for later logging 2348 if sys.argv: 2349 binary = os.path.basename(sys.argv[0]) 2350 if not binary: 2351 binary = sys.argv[0] 2352 2353 if len(sys.argv) >= 2: 2354 logname = utils.ShellQuoteArgs([binary, sys.argv[1]]) 2355 else: 2356 logname = binary 2357 2358 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:]) 2359 else: 2360 binary = "<unknown program>" 2361 cmdline = "<unknown>" 2362 2363 if aliases is None: 2364 aliases = {} 2365 2366 try: 2367 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases, 2368 env_override) 2369 except _ShowVersion: 2370 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION, 2371 constants.RELEASE_VERSION) 2372 return constants.EXIT_SUCCESS 2373 except _ShowUsage, err: 2374 for line in _FormatUsage(binary, commands): 2375 ToStdout(line) 2376 2377 if err.exit_error: 2378 return constants.EXIT_FAILURE 2379 else: 2380 return constants.EXIT_SUCCESS 2381 except errors.ParameterError, err: 2382 result, err_msg = FormatError(err) 2383 ToStderr(err_msg) 2384 return 1 2385 2386 if func is None: # parse error 2387 return 1 2388 2389 if override is not None: 2390 for key, val in override.iteritems(): 2391 setattr(options, key, val) 2392 2393 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug, 2394 stderr_logging=True) 2395 2396 logging.info("Command line: %s", cmdline) 2397 2398 try: 2399 result = func(options, args) 2400 except (errors.GenericError, luxi.ProtocolError, 2401 JobSubmittedException), err: 2402 result, err_msg = FormatError(err) 2403 logging.exception("Error during command processing") 2404 ToStderr(err_msg) 2405 except KeyboardInterrupt: 2406 result = constants.EXIT_FAILURE 2407 ToStderr("Aborted. Note that if the operation created any jobs, they" 2408 " might have been submitted and" 2409 " will continue to run in the background.") 2410 except IOError, err: 2411 if err.errno == errno.EPIPE: 2412 # our terminal went away, we'll exit 2413 sys.exit(constants.EXIT_FAILURE) 2414 else: 2415 raise 2416 2417 return result
2418
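The commands dictionary consumed by GenericMain() (and _ParseArgs() above) maps each command name to a tuple of (function, argument definition, extra optparse options, usage synopsis, description). A minimal, hypothetical script could look like the sketch below; if it were installed as gnt-example, default arguments for the "uptime" command could additionally be injected through the GNT_EXAMPLE_UPTIME environment variable because of env_override.

  import sys

  from ganeti import cli
  from ganeti import constants

  def PrintUptime(opts, args):
    cli.ToStdout("uptime placeholder")
    return constants.EXIT_SUCCESS

  commands = {
    # name: (function, args definition, options, usage synopsis, description)
    "uptime": (PrintUptime, [], [], "", "Print a placeholder message"),
    }

  if __name__ == "__main__":
    sys.exit(cli.GenericMain(commands, env_override=frozenset(["uptime"])))

Note that GenericMain() sets up logging under the standard Ganeti log directory, so this sketch assumes it runs on a machine with Ganeti installed.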
2419 2420 -def ParseNicOption(optvalue):
2421 """Parses the value of the --net option(s). 2422 2423 """ 2424 try: 2425 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue) 2426 except (TypeError, ValueError), err: 2427 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err), 2428 errors.ECODE_INVAL) 2429 2430 nics = [{}] * nic_max 2431 for nidx, ndict in optvalue: 2432 nidx = int(nidx) 2433 2434 if not isinstance(ndict, dict): 2435 raise errors.OpPrereqError("Invalid nic/%d value: expected dict," 2436 " got %s" % (nidx, ndict), errors.ECODE_INVAL) 2437 2438 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES) 2439 2440 nics[nidx] = ndict 2441 2442 return nics
2443
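ParseNicOption() expects the (index, settings) pairs produced by repeated --net options; indices that are not mentioned are filled with empty dictionaries. The parameter names below ("link", "mode", "ip") are regular NIC parameters, the values are illustrative:

  from ganeti import cli

  nics = cli.ParseNicOption([("0", {"link": "br0"}),
                             ("2", {"mode": "routed", "ip": "198.51.100.10"})])
  # roughly: [{'link': 'br0'}, {}, {'ip': '198.51.100.10', 'mode': 'routed'}]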
2444 2445 -def GenericInstanceCreate(mode, opts, args):
2446 """Add an instance to the cluster via either creation or import. 2447 2448 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT 2449 @param opts: the command line options selected by the user 2450 @type args: list 2451 @param args: should contain only one element, the new instance name 2452 @rtype: int 2453 @return: the desired exit code 2454 2455 """ 2456 instance = args[0] 2457 2458 (pnode, snode) = SplitNodeOption(opts.node) 2459 2460 hypervisor = None 2461 hvparams = {} 2462 if opts.hypervisor: 2463 hypervisor, hvparams = opts.hypervisor 2464 2465 if opts.nics: 2466 nics = ParseNicOption(opts.nics) 2467 elif opts.no_nics: 2468 # no nics 2469 nics = [] 2470 elif mode == constants.INSTANCE_CREATE: 2471 # default of one nic, all auto 2472 nics = [{}] 2473 else: 2474 # mode == import 2475 nics = [] 2476 2477 if opts.disk_template == constants.DT_DISKLESS: 2478 if opts.disks or opts.sd_size is not None: 2479 raise errors.OpPrereqError("Diskless instance but disk" 2480 " information passed", errors.ECODE_INVAL) 2481 disks = [] 2482 else: 2483 if (not opts.disks and not opts.sd_size 2484 and mode == constants.INSTANCE_CREATE): 2485 raise errors.OpPrereqError("No disk information specified", 2486 errors.ECODE_INVAL) 2487 if opts.disks and opts.sd_size is not None: 2488 raise errors.OpPrereqError("Please use either the '--disk' or" 2489 " '-s' option", errors.ECODE_INVAL) 2490 if opts.sd_size is not None: 2491 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})] 2492 2493 if opts.disks: 2494 try: 2495 disk_max = max(int(didx[0]) + 1 for didx in opts.disks) 2496 except ValueError, err: 2497 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err), 2498 errors.ECODE_INVAL) 2499 disks = [{}] * disk_max 2500 else: 2501 disks = [] 2502 for didx, ddict in opts.disks: 2503 didx = int(didx) 2504 if not isinstance(ddict, dict): 2505 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict) 2506 raise errors.OpPrereqError(msg, errors.ECODE_INVAL) 2507 elif constants.IDISK_SIZE in ddict: 2508 if constants.IDISK_ADOPT in ddict: 2509 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed" 2510 " (disk %d)" % didx, errors.ECODE_INVAL) 2511 try: 2512 ddict[constants.IDISK_SIZE] = \ 2513 utils.ParseUnit(ddict[constants.IDISK_SIZE]) 2514 except ValueError, err: 2515 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" % 2516 (didx, err), errors.ECODE_INVAL) 2517 elif constants.IDISK_ADOPT in ddict: 2518 if mode == constants.INSTANCE_IMPORT: 2519 raise errors.OpPrereqError("Disk adoption not allowed for instance" 2520 " import", errors.ECODE_INVAL) 2521 ddict[constants.IDISK_SIZE] = 0 2522 else: 2523 raise errors.OpPrereqError("Missing size or adoption source for" 2524 " disk %d" % didx, errors.ECODE_INVAL) 2525 disks[didx] = ddict 2526 2527 if opts.tags is not None: 2528 tags = opts.tags.split(",") 2529 else: 2530 tags = [] 2531 2532 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT) 2533 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES) 2534 2535 if mode == constants.INSTANCE_CREATE: 2536 start = opts.start 2537 os_type = opts.os 2538 force_variant = opts.force_variant 2539 src_node = None 2540 src_path = None 2541 no_install = opts.no_install 2542 identify_defaults = False 2543 elif mode == constants.INSTANCE_IMPORT: 2544 start = False 2545 os_type = None 2546 force_variant = False 2547 src_node = opts.src_node 2548 src_path = opts.src_dir 2549 no_install = None 2550 identify_defaults = opts.identify_defaults 2551 
else: 2552 raise errors.ProgrammerError("Invalid creation mode %s" % mode) 2553 2554 op = opcodes.OpInstanceCreate(instance_name=instance, 2555 disks=disks, 2556 disk_template=opts.disk_template, 2557 nics=nics, 2558 conflicts_check=opts.conflicts_check, 2559 pnode=pnode, snode=snode, 2560 ip_check=opts.ip_check, 2561 name_check=opts.name_check, 2562 wait_for_sync=opts.wait_for_sync, 2563 file_storage_dir=opts.file_storage_dir, 2564 file_driver=opts.file_driver, 2565 iallocator=opts.iallocator, 2566 hypervisor=hypervisor, 2567 hvparams=hvparams, 2568 beparams=opts.beparams, 2569 osparams=opts.osparams, 2570 mode=mode, 2571 start=start, 2572 os_type=os_type, 2573 force_variant=force_variant, 2574 src_node=src_node, 2575 src_path=src_path, 2576 tags=tags, 2577 no_install=no_install, 2578 identify_defaults=identify_defaults, 2579 ignore_ipolicy=opts.ignore_ipolicy) 2580 2581 SubmitOrSend(op, opts) 2582 return 0
2583
2584 2585 -class _RunWhileClusterStoppedHelper:
2586 """Helper class for L{RunWhileClusterStopped} to simplify state management 2587 2588 """
2589 - def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2590 """Initializes this class. 2591 2592 @type feedback_fn: callable 2593 @param feedback_fn: Feedback function 2594 @type cluster_name: string 2595 @param cluster_name: Cluster name 2596 @type master_node: string 2597 @param master_node Master node name 2598 @type online_nodes: list 2599 @param online_nodes: List of names of online nodes 2600 2601 """ 2602 self.feedback_fn = feedback_fn 2603 self.cluster_name = cluster_name 2604 self.master_node = master_node 2605 self.online_nodes = online_nodes 2606 2607 self.ssh = ssh.SshRunner(self.cluster_name) 2608 2609 self.nonmaster_nodes = [name for name in online_nodes 2610 if name != master_node] 2611 2612 assert self.master_node not in self.nonmaster_nodes
2613
2614 - def _RunCmd(self, node_name, cmd):
2615 """Runs a command on the local or a remote machine. 2616 2617 @type node_name: string 2618 @param node_name: Machine name 2619 @type cmd: list 2620 @param cmd: Command 2621 2622 """ 2623 if node_name is None or node_name == self.master_node: 2624 # No need to use SSH 2625 result = utils.RunCmd(cmd) 2626 else: 2627 result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER, 2628 utils.ShellQuoteArgs(cmd)) 2629 2630 if result.failed: 2631 errmsg = ["Failed to run command %s" % result.cmd] 2632 if node_name: 2633 errmsg.append("on node %s" % node_name) 2634 errmsg.append(": exitcode %s and error %s" % 2635 (result.exit_code, result.output)) 2636 raise errors.OpExecError(" ".join(errmsg))
2637
2638 - def Call(self, fn, *args):
2639 """Call function while all daemons are stopped. 2640 2641 @type fn: callable 2642 @param fn: Function to be called 2643 2644 """ 2645 # Pause watcher by acquiring an exclusive lock on watcher state file 2646 self.feedback_fn("Blocking watcher") 2647 watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE) 2648 try: 2649 # TODO: Currently, this just blocks. There's no timeout. 2650 # TODO: Should it be a shared lock? 2651 watcher_block.Exclusive(blocking=True) 2652 2653 # Stop master daemons, so that no new jobs can come in and all running 2654 # ones are finished 2655 self.feedback_fn("Stopping master daemons") 2656 self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"]) 2657 try: 2658 # Stop daemons on all nodes 2659 for node_name in self.online_nodes: 2660 self.feedback_fn("Stopping daemons on %s" % node_name) 2661 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"]) 2662 2663 # All daemons are shut down now 2664 try: 2665 return fn(self, *args) 2666 except Exception, err: 2667 _, errmsg = FormatError(err) 2668 logging.exception("Caught exception") 2669 self.feedback_fn(errmsg) 2670 raise 2671 finally: 2672 # Start cluster again, master node last 2673 for node_name in self.nonmaster_nodes + [self.master_node]: 2674 self.feedback_fn("Starting daemons on %s" % node_name) 2675 self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"]) 2676 finally: 2677 # Resume watcher 2678 watcher_block.Close()
2679
2680 2681 -def RunWhileClusterStopped(feedback_fn, fn, *args):
2682 """Calls a function while all cluster daemons are stopped. 2683 2684 @type feedback_fn: callable 2685 @param feedback_fn: Feedback function 2686 @type fn: callable 2687 @param fn: Function to be called when daemons are stopped 2688 2689 """ 2690 feedback_fn("Gathering cluster information") 2691 2692 # This ensures we're running on the master daemon 2693 cl = GetClient() 2694 2695 (cluster_name, master_node) = \ 2696 cl.QueryConfigValues(["cluster_name", "master_node"]) 2697 2698 online_nodes = GetOnlineNodes([], cl=cl) 2699 2700 # Don't keep a reference to the client. The master daemon will go away. 2701 del cl 2702 2703 assert master_node in online_nodes 2704 2705 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node, 2706 online_nodes).Call(fn, *args)
2707
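A heavily hedged sketch of the calling convention only: RunWhileClusterStopped() really does stop every Ganeti daemon in the cluster while fn runs, so it must never be executed casually. Note that fn receives the helper object as its first argument and can therefore use the helper's _RunCmd() to run commands on the (stopped) nodes.

  from ganeti import cli

  def _DoMaintenance(helper, marker):
    # "helper" is the _RunWhileClusterStoppedHelper instance created above
    helper.feedback_fn("All daemons stopped, running maintenance (%s)" % marker)

  # deliberately commented out, since it stops the whole cluster:
  # cli.RunWhileClusterStopped(cli.ToStdout, _DoMaintenance, "example-run")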
2708 2709 -def GenerateTable(headers, fields, separator, data, 2710 numfields=None, unitfields=None, 2711 units=None):
2712 """Prints a table with headers and different fields. 2713 2714 @type headers: dict 2715 @param headers: dictionary mapping field names to headers for 2716 the table 2717 @type fields: list 2718 @param fields: the field names corresponding to each row in 2719 the data field 2720 @param separator: the separator to be used; if this is None, 2721 the default 'smart' algorithm is used which computes optimal 2722 field width, otherwise just the separator is used between 2723 each field 2724 @type data: list 2725 @param data: a list of lists, each sublist being one row to be output 2726 @type numfields: list 2727 @param numfields: a list with the fields that hold numeric 2728 values and thus should be right-aligned 2729 @type unitfields: list 2730 @param unitfields: a list with the fields that hold numeric 2731 values that should be formatted with the units field 2732 @type units: string or None 2733 @param units: the units we should use for formatting, or None for 2734 automatic choice (human-readable for non-separator usage, otherwise 2735 megabytes); this is a one-letter string 2736 2737 """ 2738 if units is None: 2739 if separator: 2740 units = "m" 2741 else: 2742 units = "h" 2743 2744 if numfields is None: 2745 numfields = [] 2746 if unitfields is None: 2747 unitfields = [] 2748 2749 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142 2750 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142 2751 2752 format_fields = [] 2753 for field in fields: 2754 if headers and field not in headers: 2755 # TODO: handle better unknown fields (either revert to old 2756 # style of raising exception, or deal more intelligently with 2757 # variable fields) 2758 headers[field] = field 2759 if separator is not None: 2760 format_fields.append("%s") 2761 elif numfields.Matches(field): 2762 format_fields.append("%*s") 2763 else: 2764 format_fields.append("%-*s") 2765 2766 if separator is None: 2767 mlens = [0 for name in fields] 2768 format_str = " ".join(format_fields) 2769 else: 2770 format_str = separator.replace("%", "%%").join(format_fields) 2771 2772 for row in data: 2773 if row is None: 2774 continue 2775 for idx, val in enumerate(row): 2776 if unitfields.Matches(fields[idx]): 2777 try: 2778 val = int(val) 2779 except (TypeError, ValueError): 2780 pass 2781 else: 2782 val = row[idx] = utils.FormatUnit(val, units) 2783 val = row[idx] = str(val) 2784 if separator is None: 2785 mlens[idx] = max(mlens[idx], len(val)) 2786 2787 result = [] 2788 if headers: 2789 args = [] 2790 for idx, name in enumerate(fields): 2791 hdr = headers[name] 2792 if separator is None: 2793 mlens[idx] = max(mlens[idx], len(hdr)) 2794 args.append(mlens[idx]) 2795 args.append(hdr) 2796 result.append(format_str % tuple(args)) 2797 2798 if separator is None: 2799 assert len(mlens) == len(fields) 2800 2801 if fields and not numfields.Matches(fields[-1]): 2802 mlens[-1] = 0 2803 2804 for line in data: 2805 args = [] 2806 if line is None: 2807 line = ["-" for _ in fields] 2808 for idx in range(len(fields)): 2809 if separator is None: 2810 args.append(mlens[idx]) 2811 args.append(line[idx]) 2812 result.append(format_str % tuple(args)) 2813 2814 return result
2815
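A self-contained illustration of GenerateTable() with the smart-width layout (separator=None) and human-readable units for the "size" column; the instance data is made up:

  from ganeti import cli

  headers = {"name": "Instance", "size": "DiskSize", "status": "Status"}
  fields = ["name", "size", "status"]
  data = [["inst1.example.com", 10240, "running"],
          ["inst2.example.com", 512, "ADMIN_down"]]

  for line in cli.GenerateTable(headers, fields, None, data,
                                numfields=["size"], unitfields=["size"]):
    print line
  # the DiskSize column is rendered as "10.0G"/"512M" and right-aligned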
2816 2817 -def _FormatBool(value):
2818 """Formats a boolean value as a string. 2819 2820 """ 2821 if value: 2822 return "Y" 2823 return "N"
2824 2825 2826 #: Default formatting for query results; (callback, align right) 2827 _DEFAULT_FORMAT_QUERY = { 2828 constants.QFT_TEXT: (str, False), 2829 constants.QFT_BOOL: (_FormatBool, False), 2830 constants.QFT_NUMBER: (str, True), 2831 constants.QFT_TIMESTAMP: (utils.FormatTime, False), 2832 constants.QFT_OTHER: (str, False), 2833 constants.QFT_UNKNOWN: (str, False), 2834 }
2835 2836 2837 -def _GetColumnFormatter(fdef, override, unit):
2838 """Returns formatting function for a field. 2839 2840 @type fdef: L{objects.QueryFieldDefinition} 2841 @type override: dict 2842 @param override: Dictionary for overriding field formatting functions, 2843 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY} 2844 @type unit: string 2845 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} 2846 @rtype: tuple; (callable, bool) 2847 @return: Returns the function to format a value (takes one parameter) and a 2848 boolean for aligning the value on the right-hand side 2849 2850 """ 2851 fmt = override.get(fdef.name, None) 2852 if fmt is not None: 2853 return fmt 2854 2855 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY 2856 2857 if fdef.kind == constants.QFT_UNIT: 2858 # Can't keep this information in the static dictionary 2859 return (lambda value: utils.FormatUnit(value, unit), True) 2860 2861 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None) 2862 if fmt is not None: 2863 return fmt 2864 2865 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2866
2867 2868 -class _QueryColumnFormatter:
2869 """Callable class for formatting fields of a query. 2870 2871 """
2872 - def __init__(self, fn, status_fn, verbose):
2873 """Initializes this class. 2874 2875 @type fn: callable 2876 @param fn: Formatting function 2877 @type status_fn: callable 2878 @param status_fn: Function to report fields' status 2879 @type verbose: boolean 2880 @param verbose: whether to use verbose field descriptions or not 2881 2882 """ 2883 self._fn = fn 2884 self._status_fn = status_fn 2885 self._verbose = verbose
2886
2887 - def __call__(self, data):
2888 """Returns a field's string representation. 2889 2890 """ 2891 (status, value) = data 2892 2893 # Report status 2894 self._status_fn(status) 2895 2896 if status == constants.RS_NORMAL: 2897 return self._fn(value) 2898 2899 assert value is None, \ 2900 "Found value %r for abnormal status %s" % (value, status) 2901 2902 return FormatResultError(status, self._verbose)
2903
2904 2905 -def FormatResultError(status, verbose):
2906 """Formats result status other than L{constants.RS_NORMAL}. 2907 2908 @param status: The result status 2909 @type verbose: boolean 2910 @param verbose: Whether to return the verbose text 2911 @return: Text of result status 2912 2913 """ 2914 assert status != constants.RS_NORMAL, \ 2915 "FormatResultError called with status equal to constants.RS_NORMAL" 2916 try: 2917 (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status] 2918 except KeyError: 2919 raise NotImplementedError("Unknown status %s" % status) 2920 else: 2921 if verbose: 2922 return verbose_text 2923 return normal_text
2924
2925 2926 -def FormatQueryResult(result, unit=None, format_override=None, separator=None, 2927 header=False, verbose=False):
2928 """Formats data in L{objects.QueryResponse}. 2929 2930 @type result: L{objects.QueryResponse} 2931 @param result: result of query operation 2932 @type unit: string 2933 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}, 2934 see L{utils.text.FormatUnit} 2935 @type format_override: dict 2936 @param format_override: Dictionary for overriding field formatting functions, 2937 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY} 2938 @type separator: string or None 2939 @param separator: String used to separate fields 2940 @type header: bool 2941 @param header: Whether to output header row 2942 @type verbose: boolean 2943 @param verbose: whether to use verbose field descriptions or not 2944 2945 """ 2946 if unit is None: 2947 if separator: 2948 unit = "m" 2949 else: 2950 unit = "h" 2951 2952 if format_override is None: 2953 format_override = {} 2954 2955 stats = dict.fromkeys(constants.RS_ALL, 0) 2956 2957 def _RecordStatus(status): 2958 if status in stats: 2959 stats[status] += 1
2960 2961 columns = [] 2962 for fdef in result.fields: 2963 assert fdef.title and fdef.name 2964 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit) 2965 columns.append(TableColumn(fdef.title, 2966 _QueryColumnFormatter(fn, _RecordStatus, 2967 verbose), 2968 align_right)) 2969 2970 table = FormatTable(result.data, columns, header, separator) 2971 2972 # Collect statistics 2973 assert len(stats) == len(constants.RS_ALL) 2974 assert compat.all(count >= 0 for count in stats.values()) 2975 2976 # Determine overall status. If there was no data, unknown fields must be 2977 # detected via the field definitions. 2978 if (stats[constants.RS_UNKNOWN] or 2979 (not result.data and _GetUnknownFields(result.fields))): 2980 status = QR_UNKNOWN 2981 elif compat.any(count > 0 for key, count in stats.items() 2982 if key != constants.RS_NORMAL): 2983 status = QR_INCOMPLETE 2984 else: 2985 status = QR_NORMAL 2986 2987 return (status, table) 2988
2989 2990 -def _GetUnknownFields(fdefs):
2991 """Returns list of unknown fields included in C{fdefs}. 2992 2993 @type fdefs: list of L{objects.QueryFieldDefinition} 2994 2995 """ 2996 return [fdef for fdef in fdefs 2997 if fdef.kind == constants.QFT_UNKNOWN]
2998
2999 3000 -def _WarnUnknownFields(fdefs):
3001 """Prints a warning to stderr if a query included unknown fields. 3002 3003 @type fdefs: list of L{objects.QueryFieldDefinition} 3004 3005 """ 3006 unknown = _GetUnknownFields(fdefs) 3007 if unknown: 3008 ToStderr("Warning: Queried for unknown fields %s", 3009 utils.CommaJoin(fdef.name for fdef in unknown)) 3010 return True 3011 3012 return False
3013
3014 3015 -def GenericList(resource, fields, names, unit, separator, header, cl=None, 3016 format_override=None, verbose=False, force_filter=False, 3017 namefield=None, qfilter=None, isnumeric=False):
3018 """Generic implementation for listing all items of a resource. 3019 3020 @param resource: One of L{constants.QR_VIA_LUXI} 3021 @type fields: list of strings 3022 @param fields: List of fields to query for 3023 @type names: list of strings 3024 @param names: Names of items to query for 3025 @type unit: string or None 3026 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or 3027 None for automatic choice (human-readable for non-separator usage, 3028 otherwise megabytes); this is a one-letter string 3029 @type separator: string or None 3030 @param separator: String used to separate fields 3031 @type header: bool 3032 @param header: Whether to show header row 3033 @type force_filter: bool 3034 @param force_filter: Whether to always treat names as filter 3035 @type format_override: dict 3036 @param format_override: Dictionary for overriding field formatting functions, 3037 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY} 3038 @type verbose: boolean 3039 @param verbose: whether to use verbose field descriptions or not 3040 @type namefield: string 3041 @param namefield: Name of field to use for simple filters (see 3042 L{qlang.MakeFilter} for details) 3043 @type qfilter: list or None 3044 @param qfilter: Query filter (in addition to names) 3045 @param isnumeric: bool 3046 @param isnumeric: Whether the namefield's type is numeric, and therefore 3047 any simple filters built by namefield should use integer values to 3048 reflect that 3049 3050 """ 3051 if not names: 3052 names = None 3053 3054 namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield, 3055 isnumeric=isnumeric) 3056 3057 if qfilter is None: 3058 qfilter = namefilter 3059 elif namefilter is not None: 3060 qfilter = [qlang.OP_AND, namefilter, qfilter] 3061 3062 if cl is None: 3063 cl = GetClient() 3064 3065 response = cl.Query(resource, fields, qfilter) 3066 3067 found_unknown = _WarnUnknownFields(response.fields) 3068 3069 (status, data) = FormatQueryResult(response, unit=unit, separator=separator, 3070 header=header, 3071 format_override=format_override, 3072 verbose=verbose) 3073 3074 for line in data: 3075 ToStdout(line) 3076 3077 assert ((found_unknown and status == QR_UNKNOWN) or 3078 (not found_unknown and status != QR_UNKNOWN)) 3079 3080 if status == QR_UNKNOWN: 3081 return constants.EXIT_UNKNOWN_FIELD 3082 3083 # TODO: Should the list command fail if not all data could be collected? 3084 return constants.EXIT_SUCCESS
3085
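A sketch of how a typical "gnt-* list" command is built on top of GenericList(); it needs a reachable master daemon. The option attribute names (opts.output, opts.units, opts.separator, opts.no_headers, opts.verbose) are assumptions matching the standard listing options, and the default field list is made up.

  from ganeti import cli
  from ganeti import constants

  _LIST_DEF_FIELDS = ["name", "pnode", "status"]

  def ListInstances(opts, args):
    selected_fields = cli.ParseFields(opts.output, _LIST_DEF_FIELDS)
    return cli.GenericList(constants.QR_INSTANCE, selected_fields, args,
                           opts.units, opts.separator, not opts.no_headers,
                           verbose=opts.verbose, namefield="name")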
3086 3087 -def _FieldDescValues(fdef):
3088 """Helper function for L{GenericListFields} to get query field description. 3089 3090 @type fdef: L{objects.QueryFieldDefinition} 3091 @rtype: list 3092 3093 """ 3094 return [ 3095 fdef.name, 3096 _QFT_NAMES.get(fdef.kind, fdef.kind), 3097 fdef.title, 3098 fdef.doc, 3099 ]
3100
3101 3102 -def GenericListFields(resource, fields, separator, header, cl=None):
3103 """Generic implementation for listing fields for a resource. 3104 3105 @param resource: One of L{constants.QR_VIA_LUXI} 3106 @type fields: list of strings 3107 @param fields: List of fields to query for 3108 @type separator: string or None 3109 @param separator: String used to separate fields 3110 @type header: bool 3111 @param header: Whether to show header row 3112 3113 """ 3114 if cl is None: 3115 cl = GetClient() 3116 3117 if not fields: 3118 fields = None 3119 3120 response = cl.QueryFields(resource, fields) 3121 3122 found_unknown = _WarnUnknownFields(response.fields) 3123 3124 columns = [ 3125 TableColumn("Name", str, False), 3126 TableColumn("Type", str, False), 3127 TableColumn("Title", str, False), 3128 TableColumn("Description", str, False), 3129 ] 3130 3131 rows = map(_FieldDescValues, response.fields) 3132 3133 for line in FormatTable(rows, columns, header, separator): 3134 ToStdout(line) 3135 3136 if found_unknown: 3137 return constants.EXIT_UNKNOWN_FIELD 3138 3139 return constants.EXIT_SUCCESS
3140
3141 3142 -class TableColumn:
3143 """Describes a column for L{FormatTable}. 3144 3145 """
3146 - def __init__(self, title, fn, align_right):
3147 """Initializes this class. 3148 3149 @type title: string 3150 @param title: Column title 3151 @type fn: callable 3152 @param fn: Formatting function 3153 @type align_right: bool 3154 @param align_right: Whether to align values on the right-hand side 3155 3156 """ 3157 self.title = title 3158 self.format = fn 3159 self.align_right = align_right
3160
3161 3162 -def _GetColFormatString(width, align_right):
3163 """Returns the format string for a field. 3164 3165 """ 3166 if align_right: 3167 sign = "" 3168 else: 3169 sign = "-" 3170 3171 return "%%%s%ss" % (sign, width)
3172
3173 3174 -def FormatTable(rows, columns, header, separator):
3175 """Formats data as a table. 3176 3177 @type rows: list of lists 3178 @param rows: Row data, one list per row 3179 @type columns: list of L{TableColumn} 3180 @param columns: Column descriptions 3181 @type header: bool 3182 @param header: Whether to show header row 3183 @type separator: string or None 3184 @param separator: String used to separate columns 3185 3186 """ 3187 if header: 3188 data = [[col.title for col in columns]] 3189 colwidth = [len(col.title) for col in columns] 3190 else: 3191 data = [] 3192 colwidth = [0 for _ in columns] 3193 3194 # Format row data 3195 for row in rows: 3196 assert len(row) == len(columns) 3197 3198 formatted = [col.format(value) for value, col in zip(row, columns)] 3199 3200 if separator is None: 3201 # Update column widths 3202 for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)): 3203 # Modifying a list's items while iterating is fine 3204 colwidth[idx] = max(oldwidth, len(value)) 3205 3206 data.append(formatted) 3207 3208 if separator is not None: 3209 # Return early if a separator is used 3210 return [separator.join(row) for row in data] 3211 3212 if columns and not columns[-1].align_right: 3213 # Avoid unnecessary spaces at end of line 3214 colwidth[-1] = 0 3215 3216 # Build format string 3217 fmt = " ".join([_GetColFormatString(width, col.align_right) 3218 for col, width in zip(columns, colwidth)]) 3219 3220 return [fmt % tuple(row) for row in data]
3221
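TableColumn/FormatTable work on already-formatted rows and need no cluster connection; a small example (data made up):

  from ganeti import cli

  columns = [cli.TableColumn("Name", str, False),
             cli.TableColumn("Free [MiB]", str, True)]
  rows = [["node1.example.com", 2048],
          ["node2.example.com", 512]]

  for line in cli.FormatTable(rows, columns, True, None):
    print line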
3222 3223 -def FormatTimestamp(ts):
3224 """Formats a given timestamp. 3225 3226 @type ts: timestamp 3227 @param ts: a timeval-type timestamp, a tuple of seconds and microseconds 3228 3229 @rtype: string 3230 @return: a string with the formatted timestamp 3231 3232 """ 3233 if not isinstance(ts, (tuple, list)) or len(ts) != 2: 3234 return "?" 3235 3236 (sec, usecs) = ts 3237 return utils.FormatTime(sec, usecs=usecs)
3238
3239 3240 -def ParseTimespec(value):
3241 """Parse a time specification. 3242 3243 The following suffixed will be recognized: 3244 3245 - s: seconds 3246 - m: minutes 3247 - h: hours 3248 - d: day 3249 - w: weeks 3250 3251 Without any suffix, the value will be taken to be in seconds. 3252 3253 """ 3254 value = str(value) 3255 if not value: 3256 raise errors.OpPrereqError("Empty time specification passed", 3257 errors.ECODE_INVAL) 3258 suffix_map = { 3259 "s": 1, 3260 "m": 60, 3261 "h": 3600, 3262 "d": 86400, 3263 "w": 604800, 3264 } 3265 if value[-1] not in suffix_map: 3266 try: 3267 value = int(value) 3268 except (TypeError, ValueError): 3269 raise errors.OpPrereqError("Invalid time specification '%s'" % value, 3270 errors.ECODE_INVAL) 3271 else: 3272 multiplier = suffix_map[value[-1]] 3273 value = value[:-1] 3274 if not value: # no data left after stripping the suffix 3275 raise errors.OpPrereqError("Invalid time specification (only" 3276 " suffix passed)", errors.ECODE_INVAL) 3277 try: 3278 value = int(value) * multiplier 3279 except (TypeError, ValueError): 3280 raise errors.OpPrereqError("Invalid time specification '%s'" % value, 3281 errors.ECODE_INVAL) 3282 return value
3283
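ParseTimespec() examples; the returned values are always in seconds:

  from ganeti import cli

  cli.ParseTimespec("90")   # -> 90
  cli.ParseTimespec("30m")  # -> 1800
  cli.ParseTimespec("2h")   # -> 7200
  cli.ParseTimespec("1w")   # -> 604800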
3284 3285 -def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False, 3286 filter_master=False, nodegroup=None):
3287 """Returns the names of online nodes. 3288 3289 This function will also log a warning on stderr with the names of 3290 the online nodes. 3291 3292 @param nodes: if not empty, use only this subset of nodes (minus the 3293 offline ones) 3294 @param cl: if not None, luxi client to use 3295 @type nowarn: boolean 3296 @param nowarn: by default, this function will output a note with the 3297 offline nodes that are skipped; if this parameter is True the 3298 note is not displayed 3299 @type secondary_ips: boolean 3300 @param secondary_ips: if True, return the secondary IPs instead of the 3301 names, useful for doing network traffic over the replication interface 3302 (if any) 3303 @type filter_master: boolean 3304 @param filter_master: if True, do not return the master node in the list 3305 (useful in coordination with secondary_ips where we cannot check our 3306 node name against the list) 3307 @type nodegroup: string 3308 @param nodegroup: If set, only return nodes in this node group 3309 3310 """ 3311 if cl is None: 3312 cl = GetClient() 3313 3314 qfilter = [] 3315 3316 if nodes: 3317 qfilter.append(qlang.MakeSimpleFilter("name", nodes)) 3318 3319 if nodegroup is not None: 3320 qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup], 3321 [qlang.OP_EQUAL, "group.uuid", nodegroup]]) 3322 3323 if filter_master: 3324 qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]]) 3325 3326 if qfilter: 3327 if len(qfilter) > 1: 3328 final_filter = [qlang.OP_AND] + qfilter 3329 else: 3330 assert len(qfilter) == 1 3331 final_filter = qfilter[0] 3332 else: 3333 final_filter = None 3334 3335 result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter) 3336 3337 def _IsOffline(row): 3338 (_, (_, offline), _) = row 3339 return offline
3340 3341 def _GetName(row): 3342 ((_, name), _, _) = row 3343 return name 3344 3345 def _GetSip(row): 3346 (_, _, (_, sip)) = row 3347 return sip 3348 3349 (offline, online) = compat.partition(result.data, _IsOffline) 3350 3351 if offline and not nowarn: 3352 ToStderr("Note: skipping offline node(s): %s" % 3353 utils.CommaJoin(map(_GetName, offline))) 3354 3355 if secondary_ips: 3356 fn = _GetSip 3357 else: 3358 fn = _GetName 3359 3360 return map(fn, online) 3361
3362 3363 -def _ToStream(stream, txt, *args):
3364 """Write a message to a stream, bypassing the logging system 3365 3366 @type stream: file object 3367 @param stream: the file to which we should write 3368 @type txt: str 3369 @param txt: the message 3370 3371 """ 3372 try: 3373 if args: 3374 args = tuple(args) 3375 stream.write(txt % args) 3376 else: 3377 stream.write(txt) 3378 stream.write("\n") 3379 stream.flush() 3380 except IOError, err: 3381 if err.errno == errno.EPIPE: 3382 # our terminal went away, we'll exit 3383 sys.exit(constants.EXIT_FAILURE) 3384 else: 3385 raise
3386
3387 3388 -def ToStdout(txt, *args):
3389 """Write a message to stdout only, bypassing the logging system 3390 3391 This is just a wrapper over _ToStream. 3392 3393 @type txt: str 3394 @param txt: the message 3395 3396 """ 3397 _ToStream(sys.stdout, txt, *args)
3398
3399 3400 -def ToStderr(txt, *args):
3401 """Write a message to stderr only, bypassing the logging system 3402 3403 This is just a wrapper over _ToStream. 3404 3405 @type txt: str 3406 @param txt: the message 3407 3408 """ 3409 _ToStream(sys.stderr, txt, *args)
3410
3411 3412 -class JobExecutor(object):
3413 """Class which manages the submission and execution of multiple jobs. 3414 3415 Note that instances of this class should not be reused between 3416 GetResults() calls. 3417 3418 """
3419 - def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3420 self.queue = [] 3421 if cl is None: 3422 cl = GetClient() 3423 self.cl = cl 3424 self.verbose = verbose 3425 self.jobs = [] 3426 self.opts = opts 3427 self.feedback_fn = feedback_fn 3428 self._counter = itertools.count()
3429 3430 @staticmethod
3431 - def _IfName(name, fmt):
3432 """Helper function for formatting name. 3433 3434 """ 3435 if name: 3436 return fmt % name 3437 3438 return ""
3439
3440 - def QueueJob(self, name, *ops):
3441 """Record a job for later submit. 3442 3443 @type name: string 3444 @param name: a description of the job, will be used in WaitJobSet 3445 3446 """ 3447 SetGenericOpcodeOpts(ops, self.opts) 3448 self.queue.append((self._counter.next(), name, ops))
3449
3450 - def AddJobId(self, name, status, job_id):
3451 """Adds a job ID to the internal queue. 3452 3453 """ 3454 self.jobs.append((self._counter.next(), status, job_id, name))
3455
3456 - def SubmitPending(self, each=False):
3457 """Submit all pending jobs. 3458 3459 """ 3460 if each: 3461 results = [] 3462 for (_, _, ops) in self.queue: 3463 # SubmitJob will remove the success status, but raise an exception if 3464 # the submission fails, so we'll notice that anyway. 3465 results.append([True, self.cl.SubmitJob(ops)[0]]) 3466 else: 3467 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue]) 3468 for ((status, data), (idx, name, _)) in zip(results, self.queue): 3469 self.jobs.append((idx, status, data, name))
3470
3471 - def _ChooseJob(self):
3472 """Choose a non-waiting/queued job to poll next. 3473 3474 """ 3475 assert self.jobs, "_ChooseJob called with empty job list" 3476 3477 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]], 3478 ["status"]) 3479 assert result 3480 3481 for job_data, status in zip(self.jobs, result): 3482 if (isinstance(status, list) and status and 3483 status[0] in (constants.JOB_STATUS_QUEUED, 3484 constants.JOB_STATUS_WAITING, 3485 constants.JOB_STATUS_CANCELING)): 3486 # job is still present and waiting 3487 continue 3488 # good candidate found (either running job or lost job) 3489 self.jobs.remove(job_data) 3490 return job_data 3491 3492 # no job found 3493 return self.jobs.pop(0)
3494
3495 - def GetResults(self):
3496 """Wait for and return the results of all jobs. 3497 3498 @rtype: list 3499 @return: list of tuples (success, job results), in the same order 3500 as the submitted jobs; if a job has failed, instead of the result 3501 there will be the error message 3502 3503 """ 3504 if not self.jobs: 3505 self.SubmitPending() 3506 results = [] 3507 if self.verbose: 3508 ok_jobs = [row[2] for row in self.jobs if row[1]] 3509 if ok_jobs: 3510 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs)) 3511 3512 # first, remove any non-submitted jobs 3513 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1]) 3514 for idx, _, jid, name in failures: 3515 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid) 3516 results.append((idx, False, jid)) 3517 3518 while self.jobs: 3519 (idx, _, jid, name) = self._ChooseJob() 3520 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s")) 3521 try: 3522 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn) 3523 success = True 3524 except errors.JobLost, err: 3525 _, job_result = FormatError(err) 3526 ToStderr("Job %s%s has been archived, cannot check its result", 3527 jid, self._IfName(name, " for %s")) 3528 success = False 3529 except (errors.GenericError, luxi.ProtocolError), err: 3530 _, job_result = FormatError(err) 3531 success = False 3532 # the error message will always be shown, verbose or not 3533 ToStderr("Job %s%s has failed: %s", 3534 jid, self._IfName(name, " for %s"), job_result) 3535 3536 results.append((idx, success, job_result)) 3537 3538 # sort based on the index, then drop it 3539 results.sort() 3540 results = [i[1:] for i in results] 3541 3542 return results
3543
3544 - def WaitOrShow(self, wait):
3545 """Wait for job results or only print the job IDs. 3546 3547 @type wait: boolean 3548 @param wait: whether to wait or not 3549 3550 """ 3551 if wait: 3552 return self.GetResults() 3553 else: 3554 if not self.jobs: 3555 self.SubmitPending() 3556 for _, status, result, name in self.jobs: 3557 if status: 3558 ToStdout("%s: %s", result, name) 3559 else: 3560 ToStderr("Failure for %s: %s", name, result) 3561 return [row[1:3] for row in self.jobs]
3562
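A hedged usage sketch for JobExecutor: queue one job per instance name, submit them all in a single SubmitManyJobs call and wait for the results. It requires a reachable master daemon, and the opcode shown (OpInstanceStartup) is only an example.

  from ganeti import cli
  from ganeti import opcodes

  def StartInstances(opts, args):
    jex = cli.JobExecutor(opts=opts)
    for name in args:
      jex.QueueJob(name, opcodes.OpInstanceStartup(instance_name=name))
    # GetResults() submits anything still pending and returns a list of
    # (success, result-or-error-message) tuples in submission order
    results = jex.GetResults()
    if all(success for (success, _) in results):
      return 0
    return 1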
3563 3564 -def FormatParameterDict(buf, param_dict, actual, level=1):
3565 """Formats a parameter dictionary. 3566 3567 @type buf: L{StringIO} 3568 @param buf: the buffer into which to write 3569 @type param_dict: dict 3570 @param param_dict: the own parameters 3571 @type actual: dict 3572 @param actual: the current parameter set (including defaults) 3573 @param level: Level of indent 3574 3575 """ 3576 indent = " " * level 3577 3578 for key in sorted(actual): 3579 data = actual[key] 3580 buf.write("%s- %s:" % (indent, key)) 3581 3582 if isinstance(data, dict) and data: 3583 buf.write("\n") 3584 FormatParameterDict(buf, param_dict.get(key, {}), data, 3585 level=level + 1) 3586 else: 3587 val = param_dict.get(key, "default (%s)" % data) 3588 buf.write(" %s\n" % val)
3589
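FormatParameterDict() distinguishes explicitly set values from filled-in defaults; a small illustration with made-up hypervisor parameters:

  import sys
  from cStringIO import StringIO

  from ganeti import cli

  buf = StringIO()
  cli.FormatParameterDict(buf,
                          {"kernel_path": "/boot/vmlinuz-custom"},
                          {"kernel_path": "/boot/vmlinuz-custom",
                           "root_path": "/dev/xvda1"})
  sys.stdout.write(buf.getvalue())
  #   - kernel_path: /boot/vmlinuz-custom
  #   - root_path: default (/dev/xvda1)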
3590 3591 -def ConfirmOperation(names, list_type, text, extra=""):
3592 """Ask the user to confirm an operation on a list of list_type. 3593 3594 This function is used to request confirmation for doing an operation 3595 on a given list of list_type. 3596 3597 @type names: list 3598 @param names: the list of names that we display when 3599 we ask for confirmation 3600 @type list_type: str 3601 @param list_type: Human readable name for elements in the list (e.g. nodes) 3602 @type text: str 3603 @param text: the operation that the user should confirm 3604 @rtype: boolean 3605 @return: True or False depending on user's confirmation. 3606 3607 """ 3608 count = len(names) 3609 msg = ("The %s will operate on %d %s.\n%s" 3610 "Do you want to continue?" % (text, count, list_type, extra)) 3611 affected = (("\nAffected %s:\n" % list_type) + 3612 "\n".join([" %s" % name for name in names])) 3613 3614 choices = [("y", True, "Yes, execute the %s" % text), 3615 ("n", False, "No, abort the %s" % text)] 3616 3617 if count > 20: 3618 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type)) 3619 question = msg 3620 else: 3621 question = msg + affected 3622 3623 choice = AskUser(question, choices) 3624 if choice == "v": 3625 choices.pop(1) 3626 choice = AskUser(msg + affected, choices) 3627 return choice
3628
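A usage sketch for ConfirmOperation() inside a command function; the surrounding function and the instance names it receives are hypothetical:

  from ganeti import cli
  from ganeti import constants

  def ShutdownMany(opts, args):
    if not cli.ConfirmOperation(args, "instances", "shutdown"):
      return constants.EXIT_FAILURE
    # ... queue and submit the actual shutdown jobs here ...
    return constants.EXIT_SUCCESS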
3629 3630 -def _MaybeParseUnit(elements):
3631 """Parses and returns an array of potential values with units. 3632 3633 """ 3634 parsed = {} 3635 for k, v in elements.items(): 3636 if v == constants.VALUE_DEFAULT: 3637 parsed[k] = v 3638 else: 3639 parsed[k] = utils.ParseUnit(v) 3640 return parsed
3641
3642 3643 -def CreateIPolicyFromOpts(ispecs_mem_size=None, 3644 ispecs_cpu_count=None, 3645 ispecs_disk_count=None, 3646 ispecs_disk_size=None, 3647 ispecs_nic_count=None, 3648 ipolicy_disk_templates=None, 3649 ipolicy_vcpu_ratio=None, 3650 ipolicy_spindle_ratio=None, 3651 group_ipolicy=False, 3652 allowed_values=None, 3653 fill_all=False):
3654 """Creation of instance policy based on command line options. 3655 3656 @param fill_all: whether for cluster policies we should ensure that 3657 all values are filled 3658 3659 3660 """ 3661 try: 3662 if ispecs_mem_size: 3663 ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size) 3664 if ispecs_disk_size: 3665 ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size) 3666 except (TypeError, ValueError, errors.UnitParseError), err: 3667 raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size" 3668 " in policy: %s" % 3669 (ispecs_disk_size, ispecs_mem_size, err), 3670 errors.ECODE_INVAL) 3671 3672 # prepare ipolicy dict 3673 ipolicy_transposed = { 3674 constants.ISPEC_MEM_SIZE: ispecs_mem_size, 3675 constants.ISPEC_CPU_COUNT: ispecs_cpu_count, 3676 constants.ISPEC_DISK_COUNT: ispecs_disk_count, 3677 constants.ISPEC_DISK_SIZE: ispecs_disk_size, 3678 constants.ISPEC_NIC_COUNT: ispecs_nic_count, 3679 } 3680 3681 # first, check that the values given are correct 3682 if group_ipolicy: 3683 forced_type = TISPECS_GROUP_TYPES 3684 else: 3685 forced_type = TISPECS_CLUSTER_TYPES 3686 3687 for specs in ipolicy_transposed.values(): 3688 utils.ForceDictType(specs, forced_type, allowed_values=allowed_values) 3689 3690 # then transpose 3691 ipolicy_out = objects.MakeEmptyIPolicy() 3692 for name, specs in ipolicy_transposed.iteritems(): 3693 assert name in constants.ISPECS_PARAMETERS 3694 for key, val in specs.items(): # {min: .. ,max: .., std: ..} 3695 ipolicy_out[key][name] = val 3696 3697 # no filldict for non-dicts 3698 if not group_ipolicy and fill_all: 3699 if ipolicy_disk_templates is None: 3700 ipolicy_disk_templates = constants.DISK_TEMPLATES 3701 if ipolicy_vcpu_ratio is None: 3702 ipolicy_vcpu_ratio = \ 3703 constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO] 3704 if ipolicy_spindle_ratio is None: 3705 ipolicy_spindle_ratio = \ 3706 constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO] 3707 if ipolicy_disk_templates is not None: 3708 ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates) 3709 if ipolicy_vcpu_ratio is not None: 3710 ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio 3711 if ipolicy_spindle_ratio is not None: 3712 ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio 3713 3714 assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS) 3715 3716 return ipolicy_out
3717