
Source Code for Module ganeti.cli

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc. 
   5  # 
   6  # This program is free software; you can redistribute it and/or modify 
   7  # it under the terms of the GNU General Public License as published by 
   8  # the Free Software Foundation; either version 2 of the License, or 
   9  # (at your option) any later version. 
  10  # 
  11  # This program is distributed in the hope that it will be useful, but 
  12  # WITHOUT ANY WARRANTY; without even the implied warranty of 
  13  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
  14  # General Public License for more details. 
  15  # 
  16  # You should have received a copy of the GNU General Public License 
  17  # along with this program; if not, write to the Free Software 
  18  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
  19  # 02110-1301, USA. 
  20   
  21   
  22  """Module dealing with command line parsing""" 
  23   
  24   
  25  import sys 
  26  import textwrap 
  27  import os.path 
  28  import time 
  29  import logging 
  30  import errno 
  31  import itertools 
  32  import shlex 
  33  from cStringIO import StringIO 
  34   
  35  from ganeti import utils 
  36  from ganeti import errors 
  37  from ganeti import constants 
  38  from ganeti import opcodes 
  39  from ganeti import luxi 
  40  from ganeti import ssconf 
  41  from ganeti import rpc 
  42  from ganeti import ssh 
  43  from ganeti import compat 
  44  from ganeti import netutils 
  45  from ganeti import qlang 
  46  from ganeti import objects 
  47   
  48  from optparse import (OptionParser, TitledHelpFormatter, 
  49                        Option, OptionValueError) 
  50   
  51   
  52  __all__ = [ 
  53    # Command line options 
  54    "ABSOLUTE_OPT", 
  55    "ADD_UIDS_OPT", 
  56    "ALLOCATABLE_OPT", 
  57    "ALLOC_POLICY_OPT", 
  58    "ALL_OPT", 
  59    "ALLOW_FAILOVER_OPT", 
  60    "AUTO_PROMOTE_OPT", 
  61    "AUTO_REPLACE_OPT", 
  62    "BACKEND_OPT", 
  63    "BLK_OS_OPT", 
  64    "CAPAB_MASTER_OPT", 
  65    "CAPAB_VM_OPT", 
  66    "CLEANUP_OPT", 
  67    "CLUSTER_DOMAIN_SECRET_OPT", 
  68    "CONFIRM_OPT", 
  69    "CP_SIZE_OPT", 
  70    "DEBUG_OPT", 
  71    "DEBUG_SIMERR_OPT", 
  72    "DISKIDX_OPT", 
  73    "DISK_OPT", 
  74    "DISK_PARAMS_OPT", 
  75    "DISK_TEMPLATE_OPT", 
  76    "DRAINED_OPT", 
  77    "DRY_RUN_OPT", 
  78    "DRBD_HELPER_OPT", 
  79    "DST_NODE_OPT", 
  80    "EARLY_RELEASE_OPT", 
  81    "ENABLED_HV_OPT", 
  82    "ERROR_CODES_OPT", 
  83    "FIELDS_OPT", 
  84    "FILESTORE_DIR_OPT", 
  85    "FILESTORE_DRIVER_OPT", 
  86    "FORCE_FILTER_OPT", 
  87    "FORCE_OPT", 
  88    "FORCE_VARIANT_OPT", 
  89    "GLOBAL_FILEDIR_OPT", 
  90    "HID_OS_OPT", 
  91    "GLOBAL_SHARED_FILEDIR_OPT", 
  92    "HVLIST_OPT", 
  93    "HVOPTS_OPT", 
  94    "HYPERVISOR_OPT", 
  95    "IALLOCATOR_OPT", 
  96    "DEFAULT_IALLOCATOR_OPT", 
  97    "IDENTIFY_DEFAULTS_OPT", 
  98    "IGNORE_CONSIST_OPT", 
  99    "IGNORE_ERRORS_OPT", 
 100    "IGNORE_FAILURES_OPT", 
 101    "IGNORE_OFFLINE_OPT", 
 102    "IGNORE_REMOVE_FAILURES_OPT", 
 103    "IGNORE_SECONDARIES_OPT", 
 104    "IGNORE_SIZE_OPT", 
 105    "INTERVAL_OPT", 
 106    "MAC_PREFIX_OPT", 
 107    "MAINTAIN_NODE_HEALTH_OPT", 
 108    "MASTER_NETDEV_OPT", 
 109    "MASTER_NETMASK_OPT", 
 110    "MC_OPT", 
 111    "MIGRATION_MODE_OPT", 
 112    "NET_OPT", 
 113    "NEW_CLUSTER_CERT_OPT", 
 114    "NEW_CLUSTER_DOMAIN_SECRET_OPT", 
 115    "NEW_CONFD_HMAC_KEY_OPT", 
 116    "NEW_RAPI_CERT_OPT", 
 117    "NEW_SECONDARY_OPT", 
 118    "NEW_SPICE_CERT_OPT", 
 119    "NIC_PARAMS_OPT", 
 120    "NODE_FORCE_JOIN_OPT", 
 121    "NODE_LIST_OPT", 
 122    "NODE_PLACEMENT_OPT", 
 123    "NODEGROUP_OPT", 
 124    "NODE_PARAMS_OPT", 
 125    "NODE_POWERED_OPT", 
 126    "NODRBD_STORAGE_OPT", 
 127    "NOHDR_OPT", 
 128    "NOIPCHECK_OPT", 
 129    "NO_INSTALL_OPT", 
 130    "NONAMECHECK_OPT", 
 131    "NOLVM_STORAGE_OPT", 
 132    "NOMODIFY_ETCHOSTS_OPT", 
 133    "NOMODIFY_SSH_SETUP_OPT", 
 134    "NONICS_OPT", 
 135    "NONLIVE_OPT", 
 136    "NONPLUS1_OPT", 
 137    "NORUNTIME_CHGS_OPT", 
 138    "NOSHUTDOWN_OPT", 
 139    "NOSTART_OPT", 
 140    "NOSSH_KEYCHECK_OPT", 
 141    "NOVOTING_OPT", 
 142    "NO_REMEMBER_OPT", 
 143    "NWSYNC_OPT", 
 144    "OFFLINE_INST_OPT", 
 145    "ONLINE_INST_OPT", 
 146    "ON_PRIMARY_OPT", 
 147    "ON_SECONDARY_OPT", 
 148    "OFFLINE_OPT", 
 149    "OSPARAMS_OPT", 
 150    "OS_OPT", 
 151    "OS_SIZE_OPT", 
 152    "OOB_TIMEOUT_OPT", 
 153    "POWER_DELAY_OPT", 
 154    "PREALLOC_WIPE_DISKS_OPT", 
 155    "PRIMARY_IP_VERSION_OPT", 
 156    "PRIMARY_ONLY_OPT", 
 157    "PRIORITY_OPT", 
 158    "RAPI_CERT_OPT", 
 159    "READD_OPT", 
 160    "REBOOT_TYPE_OPT", 
 161    "REMOVE_INSTANCE_OPT", 
 162    "REMOVE_UIDS_OPT", 
 163    "RESERVED_LVS_OPT", 
 164    "RUNTIME_MEM_OPT", 
 165    "ROMAN_OPT", 
 166    "SECONDARY_IP_OPT", 
 167    "SECONDARY_ONLY_OPT", 
 168    "SELECT_OS_OPT", 
 169    "SEP_OPT", 
 170    "SHOWCMD_OPT", 
 171    "SHUTDOWN_TIMEOUT_OPT", 
 172    "SINGLE_NODE_OPT", 
 173    "SPECS_CPU_COUNT_OPT", 
 174    "SPECS_DISK_COUNT_OPT", 
 175    "SPECS_DISK_SIZE_OPT", 
 176    "SPECS_MEM_SIZE_OPT", 
 177    "SPECS_NIC_COUNT_OPT", 
 178    "IPOLICY_DISK_TEMPLATES", 
 179    "IPOLICY_VCPU_RATIO", 
 180    "SPICE_CACERT_OPT", 
 181    "SPICE_CERT_OPT", 
 182    "SRC_DIR_OPT", 
 183    "SRC_NODE_OPT", 
 184    "SUBMIT_OPT", 
 185    "STARTUP_PAUSED_OPT", 
 186    "STATIC_OPT", 
 187    "SYNC_OPT", 
 188    "TAG_ADD_OPT", 
 189    "TAG_SRC_OPT", 
 190    "TIMEOUT_OPT", 
 191    "TO_GROUP_OPT", 
 192    "UIDPOOL_OPT", 
 193    "USEUNITS_OPT", 
 194    "USE_EXTERNAL_MIP_SCRIPT", 
 195    "USE_REPL_NET_OPT", 
 196    "VERBOSE_OPT", 
 197    "VG_NAME_OPT", 
 198    "YES_DOIT_OPT", 
 199    "DISK_STATE_OPT", 
 200    "HV_STATE_OPT", 
 201    "IGNORE_IPOLICY_OPT", 
 202    "INSTANCE_POLICY_OPTS", 
 203    # Generic functions for CLI programs 
 204    "ConfirmOperation", 
 205    "CreateIPolicyFromOpts", 
 206    "GenericMain", 
 207    "GenericInstanceCreate", 
 208    "GenericList", 
 209    "GenericListFields", 
 210    "GetClient", 
 211    "GetOnlineNodes", 
 212    "JobExecutor", 
 213    "JobSubmittedException", 
 214    "ParseTimespec", 
 215    "RunWhileClusterStopped", 
 216    "SubmitOpCode", 
 217    "SubmitOrSend", 
 218    "UsesRPC", 
 219    # Formatting functions 
 220    "ToStderr", "ToStdout", 
 221    "FormatError", 
 222    "FormatQueryResult", 
 223    "FormatParameterDict", 
 224    "GenerateTable", 
 225    "AskUser", 
 226    "FormatTimestamp", 
 227    "FormatLogMessage", 
 228    # Tags functions 
 229    "ListTags", 
 230    "AddTags", 
 231    "RemoveTags", 
 232    # command line options support infrastructure 
 233    "ARGS_MANY_INSTANCES", 
 234    "ARGS_MANY_NODES", 
 235    "ARGS_MANY_GROUPS", 
 236    "ARGS_NONE", 
 237    "ARGS_ONE_INSTANCE", 
 238    "ARGS_ONE_NODE", 
 239    "ARGS_ONE_GROUP", 
 240    "ARGS_ONE_OS", 
 241    "ArgChoice", 
 242    "ArgCommand", 
 243    "ArgFile", 
 244    "ArgGroup", 
 245    "ArgHost", 
 246    "ArgInstance", 
 247    "ArgJobId", 
 248    "ArgNode", 
 249    "ArgOs", 
 250    "ArgSuggest", 
 251    "ArgUnknown", 
 252    "OPT_COMPL_INST_ADD_NODES", 
 253    "OPT_COMPL_MANY_NODES", 
 254    "OPT_COMPL_ONE_IALLOCATOR", 
 255    "OPT_COMPL_ONE_INSTANCE", 
 256    "OPT_COMPL_ONE_NODE", 
 257    "OPT_COMPL_ONE_NODEGROUP", 
 258    "OPT_COMPL_ONE_OS", 
 259    "cli_option", 
 260    "SplitNodeOption", 
 261    "CalculateOSNames", 
 262    "ParseFields", 
 263    "COMMON_CREATE_OPTS", 
 264    ] 
 265   
 266  NO_PREFIX = "no_" 
 267  UN_PREFIX = "-" 
 268   
 269  #: Priorities (sorted) 
 270  _PRIORITY_NAMES = [ 
 271    ("low", constants.OP_PRIO_LOW), 
 272    ("normal", constants.OP_PRIO_NORMAL), 
 273    ("high", constants.OP_PRIO_HIGH), 
 274    ] 
 275   
 276  #: Priority dictionary for easier lookup 
 277  # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once 
 278  # we migrate to Python 2.6 
 279  _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES) 
 280   
 281  # Query result status for clients 
 282  (QR_NORMAL, 
 283   QR_UNKNOWN, 
 284   QR_INCOMPLETE) = range(3) 
 285   
 286  #: Maximum batch size for ChooseJob 
 287  _CHOOSE_BATCH = 25 
 288   
 289   
 290  # constants used to create InstancePolicy dictionary 
 291  TISPECS_GROUP_TYPES = { 
 292    constants.ISPECS_MIN: constants.VTYPE_INT, 
 293    constants.ISPECS_MAX: constants.VTYPE_INT, 
 294    } 
 295   
 296  TISPECS_CLUSTER_TYPES = { 
 297    constants.ISPECS_MIN: constants.VTYPE_INT, 
 298    constants.ISPECS_MAX: constants.VTYPE_INT, 
 299    constants.ISPECS_STD: constants.VTYPE_INT, 
 300    } 
 301
 302
 303  class _Argument:
 304    def __init__(self, min=0, max=None): # pylint: disable=W0622
 305      self.min = min
 306      self.max = max
 307
 308    def __repr__(self):
 309      return ("<%s min=%s max=%s>" %
 310              (self.__class__.__name__, self.min, self.max))
 311
 312
 313  class ArgSuggest(_Argument):
 314    """Suggesting argument.
 315
 316    Value can be any of the ones passed to the constructor.
 317
 318    """
 319    # pylint: disable=W0622
 320    def __init__(self, min=0, max=None, choices=None):
 321      _Argument.__init__(self, min=min, max=max)
 322      self.choices = choices
 323
 324    def __repr__(self):
 325      return ("<%s min=%s max=%s choices=%r>" %
 326              (self.__class__.__name__, self.min, self.max, self.choices))
 327
 328
 329  class ArgChoice(ArgSuggest):
 330    """Choice argument.
 331
 332    Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
 333    but value must be one of the choices.
 334
 335    """
 336
 337
 338  class ArgUnknown(_Argument):
 339    """Unknown argument to program (e.g. determined at runtime).
 340
 341    """
 342
 343
 344  class ArgInstance(_Argument):
 345    """Instances argument.
 346
 347    """
 348
 349
 350  class ArgNode(_Argument):
 351    """Node argument.
 352
 353    """
 354
 355
 356  class ArgGroup(_Argument):
 357    """Node group argument.
 358
 359    """
 360
 361
 362  class ArgJobId(_Argument):
 363    """Job ID argument.
 364
 365    """
 366
 367
 368  class ArgFile(_Argument):
 369    """File path argument.
 370
 371    """
 372
 373
 374  class ArgCommand(_Argument):
 375    """Command argument.
 376
 377    """
 378
 379
 380  class ArgHost(_Argument):
 381    """Host argument.
 382
 383    """
 384
 385
 386  class ArgOs(_Argument):
 387    """OS argument.
 388
 389    """
 390
 391
 392  ARGS_NONE = []
 393  ARGS_MANY_INSTANCES = [ArgInstance()]
 394  ARGS_MANY_NODES = [ArgNode()]
 395  ARGS_MANY_GROUPS = [ArgGroup()]
 396  ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
 397  ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
 398  # TODO
 399  ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
 400  ARGS_ONE_OS = [ArgOs(min=1, max=1)]
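
These argument specifications are consumed by the command dispatch machinery further down: _ParseArgs unpacks each command table entry and _CheckArguments enforces the min/max counts. A minimal sketch of such an entry, assuming the ganeti package is importable; the handler, the "info" command and its usage string are hypothetical:

# Sketch only -- "ListInstanceInfo" and the "info" command are hypothetical.
from ganeti import cli

def ListInstanceInfo(opts, args):
  # args has already been validated against ARGS_ONE_INSTANCE,
  # so exactly one instance name is present.
  cli.ToStdout("Instance: %s", args[0])
  return 0  # exit status (assumed convention for command handlers)

commands = {
  # (handler, argument spec, extra options, usage, description)
  "info": (ListInstanceInfo, cli.ARGS_ONE_INSTANCE, [],
           "<instance>", "Show information about an instance"),
  }
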
401 402 403 -def _ExtractTagsObject(opts, args):
404 """Extract the tag type object. 405 406 Note that this function will modify its args parameter. 407 408 """ 409 if not hasattr(opts, "tag_type"): 410 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject") 411 kind = opts.tag_type 412 if kind == constants.TAG_CLUSTER: 413 retval = kind, kind 414 elif kind in (constants.TAG_NODEGROUP, 415 constants.TAG_NODE, 416 constants.TAG_INSTANCE): 417 if not args: 418 raise errors.OpPrereqError("no arguments passed to the command") 419 name = args.pop(0) 420 retval = kind, name 421 else: 422 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind) 423 return retval
424
425 426 -def _ExtendTags(opts, args):
427 """Extend the args if a source file has been given. 428 429 This function will extend the tags with the contents of the file 430 passed in the 'tags_source' attribute of the opts parameter. A file 431 named '-' will be replaced by stdin. 432 433 """ 434 fname = opts.tags_source 435 if fname is None: 436 return 437 if fname == "-": 438 new_fh = sys.stdin 439 else: 440 new_fh = open(fname, "r") 441 new_data = [] 442 try: 443 # we don't use the nice 'new_data = [line.strip() for line in fh]' 444 # because of python bug 1633941 445 while True: 446 line = new_fh.readline() 447 if not line: 448 break 449 new_data.append(line.strip()) 450 finally: 451 new_fh.close() 452 args.extend(new_data)
453
454 455 -def ListTags(opts, args):
456 """List the tags on a given object. 457 458 This is a generic implementation that knows how to deal with all 459 three cases of tag objects (cluster, node, instance). The opts 460 argument is expected to contain a tag_type field denoting what 461 object type we work on. 462 463 """ 464 kind, name = _ExtractTagsObject(opts, args) 465 cl = GetClient() 466 result = cl.QueryTags(kind, name) 467 result = list(result) 468 result.sort() 469 for tag in result: 470 ToStdout(tag)
471
472 473 -def AddTags(opts, args):
474 """Add tags on a given object. 475 476 This is a generic implementation that knows how to deal with all 477 three cases of tag objects (cluster, node, instance). The opts 478 argument is expected to contain a tag_type field denoting what 479 object type we work on. 480 481 """ 482 kind, name = _ExtractTagsObject(opts, args) 483 _ExtendTags(opts, args) 484 if not args: 485 raise errors.OpPrereqError("No tags to be added") 486 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args) 487 SubmitOrSend(op, opts)
488
489 490 -def RemoveTags(opts, args):
491 """Remove tags from a given object. 492 493 This is a generic implementation that knows how to deal with all 494 three cases of tag objects (cluster, node, instance). The opts 495 argument is expected to contain a tag_type field denoting what 496 object type we work on. 497 498 """ 499 kind, name = _ExtractTagsObject(opts, args) 500 _ExtendTags(opts, args) 501 if not args: 502 raise errors.OpPrereqError("No tags to be removed") 503 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args) 504 SubmitOrSend(op, opts)
505
 506
 507  def check_unit(option, opt, value): # pylint: disable=W0613
 508    """optparse custom converter for units.
 509
 510    """
 511    try:
 512      return utils.ParseUnit(value)
 513    except errors.UnitParseError, err:
 514      raise OptionValueError("option %s: %s" % (opt, err))
 515
 516
 517  def _SplitKeyVal(opt, data):
 518    """Convert a KeyVal string into a dict.
 519
 520    This function will convert a key=val[,...] string into a dict. Empty
 521    values are converted specially: keys with the prefix 'no_' get
 522    value=False (prefix stripped), keys with the prefix '-' get value=None
 523    (prefix stripped), and all others get value=True.
 524
 525    @type opt: string
 526    @param opt: a string holding the option name for which we process the
 527        data, used in building error messages
 528    @type data: string
 529    @param data: a string of the format key=val,key=val,...
 530    @rtype: dict
 531    @return: dictionary of the parsed key/value pairs
 532    @raises errors.ParameterError: if there are duplicate keys
 533
 534    """
 535    kv_dict = {}
 536    if data:
 537      for elem in utils.UnescapeAndSplit(data, sep=","):
 538        if "=" in elem:
 539          key, val = elem.split("=", 1)
 540        else:
 541          if elem.startswith(NO_PREFIX):
 542            key, val = elem[len(NO_PREFIX):], False
 543          elif elem.startswith(UN_PREFIX):
 544            key, val = elem[len(UN_PREFIX):], None
 545          else:
 546            key, val = elem, True
 547        if key in kv_dict:
 548          raise errors.ParameterError("Duplicate key '%s' in option %s" %
 549                                      (key, opt))
 550        kv_dict[key] = val
 551    return kv_dict
552
 553
 554  def check_ident_key_val(option, opt, value): # pylint: disable=W0613
 555    """Custom parser for ident:key=val,key=val options.
 556
 557    This will store the parsed values as a tuple (ident, {key: val}). As such,
 558    multiple uses of this option via action=append are possible.
 559
 560    """
 561    if ":" not in value:
 562      ident, rest = value, ""
 563    else:
 564      ident, rest = value.split(":", 1)
 565
 566    if ident.startswith(NO_PREFIX):
 567      if rest:
 568        msg = "Cannot pass options when removing parameter groups: %s" % value
 569        raise errors.ParameterError(msg)
 570      retval = (ident[len(NO_PREFIX):], False)
 571    elif (ident.startswith(UN_PREFIX) and
 572          (len(ident) <= len(UN_PREFIX) or
 573           not ident[len(UN_PREFIX)][0].isdigit())):
 574      if rest:
 575        msg = "Cannot pass options when removing parameter groups: %s" % value
 576        raise errors.ParameterError(msg)
 577      retval = (ident[len(UN_PREFIX):], None)
 578    else:
 579      kv_dict = _SplitKeyVal(opt, rest)
 580      retval = (ident, kv_dict)
 581    return retval
 582
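
A quick illustration of the prefix handling above (a sketch only: it calls the private helper _SplitKeyVal directly and assumes the ganeti package is importable):

from ganeti import cli

# The option name is only used for error messages here.
print cli._SplitKeyVal("-H", "acpi=true,no_pae,-kernel_path")
# -> {'acpi': 'true', 'pae': False, 'kernel_path': None}

# ident:key=val form, as used by options such as --net.
print cli.check_ident_key_val(None, "--net", "0:mac=aa:00:00:fa:3a:3f")
# -> ('0', {'mac': 'aa:00:00:fa:3a:3f'})
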
 583
 584  def check_key_val(option, opt, value): # pylint: disable=W0613
 585    """Custom parser for key=val,key=val options.
 586
 587    This will store the parsed values as a dict {key: val}.
 588
 589    """
 590    return _SplitKeyVal(opt, value)
 591
 592
 593  def check_bool(option, opt, value): # pylint: disable=W0613
 594    """Custom parser for yes/no options.
 595
 596    This will store the parsed value as either True or False.
 597
 598    """
 599    value = value.lower()
 600    if value == constants.VALUE_FALSE or value == "no":
 601      return False
 602    elif value == constants.VALUE_TRUE or value == "yes":
 603      return True
 604    else:
 605      raise errors.ParameterError("Invalid boolean value '%s'" % value)
 606
 607
 608  def check_list(option, opt, value): # pylint: disable=W0613
 609    """Custom parser for comma-separated lists.
 610
 611    """
 612    # we have to make this explicit check since "".split(",") is [""],
 613    # not an empty list :(
 614    if not value:
 615      return []
 616    else:
 617      return utils.UnescapeAndSplit(value)
 618
 619
 620  def check_maybefloat(option, opt, value): # pylint: disable=W0613
 621    """Custom parser for float numbers that may also be the value 'default'.
 622
 623    """
 624    value = value.lower()
 625
 626    if value == constants.VALUE_DEFAULT:
 627      return value
 628    else:
 629      return float(value)
 630
 631
 632  # completion_suggest is normally a list. Numeric values that do not evaluate
 633  # to False are used as markers for dynamic completion.
 634  (OPT_COMPL_MANY_NODES,
 635   OPT_COMPL_ONE_NODE,
 636   OPT_COMPL_ONE_INSTANCE,
 637   OPT_COMPL_ONE_OS,
 638   OPT_COMPL_ONE_IALLOCATOR,
 639   OPT_COMPL_INST_ADD_NODES,
 640   OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
 641
 642  OPT_COMPL_ALL = frozenset([
 643    OPT_COMPL_MANY_NODES,
 644    OPT_COMPL_ONE_NODE,
 645    OPT_COMPL_ONE_INSTANCE,
 646    OPT_COMPL_ONE_OS,
 647    OPT_COMPL_ONE_IALLOCATOR,
 648    OPT_COMPL_INST_ADD_NODES,
 649    OPT_COMPL_ONE_NODEGROUP,
 650    ])
 651
 652
 653  class CliOption(Option):
 654    """Custom option class for optparse.
 655
 656    """
 657    ATTRS = Option.ATTRS + [
 658      "completion_suggest",
 659      ]
 660    TYPES = Option.TYPES + (
 661      "identkeyval",
 662      "keyval",
 663      "unit",
 664      "bool",
 665      "list",
 666      "maybefloat",
 667      )
 668    TYPE_CHECKER = Option.TYPE_CHECKER.copy()
 669    TYPE_CHECKER["identkeyval"] = check_ident_key_val
 670    TYPE_CHECKER["keyval"] = check_key_val
 671    TYPE_CHECKER["unit"] = check_unit
 672    TYPE_CHECKER["bool"] = check_bool
 673    TYPE_CHECKER["list"] = check_list
 674    TYPE_CHECKER["maybefloat"] = check_maybefloat
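
For context, a hedged sketch of how the custom types registered above flow through optparse, using the cli_option alias defined just below; the --example-params option is purely hypothetical:

import optparse

from ganeti import cli

# type="keyval" routes the raw string through check_key_val/_SplitKeyVal.
EXAMPLE_PARAMS_OPT = cli.cli_option("--example-params", dest="example_params",
                                    type="keyval", default={},
                                    help="Hypothetical key=value parameters")

parser = optparse.OptionParser(option_list=[EXAMPLE_PARAMS_OPT])
(options, args) = parser.parse_args(["--example-params",
                                     "mode=bridged,no_ipcheck"])
# options.example_params == {'mode': 'bridged', 'ipcheck': False}
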
675 676 677 # optparse.py sets make_option, so we do it for our own option class, too 678 cli_option = CliOption 679 680 681 _YORNO = "yes|no" 682 683 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count", 684 help="Increase debugging level") 685 686 NOHDR_OPT = cli_option("--no-headers", default=False, 687 action="store_true", dest="no_headers", 688 help="Don't display column headers") 689 690 SEP_OPT = cli_option("--separator", default=None, 691 action="store", dest="separator", 692 help=("Separator between output fields" 693 " (defaults to one space)")) 694 695 USEUNITS_OPT = cli_option("--units", default=None, 696 dest="units", choices=("h", "m", "g", "t"), 697 help="Specify units for output (one of h/m/g/t)") 698 699 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store", 700 type="string", metavar="FIELDS", 701 help="Comma separated list of output fields") 702 703 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true", 704 default=False, help="Force the operation") 705 706 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true", 707 default=False, help="Do not require confirmation") 708 709 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline", 710 action="store_true", default=False, 711 help=("Ignore offline nodes and do as much" 712 " as possible")) 713 714 TAG_ADD_OPT = cli_option("--tags", dest="tags", 715 default=None, help="Comma-separated list of instance" 716 " tags") 717 718 TAG_SRC_OPT = cli_option("--from", dest="tags_source", 719 default=None, help="File with tag names") 720 721 SUBMIT_OPT = cli_option("--submit", dest="submit_only", 722 default=False, action="store_true", 723 help=("Submit the job and return the job ID, but" 724 " don't wait for the job to finish")) 725 726 SYNC_OPT = cli_option("--sync", dest="do_locking", 727 default=False, action="store_true", 728 help=("Grab locks while doing the queries" 729 " in order to ensure more consistent results")) 730 731 DRY_RUN_OPT = cli_option("--dry-run", default=False, 732 action="store_true", 733 help=("Do not execute the operation, just run the" 734 " check steps and verify it it could be" 735 " executed")) 736 737 VERBOSE_OPT = cli_option("-v", "--verbose", default=False, 738 action="store_true", 739 help="Increase the verbosity of the operation") 740 741 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False, 742 action="store_true", dest="simulate_errors", 743 help="Debugging option that makes the operation" 744 " treat most runtime checks as failed") 745 746 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync", 747 default=True, action="store_false", 748 help="Don't wait for sync (DANGEROUS!)") 749 750 ONLINE_INST_OPT = cli_option("--online", dest="online_inst", 751 action="store_true", default=False, 752 help="Enable offline instance") 753 754 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst", 755 action="store_true", default=False, 756 help="Disable down instance") 757 758 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template", 759 help=("Custom disk setup (%s)" % 760 utils.CommaJoin(constants.DISK_TEMPLATES)), 761 default=None, metavar="TEMPL", 762 choices=list(constants.DISK_TEMPLATES)) 763 764 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true", 765 help="Do not create any network cards for" 766 " the instance") 767 768 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir", 769 help="Relative path under default 
cluster-wide" 770 " file storage dir to store file-based disks", 771 default=None, metavar="<DIR>") 772 773 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver", 774 help="Driver to use for image files", 775 default="loop", metavar="<DRIVER>", 776 choices=list(constants.FILE_DRIVER)) 777 778 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>", 779 help="Select nodes for the instance automatically" 780 " using the <NAME> iallocator plugin", 781 default=None, type="string", 782 completion_suggest=OPT_COMPL_ONE_IALLOCATOR) 783 784 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator", 785 metavar="<NAME>", 786 help="Set the default instance allocator plugin", 787 default=None, type="string", 788 completion_suggest=OPT_COMPL_ONE_IALLOCATOR) 789 790 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run", 791 metavar="<os>", 792 completion_suggest=OPT_COMPL_ONE_OS) 793 794 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams", 795 type="keyval", default={}, 796 help="OS parameters") 797 798 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant", 799 action="store_true", default=False, 800 help="Force an unknown variant") 801 802 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install", 803 action="store_true", default=False, 804 help="Do not install the OS (will" 805 " enable no-start)") 806 807 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes", 808 dest="allow_runtime_chgs", 809 default=True, action="store_false", 810 help="Don't allow runtime changes") 811 812 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams", 813 type="keyval", default={}, 814 help="Backend parameters") 815 816 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval", 817 default={}, dest="hvparams", 818 help="Hypervisor parameters") 819 820 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams", 821 help="Disk template parameters, in the format" 822 " template:option=value,option=value,...", 823 type="identkeyval", action="append", default=[]) 824 825 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size", 826 type="keyval", default={}, 827 help="Memory size specs: list of key=value," 828 " where key is one of min, max, std" 829 " (in MB or using a unit)") 830 831 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count", 832 type="keyval", default={}, 833 help="CPU count specs: list of key=value," 834 " where key is one of min, max, std") 835 836 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count", 837 dest="ispecs_disk_count", 838 type="keyval", default={}, 839 help="Disk count specs: list of key=value," 840 " where key is one of min, max, std") 841 842 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size", 843 type="keyval", default={}, 844 help="Disk size specs: list of key=value," 845 " where key is one of min, max, std" 846 " (in MB or using a unit)") 847 848 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count", 849 type="keyval", default={}, 850 help="NIC count specs: list of key=value," 851 " where key is one of min, max, std") 852 853 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates", 854 dest="ipolicy_disk_templates", 855 type="list", default=None, 856 help="Comma-separated list of" 857 " enabled disk templates") 858 859 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio", 860 dest="ipolicy_vcpu_ratio", 861 type="maybefloat", default=None, 862 help="The 
maximum allowed vcpu-to-cpu ratio") 863 864 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio", 865 dest="ipolicy_spindle_ratio", 866 type="maybefloat", default=None, 867 help=("The maximum allowed instances to" 868 " spindle ratio")) 869 870 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor", 871 help="Hypervisor and hypervisor options, in the" 872 " format hypervisor:option=value,option=value,...", 873 default=None, type="identkeyval") 874 875 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams", 876 help="Hypervisor and hypervisor options, in the" 877 " format hypervisor:option=value,option=value,...", 878 default=[], action="append", type="identkeyval") 879 880 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True, 881 action="store_false", 882 help="Don't check that the instance's IP" 883 " is alive") 884 885 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check", 886 default=True, action="store_false", 887 help="Don't check that the instance's name" 888 " is resolvable") 889 890 NET_OPT = cli_option("--net", 891 help="NIC parameters", default=[], 892 dest="nics", action="append", type="identkeyval") 893 894 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[], 895 dest="disks", action="append", type="identkeyval") 896 897 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None, 898 help="Comma-separated list of disks" 899 " indices to act on (e.g. 0,2) (optional," 900 " defaults to all disks)") 901 902 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size", 903 help="Enforces a single-disk configuration using the" 904 " given disk size, in MiB unless a suffix is used", 905 default=None, type="unit", metavar="<size>") 906 907 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency", 908 dest="ignore_consistency", 909 action="store_true", default=False, 910 help="Ignore the consistency of the disks on" 911 " the secondary") 912 913 ALLOW_FAILOVER_OPT = cli_option("--allow-failover", 914 dest="allow_failover", 915 action="store_true", default=False, 916 help="If migration is not possible fallback to" 917 " failover") 918 919 NONLIVE_OPT = cli_option("--non-live", dest="live", 920 default=True, action="store_false", 921 help="Do a non-live migration (this usually means" 922 " freeze the instance, save the state, transfer and" 923 " only then resume running on the secondary node)") 924 925 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode", 926 default=None, 927 choices=list(constants.HT_MIGRATION_MODES), 928 help="Override default migration mode (choose" 929 " either live or non-live") 930 931 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node", 932 help="Target node and optional secondary node", 933 metavar="<pnode>[:<snode>]", 934 completion_suggest=OPT_COMPL_INST_ADD_NODES) 935 936 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[], 937 action="append", metavar="<node>", 938 help="Use only this node (can be used multiple" 939 " times, if not given defaults to all nodes)", 940 completion_suggest=OPT_COMPL_ONE_NODE) 941 942 NODEGROUP_OPT_NAME = "--node-group" 943 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME, 944 dest="nodegroup", 945 help="Node group (name or uuid)", 946 metavar="<nodegroup>", 947 default=None, type="string", 948 completion_suggest=OPT_COMPL_ONE_NODEGROUP) 949 950 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node", 951 metavar="<node>", 952 completion_suggest=OPT_COMPL_ONE_NODE) 
953 954 NOSTART_OPT = cli_option("--no-start", dest="start", default=True, 955 action="store_false", 956 help="Don't start the instance after creation") 957 958 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command", 959 action="store_true", default=False, 960 help="Show command instead of executing it") 961 962 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup", 963 default=False, action="store_true", 964 help="Instead of performing the migration, try to" 965 " recover from a failed cleanup. This is safe" 966 " to run even if the instance is healthy, but it" 967 " will create extra replication traffic and " 968 " disrupt briefly the replication (like during the" 969 " migration") 970 971 STATIC_OPT = cli_option("-s", "--static", dest="static", 972 action="store_true", default=False, 973 help="Only show configuration data, not runtime data") 974 975 ALL_OPT = cli_option("--all", dest="show_all", 976 default=False, action="store_true", 977 help="Show info on all instances on the cluster." 978 " This can take a long time to run, use wisely") 979 980 SELECT_OS_OPT = cli_option("--select-os", dest="select_os", 981 action="store_true", default=False, 982 help="Interactive OS reinstall, lists available" 983 " OS templates for selection") 984 985 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures", 986 action="store_true", default=False, 987 help="Remove the instance from the cluster" 988 " configuration even if there are failures" 989 " during the removal process") 990 991 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures", 992 dest="ignore_remove_failures", 993 action="store_true", default=False, 994 help="Remove the instance from the" 995 " cluster configuration even if there" 996 " are failures during the removal" 997 " process") 998 999 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance", 1000 action="store_true", default=False, 1001 help="Remove the instance from the cluster") 1002 1003 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node", 1004 help="Specifies the new node for the instance", 1005 metavar="NODE", default=None, 1006 completion_suggest=OPT_COMPL_ONE_NODE) 1007 1008 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node", 1009 help="Specifies the new secondary node", 1010 metavar="NODE", default=None, 1011 completion_suggest=OPT_COMPL_ONE_NODE) 1012 1013 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary", 1014 default=False, action="store_true", 1015 help="Replace the disk(s) on the primary" 1016 " node (applies only to internally mirrored" 1017 " disk templates, e.g. %s)" % 1018 utils.CommaJoin(constants.DTS_INT_MIRROR)) 1019 1020 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary", 1021 default=False, action="store_true", 1022 help="Replace the disk(s) on the secondary" 1023 " node (applies only to internally mirrored" 1024 " disk templates, e.g. %s)" % 1025 utils.CommaJoin(constants.DTS_INT_MIRROR)) 1026 1027 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote", 1028 default=False, action="store_true", 1029 help="Lock all nodes and auto-promote as needed" 1030 " to MC status") 1031 1032 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto", 1033 default=False, action="store_true", 1034 help="Automatically replace faulty disks" 1035 " (applies only to internally mirrored" 1036 " disk templates, e.g. 
%s)" % 1037 utils.CommaJoin(constants.DTS_INT_MIRROR)) 1038 1039 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size", 1040 default=False, action="store_true", 1041 help="Ignore current recorded size" 1042 " (useful for forcing activation when" 1043 " the recorded size is wrong)") 1044 1045 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node", 1046 metavar="<node>", 1047 completion_suggest=OPT_COMPL_ONE_NODE) 1048 1049 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory", 1050 metavar="<dir>") 1051 1052 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip", 1053 help="Specify the secondary ip for the node", 1054 metavar="ADDRESS", default=None) 1055 1056 READD_OPT = cli_option("--readd", dest="readd", 1057 default=False, action="store_true", 1058 help="Readd old node after replacing it") 1059 1060 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check", 1061 default=True, action="store_false", 1062 help="Disable SSH key fingerprint checking") 1063 1064 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join", 1065 default=False, action="store_true", 1066 help="Force the joining of a node") 1067 1068 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate", 1069 type="bool", default=None, metavar=_YORNO, 1070 help="Set the master_candidate flag on the node") 1071 1072 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO, 1073 type="bool", default=None, 1074 help=("Set the offline flag on the node" 1075 " (cluster does not communicate with offline" 1076 " nodes)")) 1077 1078 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO, 1079 type="bool", default=None, 1080 help=("Set the drained flag on the node" 1081 " (excluded from allocation operations)")) 1082 1083 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable", 1084 type="bool", default=None, metavar=_YORNO, 1085 help="Set the master_capable flag on the node") 1086 1087 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable", 1088 type="bool", default=None, metavar=_YORNO, 1089 help="Set the vm_capable flag on the node") 1090 1091 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable", 1092 type="bool", default=None, metavar=_YORNO, 1093 help="Set the allocatable flag on a volume") 1094 1095 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage", 1096 help="Disable support for lvm based instances" 1097 " (cluster-wide)", 1098 action="store_false", default=True) 1099 1100 ENABLED_HV_OPT = cli_option("--enabled-hypervisors", 1101 dest="enabled_hypervisors", 1102 help="Comma-separated list of hypervisors", 1103 type="string", default=None) 1104 1105 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams", 1106 type="keyval", default={}, 1107 help="NIC parameters") 1108 1109 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None, 1110 dest="candidate_pool_size", type="int", 1111 help="Set the candidate pool size") 1112 1113 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name", 1114 help=("Enables LVM and specifies the volume group" 1115 " name (cluster-wide) for disk allocation" 1116 " [%s]" % constants.DEFAULT_VG), 1117 metavar="VG", default=None) 1118 1119 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it", 1120 help="Destroy cluster", action="store_true") 1121 1122 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting", 1123 help="Skip node agreement check (dangerous)", 1124 
action="store_true", default=False) 1125 1126 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix", 1127 help="Specify the mac prefix for the instance IP" 1128 " addresses, in the format XX:XX:XX", 1129 metavar="PREFIX", 1130 default=None) 1131 1132 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev", 1133 help="Specify the node interface (cluster-wide)" 1134 " on which the master IP address will be added" 1135 " (cluster init default: %s)" % 1136 constants.DEFAULT_BRIDGE, 1137 metavar="NETDEV", 1138 default=None) 1139 1140 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask", 1141 help="Specify the netmask of the master IP", 1142 metavar="NETMASK", 1143 default=None) 1144 1145 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script", 1146 dest="use_external_mip_script", 1147 help="Specify whether to run a user-provided" 1148 " script for the master IP address turnup and" 1149 " turndown operations", 1150 type="bool", metavar=_YORNO, default=None) 1151 1152 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir", 1153 help="Specify the default directory (cluster-" 1154 "wide) for storing the file-based disks [%s]" % 1155 constants.DEFAULT_FILE_STORAGE_DIR, 1156 metavar="DIR", 1157 default=constants.DEFAULT_FILE_STORAGE_DIR) 1158 1159 GLOBAL_SHARED_FILEDIR_OPT = cli_option("--shared-file-storage-dir", 1160 dest="shared_file_storage_dir", 1161 help="Specify the default directory (cluster-" 1162 "wide) for storing the shared file-based" 1163 " disks [%s]" % 1164 constants.DEFAULT_SHARED_FILE_STORAGE_DIR, 1165 metavar="SHAREDDIR", 1166 default=constants.DEFAULT_SHARED_FILE_STORAGE_DIR) 1167 1168 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts", 1169 help="Don't modify /etc/hosts", 1170 action="store_false", default=True) 1171 1172 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup", 1173 help="Don't initialize SSH keys", 1174 action="store_false", default=True) 1175 1176 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes", 1177 help="Enable parseable error messages", 1178 action="store_true", default=False) 1179 1180 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem", 1181 help="Skip N+1 memory redundancy tests", 1182 action="store_true", default=False) 1183 1184 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type", 1185 help="Type of reboot: soft/hard/full", 1186 default=constants.INSTANCE_REBOOT_HARD, 1187 metavar="<REBOOT>", 1188 choices=list(constants.REBOOT_TYPES)) 1189 1190 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries", 1191 dest="ignore_secondaries", 1192 default=False, action="store_true", 1193 help="Ignore errors from secondaries") 1194 1195 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown", 1196 action="store_false", default=True, 1197 help="Don't shutdown the instance (unsafe)") 1198 1199 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int", 1200 default=constants.DEFAULT_SHUTDOWN_TIMEOUT, 1201 help="Maximum time to wait") 1202 1203 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout", 1204 dest="shutdown_timeout", type="int", 1205 default=constants.DEFAULT_SHUTDOWN_TIMEOUT, 1206 help="Maximum time to wait for instance shutdown") 1207 1208 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int", 1209 default=None, 1210 help=("Number of seconds between repetions of the" 1211 " command")) 1212 1213 EARLY_RELEASE_OPT = cli_option("--early-release", 1214 
dest="early_release", default=False, 1215 action="store_true", 1216 help="Release the locks on the secondary" 1217 " node(s) early") 1218 1219 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate", 1220 dest="new_cluster_cert", 1221 default=False, action="store_true", 1222 help="Generate a new cluster certificate") 1223 1224 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert", 1225 default=None, 1226 help="File containing new RAPI certificate") 1227 1228 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert", 1229 default=None, action="store_true", 1230 help=("Generate a new self-signed RAPI" 1231 " certificate")) 1232 1233 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert", 1234 default=None, 1235 help="File containing new SPICE certificate") 1236 1237 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert", 1238 default=None, 1239 help="File containing the certificate of the CA" 1240 " which signed the SPICE certificate") 1241 1242 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate", 1243 dest="new_spice_cert", default=None, 1244 action="store_true", 1245 help=("Generate a new self-signed SPICE" 1246 " certificate")) 1247 1248 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key", 1249 dest="new_confd_hmac_key", 1250 default=False, action="store_true", 1251 help=("Create a new HMAC key for %s" % 1252 constants.CONFD)) 1253 1254 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret", 1255 dest="cluster_domain_secret", 1256 default=None, 1257 help=("Load new new cluster domain" 1258 " secret from file")) 1259 1260 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret", 1261 dest="new_cluster_domain_secret", 1262 default=False, action="store_true", 1263 help=("Create a new cluster domain" 1264 " secret")) 1265 1266 USE_REPL_NET_OPT = cli_option("--use-replication-network", 1267 dest="use_replication_network", 1268 help="Whether to use the replication network" 1269 " for talking to the nodes", 1270 action="store_true", default=False) 1271 1272 MAINTAIN_NODE_HEALTH_OPT = \ 1273 cli_option("--maintain-node-health", dest="maintain_node_health", 1274 metavar=_YORNO, default=None, type="bool", 1275 help="Configure the cluster to automatically maintain node" 1276 " health, by shutting down unknown instances, shutting down" 1277 " unknown DRBD devices, etc.") 1278 1279 IDENTIFY_DEFAULTS_OPT = \ 1280 cli_option("--identify-defaults", dest="identify_defaults", 1281 default=False, action="store_true", 1282 help="Identify which saved instance parameters are equal to" 1283 " the current cluster defaults and set them as such, instead" 1284 " of marking them as overridden") 1285 1286 UIDPOOL_OPT = cli_option("--uid-pool", default=None, 1287 action="store", dest="uid_pool", 1288 help=("A list of user-ids or user-id" 1289 " ranges separated by commas")) 1290 1291 ADD_UIDS_OPT = cli_option("--add-uids", default=None, 1292 action="store", dest="add_uids", 1293 help=("A list of user-ids or user-id" 1294 " ranges separated by commas, to be" 1295 " added to the user-id pool")) 1296 1297 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None, 1298 action="store", dest="remove_uids", 1299 help=("A list of user-ids or user-id" 1300 " ranges separated by commas, to be" 1301 " removed from the user-id pool")) 1302 1303 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None, 1304 action="store", dest="reserved_lvs", 1305 help=("A comma-separated list of reserved" 1306 " logical volumes 
names, that will be" 1307 " ignored by cluster verify")) 1308 1309 ROMAN_OPT = cli_option("--roman", 1310 dest="roman_integers", default=False, 1311 action="store_true", 1312 help="Use roman numbers for positive integers") 1313 1314 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper", 1315 action="store", default=None, 1316 help="Specifies usermode helper for DRBD") 1317 1318 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage", 1319 action="store_false", default=True, 1320 help="Disable support for DRBD") 1321 1322 PRIMARY_IP_VERSION_OPT = \ 1323 cli_option("--primary-ip-version", default=constants.IP4_VERSION, 1324 action="store", dest="primary_ip_version", 1325 metavar="%d|%d" % (constants.IP4_VERSION, 1326 constants.IP6_VERSION), 1327 help="Cluster-wide IP version for primary IP") 1328 1329 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority", 1330 metavar="|".join(name for name, _ in _PRIORITY_NAMES), 1331 choices=_PRIONAME_TO_VALUE.keys(), 1332 help="Priority for opcode processing") 1333 1334 HID_OS_OPT = cli_option("--hidden", dest="hidden", 1335 type="bool", default=None, metavar=_YORNO, 1336 help="Sets the hidden flag on the OS") 1337 1338 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted", 1339 type="bool", default=None, metavar=_YORNO, 1340 help="Sets the blacklisted flag on the OS") 1341 1342 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None, 1343 type="bool", metavar=_YORNO, 1344 dest="prealloc_wipe_disks", 1345 help=("Wipe disks prior to instance" 1346 " creation")) 1347 1348 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams", 1349 type="keyval", default=None, 1350 help="Node parameters") 1351 1352 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy", 1353 action="store", metavar="POLICY", default=None, 1354 help="Allocation policy for the node group") 1355 1356 NODE_POWERED_OPT = cli_option("--node-powered", default=None, 1357 type="bool", metavar=_YORNO, 1358 dest="node_powered", 1359 help="Specify if the SoR for node is powered") 1360 1361 OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int", 1362 default=constants.OOB_TIMEOUT, 1363 help="Maximum time to wait for out-of-band helper") 1364 1365 POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float", 1366 default=constants.OOB_POWER_DELAY, 1367 help="Time in seconds to wait between power-ons") 1368 1369 FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter", 1370 action="store_true", default=False, 1371 help=("Whether command argument should be treated" 1372 " as filter")) 1373 1374 NO_REMEMBER_OPT = cli_option("--no-remember", 1375 dest="no_remember", 1376 action="store_true", default=False, 1377 help="Perform but do not record the change" 1378 " in the configuration") 1379 1380 PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only", 1381 default=False, action="store_true", 1382 help="Evacuate primary instances only") 1383 1384 SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only", 1385 default=False, action="store_true", 1386 help="Evacuate secondary instances only" 1387 " (applies only to internally mirrored" 1388 " disk templates, e.g. 
%s)" % 1389 utils.CommaJoin(constants.DTS_INT_MIRROR)) 1390 1391 STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused", 1392 action="store_true", default=False, 1393 help="Pause instance at startup") 1394 1395 TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>", 1396 help="Destination node group (name or uuid)", 1397 default=None, action="append", 1398 completion_suggest=OPT_COMPL_ONE_NODEGROUP) 1399 1400 IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[], 1401 action="append", dest="ignore_errors", 1402 choices=list(constants.CV_ALL_ECODES_STRINGS), 1403 help="Error code to be ignored") 1404 1405 DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state", 1406 action="append", 1407 help=("Specify disk state information in the" 1408 " format" 1409 " storage_type/identifier:option=value,...;" 1410 " note this is unused for now"), 1411 type="identkeyval") 1412 1413 HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state", 1414 action="append", 1415 help=("Specify hypervisor state information in the" 1416 " format hypervisor:option=value,...;" 1417 " note this is unused for now"), 1418 type="identkeyval") 1419 1420 IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy", 1421 action="store_true", default=False, 1422 help="Ignore instance policy violations") 1423 1424 RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem", 1425 help="Sets the instance's runtime memory," 1426 " ballooning it up or down to the new value", 1427 default=None, type="unit", metavar="<size>") 1428 1429 ABSOLUTE_OPT = cli_option("--absolute", dest="absolute", 1430 action="store_true", default=False, 1431 help="Marks the grow as absolute instead of the" 1432 " (default) relative mode") 1433 1434 #: Options provided by all commands 1435 COMMON_OPTS = [DEBUG_OPT] 1436 1437 # common options for creating instances. add and import then add their own 1438 # specific ones. 1439 COMMON_CREATE_OPTS = [ 1440 BACKEND_OPT, 1441 DISK_OPT, 1442 DISK_TEMPLATE_OPT, 1443 FILESTORE_DIR_OPT, 1444 FILESTORE_DRIVER_OPT, 1445 HYPERVISOR_OPT, 1446 IALLOCATOR_OPT, 1447 NET_OPT, 1448 NODE_PLACEMENT_OPT, 1449 NOIPCHECK_OPT, 1450 NONAMECHECK_OPT, 1451 NONICS_OPT, 1452 NWSYNC_OPT, 1453 OSPARAMS_OPT, 1454 OS_SIZE_OPT, 1455 SUBMIT_OPT, 1456 TAG_ADD_OPT, 1457 DRY_RUN_OPT, 1458 PRIORITY_OPT, 1459 ] 1460 1461 # common instance policy options 1462 INSTANCE_POLICY_OPTS = [ 1463 SPECS_CPU_COUNT_OPT, 1464 SPECS_DISK_COUNT_OPT, 1465 SPECS_DISK_SIZE_OPT, 1466 SPECS_MEM_SIZE_OPT, 1467 SPECS_NIC_COUNT_OPT, 1468 IPOLICY_DISK_TEMPLATES, 1469 IPOLICY_VCPU_RATIO, 1470 IPOLICY_SPINDLE_RATIO, 1471 ]
1472 1473 1474 -def _ParseArgs(argv, commands, aliases, env_override):
1475 """Parser for the command line arguments. 1476 1477 This function parses the arguments and returns the function which 1478 must be executed together with its (modified) arguments. 1479 1480 @param argv: the command line 1481 @param commands: dictionary with special contents, see the design 1482 doc for cmdline handling 1483 @param aliases: dictionary with command aliases {'alias': 'target, ...} 1484 @param env_override: list of env variables allowed for default args 1485 1486 """ 1487 assert not (env_override - set(commands)) 1488 1489 if len(argv) == 0: 1490 binary = "<command>" 1491 else: 1492 binary = argv[0].split("/")[-1] 1493 1494 if len(argv) > 1 and argv[1] == "--version": 1495 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION, 1496 constants.RELEASE_VERSION) 1497 # Quit right away. That way we don't have to care about this special 1498 # argument. optparse.py does it the same. 1499 sys.exit(0) 1500 1501 if len(argv) < 2 or not (argv[1] in commands or 1502 argv[1] in aliases): 1503 # let's do a nice thing 1504 sortedcmds = commands.keys() 1505 sortedcmds.sort() 1506 1507 ToStdout("Usage: %s {command} [options...] [argument...]", binary) 1508 ToStdout("%s <command> --help to see details, or man %s", binary, binary) 1509 ToStdout("") 1510 1511 # compute the max line length for cmd + usage 1512 mlen = max([len(" %s" % cmd) for cmd in commands]) 1513 mlen = min(60, mlen) # should not get here... 1514 1515 # and format a nice command list 1516 ToStdout("Commands:") 1517 for cmd in sortedcmds: 1518 cmdstr = " %s" % (cmd,) 1519 help_text = commands[cmd][4] 1520 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen) 1521 ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0)) 1522 for line in help_lines: 1523 ToStdout("%-*s %s", mlen, "", line) 1524 1525 ToStdout("") 1526 1527 return None, None, None 1528 1529 # get command, unalias it, and look it up in commands 1530 cmd = argv.pop(1) 1531 if cmd in aliases: 1532 if cmd in commands: 1533 raise errors.ProgrammerError("Alias '%s' overrides an existing" 1534 " command" % cmd) 1535 1536 if aliases[cmd] not in commands: 1537 raise errors.ProgrammerError("Alias '%s' maps to non-existing" 1538 " command '%s'" % (cmd, aliases[cmd])) 1539 1540 cmd = aliases[cmd] 1541 1542 if cmd in env_override: 1543 args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper() 1544 env_args = os.environ.get(args_env_name) 1545 if env_args: 1546 argv = utils.InsertAtPos(argv, 1, shlex.split(env_args)) 1547 1548 func, args_def, parser_opts, usage, description = commands[cmd] 1549 parser = OptionParser(option_list=parser_opts + COMMON_OPTS, 1550 description=description, 1551 formatter=TitledHelpFormatter(), 1552 usage="%%prog %s %s" % (cmd, usage)) 1553 parser.disable_interspersed_args() 1554 options, args = parser.parse_args(args=argv[1:]) 1555 1556 if not _CheckArguments(cmd, args_def, args): 1557 return None, None, None 1558 1559 return func, options, args
1560
1561 1562 -def _CheckArguments(cmd, args_def, args):
1563 """Verifies the arguments using the argument definition. 1564 1565 Algorithm: 1566 1567 1. Abort with error if values specified by user but none expected. 1568 1569 1. For each argument in definition 1570 1571 1. Keep running count of minimum number of values (min_count) 1572 1. Keep running count of maximum number of values (max_count) 1573 1. If it has an unlimited number of values 1574 1575 1. Abort with error if it's not the last argument in the definition 1576 1577 1. If last argument has limited number of values 1578 1579 1. Abort with error if number of values doesn't match or is too large 1580 1581 1. Abort with error if user didn't pass enough values (min_count) 1582 1583 """ 1584 if args and not args_def: 1585 ToStderr("Error: Command %s expects no arguments", cmd) 1586 return False 1587 1588 min_count = None 1589 max_count = None 1590 check_max = None 1591 1592 last_idx = len(args_def) - 1 1593 1594 for idx, arg in enumerate(args_def): 1595 if min_count is None: 1596 min_count = arg.min 1597 elif arg.min is not None: 1598 min_count += arg.min 1599 1600 if max_count is None: 1601 max_count = arg.max 1602 elif arg.max is not None: 1603 max_count += arg.max 1604 1605 if idx == last_idx: 1606 check_max = (arg.max is not None) 1607 1608 elif arg.max is None: 1609 raise errors.ProgrammerError("Only the last argument can have max=None") 1610 1611 if check_max: 1612 # Command with exact number of arguments 1613 if (min_count is not None and max_count is not None and 1614 min_count == max_count and len(args) != min_count): 1615 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count) 1616 return False 1617 1618 # Command with limited number of arguments 1619 if max_count is not None and len(args) > max_count: 1620 ToStderr("Error: Command %s expects only %d argument(s)", 1621 cmd, max_count) 1622 return False 1623 1624 # Command with some required arguments 1625 if min_count is not None and len(args) < min_count: 1626 ToStderr("Error: Command %s expects at least %d argument(s)", 1627 cmd, min_count) 1628 return False 1629 1630 return True
1631
1632
1633  def SplitNodeOption(value):
1634    """Splits the value of a --node option.
1635
1636    """
1637    if value and ":" in value:
1638      return value.split(":", 1)
1639    else:
1640      return (value, None)
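
A short usage sketch (host names are illustrative):

from ganeti.cli import SplitNodeOption

print SplitNodeOption("node1.example.com:node2.example.com")
# -> primary "node1.example.com", secondary "node2.example.com"
print SplitNodeOption("node1.example.com")
# -> ("node1.example.com", None); no secondary was given
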
1641
1642
1643  def CalculateOSNames(os_name, os_variants):
1644    """Calculates all the names an OS can be called, according to its variants.
1645
1646    @type os_name: string
1647    @param os_name: base name of the os
1648    @type os_variants: list or None
1649    @param os_variants: list of supported variants
1650    @rtype: list
1651    @return: list of valid names
1652
1653    """
1654    if os_variants:
1655      return ["%s+%s" % (os_name, v) for v in os_variants]
1656    else:
1657      return [os_name]
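
For example (a sketch; the OS name and variants are illustrative):

from ganeti.cli import CalculateOSNames

print CalculateOSNames("debootstrap", ["default", "minimal"])
# -> ['debootstrap+default', 'debootstrap+minimal']
print CalculateOSNames("debootstrap", None)
# -> ['debootstrap']
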
1658
1659
1660  def ParseFields(selected, default):
1661    """Parses the values of "--field"-like options.
1662
1663    @type selected: string or None
1664    @param selected: User-selected options
1665    @type default: list
1666    @param default: Default fields
1667
1668    """
1669    if selected is None:
1670      return default
1671
1672    if selected.startswith("+"):
1673      return default + selected[1:].split(",")
1674
1675    return selected.split(",")
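
A brief sketch of the three cases handled above (field names are illustrative):

from ganeti.cli import ParseFields

DEFAULT_FIELDS = ["name", "pnode", "status"]

print ParseFields(None, DEFAULT_FIELDS)          # the defaults, unchanged
print ParseFields("+oper_ram", DEFAULT_FIELDS)   # defaults plus "oper_ram"
print ParseFields("name,os", DEFAULT_FIELDS)     # -> ['name', 'os']
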
1676
1677
1678  UsesRPC = rpc.RunWithRPC
1679 1680 1681 -def AskUser(text, choices=None):
1682 """Ask the user a question. 1683 1684 @param text: the question to ask 1685 1686 @param choices: list with elements tuples (input_char, return_value, 1687 description); if not given, it will default to: [('y', True, 1688 'Perform the operation'), ('n', False, 'Do no do the operation')]; 1689 note that the '?' char is reserved for help 1690 1691 @return: one of the return values from the choices list; if input is 1692 not possible (i.e. not running with a tty, we return the last 1693 entry from the list 1694 1695 """ 1696 if choices is None: 1697 choices = [("y", True, "Perform the operation"), 1698 ("n", False, "Do not perform the operation")] 1699 if not choices or not isinstance(choices, list): 1700 raise errors.ProgrammerError("Invalid choices argument to AskUser") 1701 for entry in choices: 1702 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?": 1703 raise errors.ProgrammerError("Invalid choices element to AskUser") 1704 1705 answer = choices[-1][1] 1706 new_text = [] 1707 for line in text.splitlines(): 1708 new_text.append(textwrap.fill(line, 70, replace_whitespace=False)) 1709 text = "\n".join(new_text) 1710 try: 1711 f = file("/dev/tty", "a+") 1712 except IOError: 1713 return answer 1714 try: 1715 chars = [entry[0] for entry in choices] 1716 chars[-1] = "[%s]" % chars[-1] 1717 chars.append("?") 1718 maps = dict([(entry[0], entry[1]) for entry in choices]) 1719 while True: 1720 f.write(text) 1721 f.write("\n") 1722 f.write("/".join(chars)) 1723 f.write(": ") 1724 line = f.readline(2).strip().lower() 1725 if line in maps: 1726 answer = maps[line] 1727 break 1728 elif line == "?": 1729 for entry in choices: 1730 f.write(" %s - %s\n" % (entry[0], entry[2])) 1731 f.write("\n") 1732 continue 1733 finally: 1734 f.close() 1735 return answer
1736
1737 1738 -class JobSubmittedException(Exception):
1739 """Job was submitted, client should exit. 1740 1741 This exception has one argument, the ID of the job that was 1742 submitted. The handler should print this ID. 1743 1744 This is not an error, just a structured way to exit from clients. 1745 1746 """
1747
1748 1749 -def SendJob(ops, cl=None):
1750 """Function to submit an opcode without waiting for the results. 1751 1752 @type ops: list 1753 @param ops: list of opcodes 1754 @type cl: luxi.Client 1755 @param cl: the luxi client to use for communicating with the master; 1756 if None, a new client will be created 1757 1758 """ 1759 if cl is None: 1760 cl = GetClient() 1761 1762 job_id = cl.SubmitJob(ops) 1763 1764 return job_id
1765
1766 1767 -def GenericPollJob(job_id, cbs, report_cbs):
1768 """Generic job-polling function. 1769 1770 @type job_id: number 1771 @param job_id: Job ID 1772 @type cbs: Instance of L{JobPollCbBase} 1773 @param cbs: Data callbacks 1774 @type report_cbs: Instance of L{JobPollReportCbBase} 1775 @param report_cbs: Reporting callbacks 1776 1777 """ 1778 prev_job_info = None 1779 prev_logmsg_serial = None 1780 1781 status = None 1782 1783 while True: 1784 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info, 1785 prev_logmsg_serial) 1786 if not result: 1787 # job not found, go away! 1788 raise errors.JobLost("Job with id %s lost" % job_id) 1789 1790 if result == constants.JOB_NOTCHANGED: 1791 report_cbs.ReportNotChanged(job_id, status) 1792 1793 # Wait again 1794 continue 1795 1796 # Split result, a tuple of (field values, log entries) 1797 (job_info, log_entries) = result 1798 (status, ) = job_info 1799 1800 if log_entries: 1801 for log_entry in log_entries: 1802 (serial, timestamp, log_type, message) = log_entry 1803 report_cbs.ReportLogMessage(job_id, serial, timestamp, 1804 log_type, message) 1805 prev_logmsg_serial = max(prev_logmsg_serial, serial) 1806 1807 # TODO: Handle canceled and archived jobs 1808 elif status in (constants.JOB_STATUS_SUCCESS, 1809 constants.JOB_STATUS_ERROR, 1810 constants.JOB_STATUS_CANCELING, 1811 constants.JOB_STATUS_CANCELED): 1812 break 1813 1814 prev_job_info = job_info 1815 1816 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"]) 1817 if not jobs: 1818 raise errors.JobLost("Job with id %s lost" % job_id) 1819 1820 status, opstatus, result = jobs[0] 1821 1822 if status == constants.JOB_STATUS_SUCCESS: 1823 return result 1824 1825 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED): 1826 raise errors.OpExecError("Job was canceled") 1827 1828 has_ok = False 1829 for idx, (status, msg) in enumerate(zip(opstatus, result)): 1830 if status == constants.OP_STATUS_SUCCESS: 1831 has_ok = True 1832 elif status == constants.OP_STATUS_ERROR: 1833 errors.MaybeRaise(msg) 1834 1835 if has_ok: 1836 raise errors.OpExecError("partial failure (opcode %d): %s" % 1837 (idx, msg)) 1838 1839 raise errors.OpExecError(str(msg)) 1840 1841 # default failure mode 1842 raise errors.OpExecError(result)
1843
1844 1845 -class JobPollCbBase:
1846 """Base class for L{GenericPollJob} callbacks. 1847 1848 """
1849 - def __init__(self):
1850 """Initializes this class. 1851 1852 """
1853
1854 - def WaitForJobChangeOnce(self, job_id, fields, 1855 prev_job_info, prev_log_serial):
1856 """Waits for changes on a job. 1857 1858 """ 1859 raise NotImplementedError()
1860
1861 - def QueryJobs(self, job_ids, fields):
1862 """Returns the selected fields for the selected job IDs. 1863 1864 @type job_ids: list of numbers 1865 @param job_ids: Job IDs 1866 @type fields: list of strings 1867 @param fields: Fields 1868 1869 """ 1870 raise NotImplementedError()
1871
1872 1873 -class JobPollReportCbBase:
1874 """Base class for L{GenericPollJob} reporting callbacks. 1875 1876 """
1877 - def __init__(self):
1878 """Initializes this class. 1879 1880 """
1881
1882 - def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1883 """Handles a log message. 1884 1885 """ 1886 raise NotImplementedError()
1887
1888 - def ReportNotChanged(self, job_id, status):
1889 """Called if a job hasn't changed in a while. 1890 1891 @type job_id: number 1892 @param job_id: Job ID 1893 @type status: string or None 1894 @param status: Job status if available 1895 1896 """ 1897 raise NotImplementedError()
1898
1899 1900 -class _LuxiJobPollCb(JobPollCbBase):
1901 - def __init__(self, cl):
1902 """Initializes this class. 1903 1904 """ 1905 JobPollCbBase.__init__(self) 1906 self.cl = cl
1907
1908 - def WaitForJobChangeOnce(self, job_id, fields, 1909 prev_job_info, prev_log_serial):
1910 """Waits for changes on a job. 1911 1912 """ 1913 return self.cl.WaitForJobChangeOnce(job_id, fields, 1914 prev_job_info, prev_log_serial)
1915
1916 - def QueryJobs(self, job_ids, fields):
1917 """Returns the selected fields for the selected job IDs. 1918 1919 """ 1920 return self.cl.QueryJobs(job_ids, fields)
1921
1922 1923 -class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1924 - def __init__(self, feedback_fn):
1925 """Initializes this class. 1926 1927 """ 1928 JobPollReportCbBase.__init__(self) 1929 1930 self.feedback_fn = feedback_fn 1931 1932 assert callable(feedback_fn)
1933
1934 - def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1935 """Handles a log message. 1936 1937 """ 1938 self.feedback_fn((timestamp, log_type, log_msg))
1939
1940 - def ReportNotChanged(self, job_id, status):
1941 """Called if a job hasn't changed in a while. 1942 1943 """
1944 # Ignore
1945 1946 1947 -class StdioJobPollReportCb(JobPollReportCbBase):
1948 - def __init__(self):
1949 """Initializes this class. 1950 1951 """ 1952 JobPollReportCbBase.__init__(self) 1953 1954 self.notified_queued = False 1955 self.notified_waitlock = False
1956
1957 - def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1958 """Handles a log message. 1959 1960 """ 1961 ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)), 1962 FormatLogMessage(log_type, log_msg))
1963
1964 - def ReportNotChanged(self, job_id, status):
1965 """Called if a job hasn't changed in a while. 1966 1967 """ 1968 if status is None: 1969 return 1970 1971 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued: 1972 ToStderr("Job %s is waiting in queue", job_id) 1973 self.notified_queued = True 1974 1975 elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock: 1976 ToStderr("Job %s is trying to acquire all necessary locks", job_id) 1977 self.notified_waitlock = True
1978
1979 1980 -def FormatLogMessage(log_type, log_msg):
1981 """Formats a job message according to its type. 1982 1983 """ 1984 if log_type != constants.ELOG_MESSAGE: 1985 log_msg = str(log_msg) 1986 1987 return utils.SafeEncode(log_msg)
1988
1989 1990 -def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1991 """Function to poll for the result of a job. 1992 1993 @type job_id: job identifier 1994 @param job_id: the job to poll for results 1995 @type cl: luxi.Client 1996 @param cl: the luxi client to use for communicating with the master; 1997 if None, a new client will be created 1998 1999 """ 2000 if cl is None: 2001 cl = GetClient() 2002 2003 if reporter is None: 2004 if feedback_fn: 2005 reporter = FeedbackFnJobPollReportCb(feedback_fn) 2006 else: 2007 reporter = StdioJobPollReportCb() 2008 elif feedback_fn: 2009 raise errors.ProgrammerError("Can't specify reporter and feedback function") 2010 2011 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
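A sketch combining SendJob and PollJob; the opcode is only an assumption here (any opcode that can be constructed with defaults would do):

  cl = GetClient()
  op = opcodes.OpClusterVerify()     # illustrative opcode, assumed constructible
  job_id = SendJob([op], cl=cl)      # returns immediately with the job ID
  results = PollJob(job_id, cl=cl)   # blocks until done; one entry per opcode
  ToStdout("opcode result: %s", results[0])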
2012
2013 2014 -def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
2015 """Legacy function to submit an opcode. 2016 2017 This is just a simple wrapper over the construction of the processor 2018 instance. It should be extended to better handle feedback and 2019 interaction functions. 2020 2021 """ 2022 if cl is None: 2023 cl = GetClient() 2024 2025 SetGenericOpcodeOpts([op], opts) 2026 2027 job_id = SendJob([op], cl=cl) 2028 2029 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn, 2030 reporter=reporter) 2031 2032 return op_results[0]
2033
2034 2035 -def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
2036 """Wrapper around SubmitOpCode or SendJob. 2037 2038 This function will decide, based on the 'opts' parameter, whether to 2039 submit and wait for the result of the opcode (and return it), or 2040 whether to just send the job and print its identifier. It is used in 2041 order to simplify the implementation of the '--submit' option. 2042 2043 It will also process the opcodes if we're sending them via SendJob 2044 (otherwise SubmitOpCode does it). 2045 2046 """ 2047 if opts and opts.submit_only: 2048 job = [op] 2049 SetGenericOpcodeOpts(job, opts) 2050 job_id = SendJob(job, cl=cl) 2051 raise JobSubmittedException(job_id) 2052 else: 2053 return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
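A sketch of a command handler built around SubmitOrSend (handler name and opcode are hypothetical); with --submit the JobSubmittedException raised here propagates up to GenericMain, which prints the job ID and exits successfully:

  def RunVerify(opts, args):
    op = opcodes.OpClusterVerify()   # illustrative opcode
    SubmitOrSend(op, opts)
    return constants.EXIT_SUCCESS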
2054
2055 2056 -def SetGenericOpcodeOpts(opcode_list, options):
2057 """Processor for generic options. 2058 2059 This function updates the given opcodes based on generic command 2060 line options (like debug, dry-run, etc.). 2061 2062 @param opcode_list: list of opcodes 2063 @param options: command line options or None 2064 @return: None (in-place modification) 2065 2066 """ 2067 if not options: 2068 return 2069 for op in opcode_list: 2070 op.debug_level = options.debug 2071 if hasattr(options, "dry_run"): 2072 op.dry_run = options.dry_run 2073 if getattr(options, "priority", None) is not None: 2074 op.priority = _PRIONAME_TO_VALUE[options.priority]
2075
2076 2077 -def GetClient():
2078 # TODO: Cache object? 2079 try: 2080 client = luxi.Client() 2081 except luxi.NoMasterError: 2082 ss = ssconf.SimpleStore() 2083 2084 # Try to read ssconf file 2085 try: 2086 ss.GetMasterNode() 2087 except errors.ConfigurationError: 2088 raise errors.OpPrereqError("Cluster not initialized or this machine is" 2089 " not part of a cluster") 2090 2091 master, myself = ssconf.GetMasterAndMyself(ss=ss) 2092 if master != myself: 2093 raise errors.OpPrereqError("This is not the master node, please connect" 2094 " to node '%s' and rerun the command" % 2095 master) 2096 raise 2097 return client
2098
2099 2100 -def FormatError(err):
2101 """Return a formatted error message for a given error. 2102 2103 This function takes an exception instance and returns a tuple 2104 consisting of two values: first, the recommended exit code, and 2105 second, a string describing the error message (not 2106 newline-terminated). 2107 2108 """ 2109 retcode = 1 2110 obuf = StringIO() 2111 msg = str(err) 2112 if isinstance(err, errors.ConfigurationError): 2113 txt = "Corrupt configuration file: %s" % msg 2114 logging.error(txt) 2115 obuf.write(txt + "\n") 2116 obuf.write("Aborting.") 2117 retcode = 2 2118 elif isinstance(err, errors.HooksAbort): 2119 obuf.write("Failure: hooks execution failed:\n") 2120 for node, script, out in err.args[0]: 2121 if out: 2122 obuf.write(" node: %s, script: %s, output: %s\n" % 2123 (node, script, out)) 2124 else: 2125 obuf.write(" node: %s, script: %s (no output)\n" % 2126 (node, script)) 2127 elif isinstance(err, errors.HooksFailure): 2128 obuf.write("Failure: hooks general failure: %s" % msg) 2129 elif isinstance(err, errors.ResolverError): 2130 this_host = netutils.Hostname.GetSysName() 2131 if err.args[0] == this_host: 2132 msg = "Failure: can't resolve my own hostname ('%s')" 2133 else: 2134 msg = "Failure: can't resolve hostname '%s'" 2135 obuf.write(msg % err.args[0]) 2136 elif isinstance(err, errors.OpPrereqError): 2137 if len(err.args) == 2: 2138 obuf.write("Failure: prerequisites not met for this" 2139 " operation:\nerror type: %s, error details:\n%s" % 2140 (err.args[1], err.args[0])) 2141 else: 2142 obuf.write("Failure: prerequisites not met for this" 2143 " operation:\n%s" % msg) 2144 elif isinstance(err, errors.OpExecError): 2145 obuf.write("Failure: command execution error:\n%s" % msg) 2146 elif isinstance(err, errors.TagError): 2147 obuf.write("Failure: invalid tag(s) given:\n%s" % msg) 2148 elif isinstance(err, errors.JobQueueDrainError): 2149 obuf.write("Failure: the job queue is marked for drain and doesn't" 2150 " accept new requests\n") 2151 elif isinstance(err, errors.JobQueueFull): 2152 obuf.write("Failure: the job queue is full and doesn't accept new" 2153 " job submissions until old jobs are archived\n") 2154 elif isinstance(err, errors.TypeEnforcementError): 2155 obuf.write("Parameter Error: %s" % msg) 2156 elif isinstance(err, errors.ParameterError): 2157 obuf.write("Failure: unknown/wrong parameter name '%s'" % msg) 2158 elif isinstance(err, luxi.NoMasterError): 2159 obuf.write("Cannot communicate with the master daemon.\nIs it running" 2160 " and listening for connections?") 2161 elif isinstance(err, luxi.TimeoutError): 2162 obuf.write("Timeout while talking to the master daemon. Jobs might have" 2163 " been submitted and will continue to run even if the call" 2164 " timed out. Useful commands in this situation are \"gnt-job" 2165 " list\", \"gnt-job cancel\" and \"gnt-job watch\". 
Error:\n") 2166 obuf.write(msg) 2167 elif isinstance(err, luxi.PermissionError): 2168 obuf.write("It seems you don't have permissions to connect to the" 2169 " master daemon.\nPlease retry as a different user.") 2170 elif isinstance(err, luxi.ProtocolError): 2171 obuf.write("Unhandled protocol error while talking to the master daemon:\n" 2172 "%s" % msg) 2173 elif isinstance(err, errors.JobLost): 2174 obuf.write("Error checking job status: %s" % msg) 2175 elif isinstance(err, errors.QueryFilterParseError): 2176 obuf.write("Error while parsing query filter: %s\n" % err.args[0]) 2177 obuf.write("\n".join(err.GetDetails())) 2178 elif isinstance(err, errors.GenericError): 2179 obuf.write("Unhandled Ganeti error: %s" % msg) 2180 elif isinstance(err, JobSubmittedException): 2181 obuf.write("JobID: %s\n" % err.args[0]) 2182 retcode = 0 2183 else: 2184 obuf.write("Unhandled exception: %s" % msg) 2185 return retcode, obuf.getvalue().rstrip("\n")
2186
2187 2188 -def GenericMain(commands, override=None, aliases=None, 2189 env_override=frozenset()):
2190 """Generic main function for all the gnt-* commands. 2191 2192 @param commands: a dictionary with a special structure, see the design doc 2193 for command line handling. 2194 @param override: if not None, we expect a dictionary with keys that will 2195 override command line options; this can be used to pass 2196 options from the scripts to generic functions 2197 @param aliases: dictionary with command aliases {'alias': 'target, ...} 2198 @param env_override: list of environment names which are allowed to submit 2199 default args for commands 2200 2201 """ 2202 # save the program name and the entire command line for later logging 2203 if sys.argv: 2204 binary = os.path.basename(sys.argv[0]) 2205 if not binary: 2206 binary = sys.argv[0] 2207 2208 if len(sys.argv) >= 2: 2209 logname = utils.ShellQuoteArgs([binary, sys.argv[1]]) 2210 else: 2211 logname = binary 2212 2213 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:]) 2214 else: 2215 binary = "<unknown program>" 2216 cmdline = "<unknown>" 2217 2218 if aliases is None: 2219 aliases = {} 2220 2221 try: 2222 func, options, args = _ParseArgs(sys.argv, commands, aliases, env_override) 2223 except errors.ParameterError, err: 2224 result, err_msg = FormatError(err) 2225 ToStderr(err_msg) 2226 return 1 2227 2228 if func is None: # parse error 2229 return 1 2230 2231 if override is not None: 2232 for key, val in override.iteritems(): 2233 setattr(options, key, val) 2234 2235 utils.SetupLogging(constants.LOG_COMMANDS, logname, debug=options.debug, 2236 stderr_logging=True) 2237 2238 logging.info("Command line: %s", cmdline) 2239 2240 try: 2241 result = func(options, args) 2242 except (errors.GenericError, luxi.ProtocolError, 2243 JobSubmittedException), err: 2244 result, err_msg = FormatError(err) 2245 logging.exception("Error during command processing") 2246 ToStderr(err_msg) 2247 except KeyboardInterrupt: 2248 result = constants.EXIT_FAILURE 2249 ToStderr("Aborted. Note that if the operation created any jobs, they" 2250 " might have been submitted and" 2251 " will continue to run in the background.") 2252 except IOError, err: 2253 if err.errno == errno.EPIPE: 2254 # our terminal went away, we'll exit 2255 sys.exit(constants.EXIT_FAILURE) 2256 else: 2257 raise 2258 2259 return result
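A sketch of a minimal gnt-*-style entry point; the five-element tuple layout (handler, argument definitions, option list, usage suffix, description) and the ARGS_NONE name follow the convention of the existing gnt-* scripts and should be checked against the design doc referenced above:

  def ShowVersion(opts, args):   # hypothetical handler
    ToStdout("example-tool version 0.1")
    return constants.EXIT_SUCCESS

  commands = {
    "version": (ShowVersion, ARGS_NONE, [DEBUG_OPT],
                "", "Show the tool version"),
    }

  if __name__ == "__main__":
    sys.exit(GenericMain(commands))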
2260
2261 2262 -def ParseNicOption(optvalue):
2263 """Parses the value of the --net option(s). 2264 2265 """ 2266 try: 2267 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue) 2268 except (TypeError, ValueError), err: 2269 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err)) 2270 2271 nics = [{}] * nic_max 2272 for nidx, ndict in optvalue: 2273 nidx = int(nidx) 2274 2275 if not isinstance(ndict, dict): 2276 raise errors.OpPrereqError("Invalid nic/%d value: expected dict," 2277 " got %s" % (nidx, ndict)) 2278 2279 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES) 2280 2281 nics[nidx] = ndict 2282 2283 return nics
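A usage sketch; the option parser hands over (index, dict) pairs, and indices that are not mentioned become empty dicts (the parameter names are assumed to be valid NIC parameters):

  ParseNicOption([("0", {"link": "br0"}), ("2", {"mode": "routed"})])
  # -> [{"link": "br0"}, {}, {"mode": "routed"}]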
2284
2285 2286 -def GenericInstanceCreate(mode, opts, args):
2287 """Add an instance to the cluster via either creation or import. 2288 2289 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT 2290 @param opts: the command line options selected by the user 2291 @type args: list 2292 @param args: should contain only one element, the new instance name 2293 @rtype: int 2294 @return: the desired exit code 2295 2296 """ 2297 instance = args[0] 2298 2299 (pnode, snode) = SplitNodeOption(opts.node) 2300 2301 hypervisor = None 2302 hvparams = {} 2303 if opts.hypervisor: 2304 hypervisor, hvparams = opts.hypervisor 2305 2306 if opts.nics: 2307 nics = ParseNicOption(opts.nics) 2308 elif opts.no_nics: 2309 # no nics 2310 nics = [] 2311 elif mode == constants.INSTANCE_CREATE: 2312 # default of one nic, all auto 2313 nics = [{}] 2314 else: 2315 # mode == import 2316 nics = [] 2317 2318 if opts.disk_template == constants.DT_DISKLESS: 2319 if opts.disks or opts.sd_size is not None: 2320 raise errors.OpPrereqError("Diskless instance but disk" 2321 " information passed") 2322 disks = [] 2323 else: 2324 if (not opts.disks and not opts.sd_size 2325 and mode == constants.INSTANCE_CREATE): 2326 raise errors.OpPrereqError("No disk information specified") 2327 if opts.disks and opts.sd_size is not None: 2328 raise errors.OpPrereqError("Please use either the '--disk' or" 2329 " '-s' option") 2330 if opts.sd_size is not None: 2331 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})] 2332 2333 if opts.disks: 2334 try: 2335 disk_max = max(int(didx[0]) + 1 for didx in opts.disks) 2336 except ValueError, err: 2337 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err)) 2338 disks = [{}] * disk_max 2339 else: 2340 disks = [] 2341 for didx, ddict in opts.disks: 2342 didx = int(didx) 2343 if not isinstance(ddict, dict): 2344 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict) 2345 raise errors.OpPrereqError(msg) 2346 elif constants.IDISK_SIZE in ddict: 2347 if constants.IDISK_ADOPT in ddict: 2348 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed" 2349 " (disk %d)" % didx) 2350 try: 2351 ddict[constants.IDISK_SIZE] = \ 2352 utils.ParseUnit(ddict[constants.IDISK_SIZE]) 2353 except ValueError, err: 2354 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" % 2355 (didx, err)) 2356 elif constants.IDISK_ADOPT in ddict: 2357 if mode == constants.INSTANCE_IMPORT: 2358 raise errors.OpPrereqError("Disk adoption not allowed for instance" 2359 " import") 2360 ddict[constants.IDISK_SIZE] = 0 2361 else: 2362 raise errors.OpPrereqError("Missing size or adoption source for" 2363 " disk %d" % didx) 2364 disks[didx] = ddict 2365 2366 if opts.tags is not None: 2367 tags = opts.tags.split(",") 2368 else: 2369 tags = [] 2370 2371 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT) 2372 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES) 2373 2374 if mode == constants.INSTANCE_CREATE: 2375 start = opts.start 2376 os_type = opts.os 2377 force_variant = opts.force_variant 2378 src_node = None 2379 src_path = None 2380 no_install = opts.no_install 2381 identify_defaults = False 2382 elif mode == constants.INSTANCE_IMPORT: 2383 start = False 2384 os_type = None 2385 force_variant = False 2386 src_node = opts.src_node 2387 src_path = opts.src_dir 2388 no_install = None 2389 identify_defaults = opts.identify_defaults 2390 else: 2391 raise errors.ProgrammerError("Invalid creation mode %s" % mode) 2392 2393 op = opcodes.OpInstanceCreate(instance_name=instance, 2394 disks=disks, 2395 
disk_template=opts.disk_template, 2396 nics=nics, 2397 pnode=pnode, snode=snode, 2398 ip_check=opts.ip_check, 2399 name_check=opts.name_check, 2400 wait_for_sync=opts.wait_for_sync, 2401 file_storage_dir=opts.file_storage_dir, 2402 file_driver=opts.file_driver, 2403 iallocator=opts.iallocator, 2404 hypervisor=hypervisor, 2405 hvparams=hvparams, 2406 beparams=opts.beparams, 2407 osparams=opts.osparams, 2408 mode=mode, 2409 start=start, 2410 os_type=os_type, 2411 force_variant=force_variant, 2412 src_node=src_node, 2413 src_path=src_path, 2414 tags=tags, 2415 no_install=no_install, 2416 identify_defaults=identify_defaults, 2417 ignore_ipolicy=opts.ignore_ipolicy) 2418 2419 SubmitOrSend(op, opts) 2420 return 0
2421
2422 2423 -class _RunWhileClusterStoppedHelper:
2424 """Helper class for L{RunWhileClusterStopped} to simplify state management 2425 2426 """
2427 - def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2428 """Initializes this class. 2429 2430 @type feedback_fn: callable 2431 @param feedback_fn: Feedback function 2432 @type cluster_name: string 2433 @param cluster_name: Cluster name 2434 @type master_node: string 2435 @param master_node: Master node name 2436 @type online_nodes: list 2437 @param online_nodes: List of names of online nodes 2438 2439 """ 2440 self.feedback_fn = feedback_fn 2441 self.cluster_name = cluster_name 2442 self.master_node = master_node 2443 self.online_nodes = online_nodes 2444 2445 self.ssh = ssh.SshRunner(self.cluster_name) 2446 2447 self.nonmaster_nodes = [name for name in online_nodes 2448 if name != master_node] 2449 2450 assert self.master_node not in self.nonmaster_nodes
2451
2452 - def _RunCmd(self, node_name, cmd):
2453 """Runs a command on the local or a remote machine. 2454 2455 @type node_name: string 2456 @param node_name: Machine name 2457 @type cmd: list 2458 @param cmd: Command 2459 2460 """ 2461 if node_name is None or node_name == self.master_node: 2462 # No need to use SSH 2463 result = utils.RunCmd(cmd) 2464 else: 2465 result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd)) 2466 2467 if result.failed: 2468 errmsg = ["Failed to run command %s" % result.cmd] 2469 if node_name: 2470 errmsg.append("on node %s" % node_name) 2471 errmsg.append(": exitcode %s and error %s" % 2472 (result.exit_code, result.output)) 2473 raise errors.OpExecError(" ".join(errmsg))
2474
2475 - def Call(self, fn, *args):
2476 """Call function while all daemons are stopped. 2477 2478 @type fn: callable 2479 @param fn: Function to be called 2480 2481 """ 2482 # Pause watcher by acquiring an exclusive lock on watcher state file 2483 self.feedback_fn("Blocking watcher") 2484 watcher_block = utils.FileLock.Open(constants.WATCHER_LOCK_FILE) 2485 try: 2486 # TODO: Currently, this just blocks. There's no timeout. 2487 # TODO: Should it be a shared lock? 2488 watcher_block.Exclusive(blocking=True) 2489 2490 # Stop master daemons, so that no new jobs can come in and all running 2491 # ones are finished 2492 self.feedback_fn("Stopping master daemons") 2493 self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"]) 2494 try: 2495 # Stop daemons on all nodes 2496 for node_name in self.online_nodes: 2497 self.feedback_fn("Stopping daemons on %s" % node_name) 2498 self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"]) 2499 2500 # All daemons are shut down now 2501 try: 2502 return fn(self, *args) 2503 except Exception, err: 2504 _, errmsg = FormatError(err) 2505 logging.exception("Caught exception") 2506 self.feedback_fn(errmsg) 2507 raise 2508 finally: 2509 # Start cluster again, master node last 2510 for node_name in self.nonmaster_nodes + [self.master_node]: 2511 self.feedback_fn("Starting daemons on %s" % node_name) 2512 self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"]) 2513 finally: 2514 # Resume watcher 2515 watcher_block.Close()
2516
2517 2518 -def RunWhileClusterStopped(feedback_fn, fn, *args):
2519 """Calls a function while all cluster daemons are stopped. 2520 2521 @type feedback_fn: callable 2522 @param feedback_fn: Feedback function 2523 @type fn: callable 2524 @param fn: Function to be called when daemons are stopped 2525 2526 """ 2527 feedback_fn("Gathering cluster information") 2528 2529 # This ensures we're running on the master daemon 2530 cl = GetClient() 2531 2532 (cluster_name, master_node) = \ 2533 cl.QueryConfigValues(["cluster_name", "master_node"]) 2534 2535 online_nodes = GetOnlineNodes([], cl=cl) 2536 2537 # Don't keep a reference to the client. The master daemon will go away. 2538 del cl 2539 2540 assert master_node in online_nodes 2541 2542 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node, 2543 online_nodes).Call(fn, *args)
2544
2545 2546 -def GenerateTable(headers, fields, separator, data, 2547 numfields=None, unitfields=None, 2548 units=None):
2549 """Prints a table with headers and different fields. 2550 2551 @type headers: dict 2552 @param headers: dictionary mapping field names to headers for 2553 the table 2554 @type fields: list 2555 @param fields: the field names corresponding to each row in 2556 the data field 2557 @param separator: the separator to be used; if this is None, 2558 the default 'smart' algorithm is used which computes optimal 2559 field width, otherwise just the separator is used between 2560 each field 2561 @type data: list 2562 @param data: a list of lists, each sublist being one row to be output 2563 @type numfields: list 2564 @param numfields: a list with the fields that hold numeric 2565 values and thus should be right-aligned 2566 @type unitfields: list 2567 @param unitfields: a list with the fields that hold numeric 2568 values that should be formatted with the units field 2569 @type units: string or None 2570 @param units: the units we should use for formatting, or None for 2571 automatic choice (human-readable for non-separator usage, otherwise 2572 megabytes); this is a one-letter string 2573 2574 """ 2575 if units is None: 2576 if separator: 2577 units = "m" 2578 else: 2579 units = "h" 2580 2581 if numfields is None: 2582 numfields = [] 2583 if unitfields is None: 2584 unitfields = [] 2585 2586 numfields = utils.FieldSet(*numfields) # pylint: disable=W0142 2587 unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142 2588 2589 format_fields = [] 2590 for field in fields: 2591 if headers and field not in headers: 2592 # TODO: handle better unknown fields (either revert to old 2593 # style of raising exception, or deal more intelligently with 2594 # variable fields) 2595 headers[field] = field 2596 if separator is not None: 2597 format_fields.append("%s") 2598 elif numfields.Matches(field): 2599 format_fields.append("%*s") 2600 else: 2601 format_fields.append("%-*s") 2602 2603 if separator is None: 2604 mlens = [0 for name in fields] 2605 format_str = " ".join(format_fields) 2606 else: 2607 format_str = separator.replace("%", "%%").join(format_fields) 2608 2609 for row in data: 2610 if row is None: 2611 continue 2612 for idx, val in enumerate(row): 2613 if unitfields.Matches(fields[idx]): 2614 try: 2615 val = int(val) 2616 except (TypeError, ValueError): 2617 pass 2618 else: 2619 val = row[idx] = utils.FormatUnit(val, units) 2620 val = row[idx] = str(val) 2621 if separator is None: 2622 mlens[idx] = max(mlens[idx], len(val)) 2623 2624 result = [] 2625 if headers: 2626 args = [] 2627 for idx, name in enumerate(fields): 2628 hdr = headers[name] 2629 if separator is None: 2630 mlens[idx] = max(mlens[idx], len(hdr)) 2631 args.append(mlens[idx]) 2632 args.append(hdr) 2633 result.append(format_str % tuple(args)) 2634 2635 if separator is None: 2636 assert len(mlens) == len(fields) 2637 2638 if fields and not numfields.Matches(fields[-1]): 2639 mlens[-1] = 0 2640 2641 for line in data: 2642 args = [] 2643 if line is None: 2644 line = ["-" for _ in fields] 2645 for idx in range(len(fields)): 2646 if separator is None: 2647 args.append(mlens[idx]) 2648 args.append(line[idx]) 2649 result.append(format_str % tuple(args)) 2650 2651 return result
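A sketch rendering a small table with a right-aligned, unit-formatted column (field and node names are illustrative):

  headers = {"name": "Node", "dfree": "DFree"}
  rows = [["node1.example.com", 102400], ["node2.example.com", 20480]]
  for line in GenerateTable(headers, ["name", "dfree"], None, rows,
                            numfields=["dfree"], unitfields=["dfree"]):
    ToStdout(line)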
2652
2653 2654 -def _FormatBool(value):
2655 """Formats a boolean value as a string. 2656 2657 """ 2658 if value: 2659 return "Y" 2660 return "N"
2661 2662 2663 #: Default formatting for query results; (callback, align right) 2664 _DEFAULT_FORMAT_QUERY = { 2665 constants.QFT_TEXT: (str, False), 2666 constants.QFT_BOOL: (_FormatBool, False), 2667 constants.QFT_NUMBER: (str, True), 2668 constants.QFT_TIMESTAMP: (utils.FormatTime, False), 2669 constants.QFT_OTHER: (str, False), 2670 constants.QFT_UNKNOWN: (str, False), 2671 }
2672 2673 2674 -def _GetColumnFormatter(fdef, override, unit):
2675 """Returns formatting function for a field. 2676 2677 @type fdef: L{objects.QueryFieldDefinition} 2678 @type override: dict 2679 @param override: Dictionary for overriding field formatting functions, 2680 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY} 2681 @type unit: string 2682 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} 2683 @rtype: tuple; (callable, bool) 2684 @return: Returns the function to format a value (takes one parameter) and a 2685 boolean for aligning the value on the right-hand side 2686 2687 """ 2688 fmt = override.get(fdef.name, None) 2689 if fmt is not None: 2690 return fmt 2691 2692 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY 2693 2694 if fdef.kind == constants.QFT_UNIT: 2695 # Can't keep this information in the static dictionary 2696 return (lambda value: utils.FormatUnit(value, unit), True) 2697 2698 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None) 2699 if fmt is not None: 2700 return fmt 2701 2702 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2703
2704 2705 -class _QueryColumnFormatter:
2706 """Callable class for formatting fields of a query. 2707 2708 """
2709 - def __init__(self, fn, status_fn, verbose):
2710 """Initializes this class. 2711 2712 @type fn: callable 2713 @param fn: Formatting function 2714 @type status_fn: callable 2715 @param status_fn: Function to report fields' status 2716 @type verbose: boolean 2717 @param verbose: whether to use verbose field descriptions or not 2718 2719 """ 2720 self._fn = fn 2721 self._status_fn = status_fn 2722 self._verbose = verbose
2723
2724 - def __call__(self, data):
2725 """Returns a field's string representation. 2726 2727 """ 2728 (status, value) = data 2729 2730 # Report status 2731 self._status_fn(status) 2732 2733 if status == constants.RS_NORMAL: 2734 return self._fn(value) 2735 2736 assert value is None, \ 2737 "Found value %r for abnormal status %s" % (value, status) 2738 2739 return FormatResultError(status, self._verbose)
2740
2741 2742 -def FormatResultError(status, verbose):
2743 """Formats result status other than L{constants.RS_NORMAL}. 2744 2745 @param status: The result status 2746 @type verbose: boolean 2747 @param verbose: Whether to return the verbose text 2748 @return: Text of result status 2749 2750 """ 2751 assert status != constants.RS_NORMAL, \ 2752 "FormatResultError called with status equal to constants.RS_NORMAL" 2753 try: 2754 (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status] 2755 except KeyError: 2756 raise NotImplementedError("Unknown status %s" % status) 2757 else: 2758 if verbose: 2759 return verbose_text 2760 return normal_text
2761
2762 2763 -def FormatQueryResult(result, unit=None, format_override=None, separator=None, 2764 header=False, verbose=False):
2765 """Formats data in L{objects.QueryResponse}. 2766 2767 @type result: L{objects.QueryResponse} 2768 @param result: result of query operation 2769 @type unit: string 2770 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}, 2771 see L{utils.text.FormatUnit} 2772 @type format_override: dict 2773 @param format_override: Dictionary for overriding field formatting functions, 2774 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY} 2775 @type separator: string or None 2776 @param separator: String used to separate fields 2777 @type header: bool 2778 @param header: Whether to output header row 2779 @type verbose: boolean 2780 @param verbose: whether to use verbose field descriptions or not 2781 2782 """ 2783 if unit is None: 2784 if separator: 2785 unit = "m" 2786 else: 2787 unit = "h" 2788 2789 if format_override is None: 2790 format_override = {} 2791 2792 stats = dict.fromkeys(constants.RS_ALL, 0) 2793 2794 def _RecordStatus(status): 2795 if status in stats: 2796 stats[status] += 1
2797 2798 columns = [] 2799 for fdef in result.fields: 2800 assert fdef.title and fdef.name 2801 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit) 2802 columns.append(TableColumn(fdef.title, 2803 _QueryColumnFormatter(fn, _RecordStatus, 2804 verbose), 2805 align_right)) 2806 2807 table = FormatTable(result.data, columns, header, separator) 2808 2809 # Collect statistics 2810 assert len(stats) == len(constants.RS_ALL) 2811 assert compat.all(count >= 0 for count in stats.values()) 2812 2813 # Determine overall status. If there was no data, unknown fields must be 2814 # detected via the field definitions. 2815 if (stats[constants.RS_UNKNOWN] or 2816 (not result.data and _GetUnknownFields(result.fields))): 2817 status = QR_UNKNOWN 2818 elif compat.any(count > 0 for key, count in stats.items() 2819 if key != constants.RS_NORMAL): 2820 status = QR_INCOMPLETE 2821 else: 2822 status = QR_NORMAL 2823 2824 return (status, table) 2825
2826 2827 -def _GetUnknownFields(fdefs):
2828 """Returns list of unknown fields included in C{fdefs}. 2829 2830 @type fdefs: list of L{objects.QueryFieldDefinition} 2831 2832 """ 2833 return [fdef for fdef in fdefs 2834 if fdef.kind == constants.QFT_UNKNOWN]
2835
2836 2837 -def _WarnUnknownFields(fdefs):
2838 """Prints a warning to stderr if a query included unknown fields. 2839 2840 @type fdefs: list of L{objects.QueryFieldDefinition} 2841 2842 """ 2843 unknown = _GetUnknownFields(fdefs) 2844 if unknown: 2845 ToStderr("Warning: Queried for unknown fields %s", 2846 utils.CommaJoin(fdef.name for fdef in unknown)) 2847 return True 2848 2849 return False
2850
2851 2852 -def GenericList(resource, fields, names, unit, separator, header, cl=None, 2853 format_override=None, verbose=False, force_filter=False, 2854 namefield=None, qfilter=None):
2855 """Generic implementation for listing all items of a resource. 2856 2857 @param resource: One of L{constants.QR_VIA_LUXI} 2858 @type fields: list of strings 2859 @param fields: List of fields to query for 2860 @type names: list of strings 2861 @param names: Names of items to query for 2862 @type unit: string or None 2863 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or 2864 None for automatic choice (human-readable for non-separator usage, 2865 otherwise megabytes); this is a one-letter string 2866 @type separator: string or None 2867 @param separator: String used to separate fields 2868 @type header: bool 2869 @param header: Whether to show header row 2870 @type force_filter: bool 2871 @param force_filter: Whether to always treat names as filter 2872 @type format_override: dict 2873 @param format_override: Dictionary for overriding field formatting functions, 2874 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY} 2875 @type verbose: boolean 2876 @param verbose: whether to use verbose field descriptions or not 2877 @type namefield: string 2878 @param namefield: Name of field to use for simple filters (see 2879 L{qlang.MakeFilter} for details) 2880 @type qfilter: list or None 2881 @param qfilter: Query filter (in addition to names) 2882 2883 """ 2884 if not names: 2885 names = None 2886 2887 namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield) 2888 2889 if qfilter is None: 2890 qfilter = namefilter 2891 elif namefilter is not None: 2892 qfilter = [qlang.OP_AND, namefilter, qfilter] 2893 2894 if cl is None: 2895 cl = GetClient() 2896 2897 response = cl.Query(resource, fields, qfilter) 2898 2899 found_unknown = _WarnUnknownFields(response.fields) 2900 2901 (status, data) = FormatQueryResult(response, unit=unit, separator=separator, 2902 header=header, 2903 format_override=format_override, 2904 verbose=verbose) 2905 2906 for line in data: 2907 ToStdout(line) 2908 2909 assert ((found_unknown and status == QR_UNKNOWN) or 2910 (not found_unknown and status != QR_UNKNOWN)) 2911 2912 if status == QR_UNKNOWN: 2913 return constants.EXIT_UNKNOWN_FIELD 2914 2915 # TODO: Should the list command fail if not all data could be collected? 2916 return constants.EXIT_SUCCESS
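A sketch listing two fields for one node with human-readable units; the resource constant is the same one used by GetOnlineNodes below, while the field names and node name are assumptions:

  GenericList(constants.QR_NODE, ["name", "dfree"], ["node1.example.com"],
              "h", None, True, namefield="name")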
2917
2918 2919 -def GenericListFields(resource, fields, separator, header, cl=None):
2920 """Generic implementation for listing fields for a resource. 2921 2922 @param resource: One of L{constants.QR_VIA_LUXI} 2923 @type fields: list of strings 2924 @param fields: List of fields to query for 2925 @type separator: string or None 2926 @param separator: String used to separate fields 2927 @type header: bool 2928 @param header: Whether to show header row 2929 2930 """ 2931 if cl is None: 2932 cl = GetClient() 2933 2934 if not fields: 2935 fields = None 2936 2937 response = cl.QueryFields(resource, fields) 2938 2939 found_unknown = _WarnUnknownFields(response.fields) 2940 2941 columns = [ 2942 TableColumn("Name", str, False), 2943 TableColumn("Title", str, False), 2944 TableColumn("Description", str, False), 2945 ] 2946 2947 rows = [[fdef.name, fdef.title, fdef.doc] for fdef in response.fields] 2948 2949 for line in FormatTable(rows, columns, header, separator): 2950 ToStdout(line) 2951 2952 if found_unknown: 2953 return constants.EXIT_UNKNOWN_FIELD 2954 2955 return constants.EXIT_SUCCESS
2956
2957 2958 -class TableColumn:
2959 """Describes a column for L{FormatTable}. 2960 2961 """
2962 - def __init__(self, title, fn, align_right):
2963 """Initializes this class. 2964 2965 @type title: string 2966 @param title: Column title 2967 @type fn: callable 2968 @param fn: Formatting function 2969 @type align_right: bool 2970 @param align_right: Whether to align values on the right-hand side 2971 2972 """ 2973 self.title = title 2974 self.format = fn 2975 self.align_right = align_right
2976
2977 2978 -def _GetColFormatString(width, align_right):
2979 """Returns the format string for a field. 2980 2981 """ 2982 if align_right: 2983 sign = "" 2984 else: 2985 sign = "-" 2986 2987 return "%%%s%ss" % (sign, width)
2988
2989 2990 -def FormatTable(rows, columns, header, separator):
2991 """Formats data as a table. 2992 2993 @type rows: list of lists 2994 @param rows: Row data, one list per row 2995 @type columns: list of L{TableColumn} 2996 @param columns: Column descriptions 2997 @type header: bool 2998 @param header: Whether to show header row 2999 @type separator: string or None 3000 @param separator: String used to separate columns 3001 3002 """ 3003 if header: 3004 data = [[col.title for col in columns]] 3005 colwidth = [len(col.title) for col in columns] 3006 else: 3007 data = [] 3008 colwidth = [0 for _ in columns] 3009 3010 # Format row data 3011 for row in rows: 3012 assert len(row) == len(columns) 3013 3014 formatted = [col.format(value) for value, col in zip(row, columns)] 3015 3016 if separator is None: 3017 # Update column widths 3018 for idx, (oldwidth, value) in enumerate(zip(colwidth, formatted)): 3019 # Modifying a list's items while iterating is fine 3020 colwidth[idx] = max(oldwidth, len(value)) 3021 3022 data.append(formatted) 3023 3024 if separator is not None: 3025 # Return early if a separator is used 3026 return [separator.join(row) for row in data] 3027 3028 if columns and not columns[-1].align_right: 3029 # Avoid unnecessary spaces at end of line 3030 colwidth[-1] = 0 3031 3032 # Build format string 3033 fmt = " ".join([_GetColFormatString(width, col.align_right) 3034 for col, width in zip(columns, colwidth)]) 3035 3036 return [fmt % tuple(row) for row in data]
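A sketch using TableColumn and FormatTable directly; the column titles and rows are illustrative and the second column reuses utils.FormatUnit as its formatter:

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Free", lambda value: utils.FormatUnit(value, "h"), True),
    ]
  rows = [["node1.example.com", 2048], ["node2.example.com", 512]]
  for line in FormatTable(rows, columns, True, None):
    ToStdout(line)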
3037
3038 3039 -def FormatTimestamp(ts):
3040 """Formats a given timestamp. 3041 3042 @type ts: timestamp 3043 @param ts: a timeval-type timestamp, a tuple of seconds and microseconds 3044 3045 @rtype: string 3046 @return: a string with the formatted timestamp 3047 3048 """ 3049 if not isinstance(ts, (tuple, list)) or len(ts) != 2: 3050 return "?" 3051 3052 (sec, usecs) = ts 3053 return utils.FormatTime(sec, usecs=usecs)
3054
3055 3056 -def ParseTimespec(value):
3057 """Parse a time specification. 3058 3059 The following suffixes will be recognized: 3060 3061 - s: seconds 3062 - m: minutes 3063 - h: hours 3064 - d: days 3065 - w: weeks 3066 3067 Without any suffix, the value will be taken to be in seconds. 3068 3069 """ 3070 value = str(value) 3071 if not value: 3072 raise errors.OpPrereqError("Empty time specification passed") 3073 suffix_map = { 3074 "s": 1, 3075 "m": 60, 3076 "h": 3600, 3077 "d": 86400, 3078 "w": 604800, 3079 } 3080 if value[-1] not in suffix_map: 3081 try: 3082 value = int(value) 3083 except (TypeError, ValueError): 3084 raise errors.OpPrereqError("Invalid time specification '%s'" % value) 3085 else: 3086 multiplier = suffix_map[value[-1]] 3087 value = value[:-1] 3088 if not value: # no data left after stripping the suffix 3089 raise errors.OpPrereqError("Invalid time specification (only" 3090 " suffix passed)") 3091 try: 3092 value = int(value) * multiplier 3093 except (TypeError, ValueError): 3094 raise errors.OpPrereqError("Invalid time specification '%s'" % value) 3095 return value
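Illustrative conversions performed by ParseTimespec:

  ParseTimespec("30")   # -> 30      (no suffix: seconds)
  ParseTimespec("2h")   # -> 7200
  ParseTimespec("1w")   # -> 604800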
3096
3097 3098 -def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False, 3099 filter_master=False, nodegroup=None):
3100 """Returns the names of online nodes. 3101 3102 This function will also print a note on stderr with the names of 3103 any offline nodes that are skipped, unless nowarn is set. 3104 3105 @param nodes: if not empty, use only this subset of nodes (minus the 3106 offline ones) 3107 @param cl: if not None, luxi client to use 3108 @type nowarn: boolean 3109 @param nowarn: by default, this function will output a note with the 3110 offline nodes that are skipped; if this parameter is True the 3111 note is not displayed 3112 @type secondary_ips: boolean 3113 @param secondary_ips: if True, return the secondary IPs instead of the 3114 names, useful for doing network traffic over the replication interface 3115 (if any) 3116 @type filter_master: boolean 3117 @param filter_master: if True, do not return the master node in the list 3118 (useful in coordination with secondary_ips where we cannot check our 3119 node name against the list) 3120 @type nodegroup: string 3121 @param nodegroup: If set, only return nodes in this node group 3122 3123 """ 3124 if cl is None: 3125 cl = GetClient() 3126 3127 qfilter = [] 3128 3129 if nodes: 3130 qfilter.append(qlang.MakeSimpleFilter("name", nodes)) 3131 3132 if nodegroup is not None: 3133 qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup], 3134 [qlang.OP_EQUAL, "group.uuid", nodegroup]]) 3135 3136 if filter_master: 3137 qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]]) 3138 3139 if qfilter: 3140 if len(qfilter) > 1: 3141 final_filter = [qlang.OP_AND] + qfilter 3142 else: 3143 assert len(qfilter) == 1 3144 final_filter = qfilter[0] 3145 else: 3146 final_filter = None 3147 3148 result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter) 3149 3150 def _IsOffline(row): 3151 (_, (_, offline), _) = row 3152 return offline
3153 3154 def _GetName(row): 3155 ((_, name), _, _) = row 3156 return name 3157 3158 def _GetSip(row): 3159 (_, _, (_, sip)) = row 3160 return sip 3161 3162 (offline, online) = compat.partition(result.data, _IsOffline) 3163 3164 if offline and not nowarn: 3165 ToStderr("Note: skipping offline node(s): %s" % 3166 utils.CommaJoin(map(_GetName, offline))) 3167 3168 if secondary_ips: 3169 fn = _GetSip 3170 else: 3171 fn = _GetName 3172 3173 return map(fn, online) 3174
3175 3176 -def _ToStream(stream, txt, *args):
3177 """Write a message to a stream, bypassing the logging system 3178 3179 @type stream: file object 3180 @param stream: the file to which we should write 3181 @type txt: str 3182 @param txt: the message 3183 3184 """ 3185 try: 3186 if args: 3187 args = tuple(args) 3188 stream.write(txt % args) 3189 else: 3190 stream.write(txt) 3191 stream.write("\n") 3192 stream.flush() 3193 except IOError, err: 3194 if err.errno == errno.EPIPE: 3195 # our terminal went away, we'll exit 3196 sys.exit(constants.EXIT_FAILURE) 3197 else: 3198 raise
3199
3200 3201 -def ToStdout(txt, *args):
3202 """Write a message to stdout only, bypassing the logging system 3203 3204 This is just a wrapper over _ToStream. 3205 3206 @type txt: str 3207 @param txt: the message 3208 3209 """ 3210 _ToStream(sys.stdout, txt, *args)
3211
3212 3213 -def ToStderr(txt, *args):
3214 """Write a message to stderr only, bypassing the logging system 3215 3216 This is just a wrapper over _ToStream. 3217 3218 @type txt: str 3219 @param txt: the message 3220 3221 """ 3222 _ToStream(sys.stderr, txt, *args)
3223
3224 3225 -class JobExecutor(object):
3226 """Class which manages the submission and execution of multiple jobs. 3227 3228 Note that instances of this class should not be reused between 3229 GetResults() calls. 3230 3231 """
3232 - def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
3233 self.queue = [] 3234 if cl is None: 3235 cl = GetClient() 3236 self.cl = cl 3237 self.verbose = verbose 3238 self.jobs = [] 3239 self.opts = opts 3240 self.feedback_fn = feedback_fn 3241 self._counter = itertools.count()
3242 3243 @staticmethod
3244 - def _IfName(name, fmt):
3245 """Helper function for formatting name. 3246 3247 """ 3248 if name: 3249 return fmt % name 3250 3251 return ""
3252
3253 - def QueueJob(self, name, *ops):
3254 """Record a job for later submit. 3255 3256 @type name: string 3257 @param name: a description of the job, will be used when reporting 3258 job progress and results 3259 """ 3260 SetGenericOpcodeOpts(ops, self.opts) 3261 self.queue.append((self._counter.next(), name, ops))
3262
3263 - def AddJobId(self, name, status, job_id):
3264 """Adds a job ID to the internal queue. 3265 3266 """ 3267 self.jobs.append((self._counter.next(), status, job_id, name))
3268
3269 - def SubmitPending(self, each=False):
3270 """Submit all pending jobs. 3271 3272 """ 3273 if each: 3274 results = [] 3275 for (_, _, ops) in self.queue: 3276 # SubmitJob will remove the success status, but raise an exception if 3277 # the submission fails, so we'll notice that anyway. 3278 results.append([True, self.cl.SubmitJob(ops)[0]]) 3279 else: 3280 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue]) 3281 for ((status, data), (idx, name, _)) in zip(results, self.queue): 3282 self.jobs.append((idx, status, data, name))
3283
3284 - def _ChooseJob(self):
3285 """Choose a non-waiting/queued job to poll next. 3286 3287 """ 3288 assert self.jobs, "_ChooseJob called with empty job list" 3289 3290 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]], 3291 ["status"]) 3292 assert result 3293 3294 for job_data, status in zip(self.jobs, result): 3295 if (isinstance(status, list) and status and 3296 status[0] in (constants.JOB_STATUS_QUEUED, 3297 constants.JOB_STATUS_WAITING, 3298 constants.JOB_STATUS_CANCELING)): 3299 # job is still present and waiting 3300 continue 3301 # good candidate found (either running job or lost job) 3302 self.jobs.remove(job_data) 3303 return job_data 3304 3305 # no job found 3306 return self.jobs.pop(0)
3307
3308 - def GetResults(self):
3309 """Wait for and return the results of all jobs. 3310 3311 @rtype: list 3312 @return: list of tuples (success, job results), in the same order 3313 as the submitted jobs; if a job has failed, instead of the result 3314 there will be the error message 3315 3316 """ 3317 if not self.jobs: 3318 self.SubmitPending() 3319 results = [] 3320 if self.verbose: 3321 ok_jobs = [row[2] for row in self.jobs if row[1]] 3322 if ok_jobs: 3323 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs)) 3324 3325 # first, remove any non-submitted jobs 3326 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1]) 3327 for idx, _, jid, name in failures: 3328 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid) 3329 results.append((idx, False, jid)) 3330 3331 while self.jobs: 3332 (idx, _, jid, name) = self._ChooseJob() 3333 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s")) 3334 try: 3335 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn) 3336 success = True 3337 except errors.JobLost, err: 3338 _, job_result = FormatError(err) 3339 ToStderr("Job %s%s has been archived, cannot check its result", 3340 jid, self._IfName(name, " for %s")) 3341 success = False 3342 except (errors.GenericError, luxi.ProtocolError), err: 3343 _, job_result = FormatError(err) 3344 success = False 3345 # the error message will always be shown, verbose or not 3346 ToStderr("Job %s%s has failed: %s", 3347 jid, self._IfName(name, " for %s"), job_result) 3348 3349 results.append((idx, success, job_result)) 3350 3351 # sort based on the index, then drop it 3352 results.sort() 3353 results = [i[1:] for i in results] 3354 3355 return results
3356
3357 - def WaitOrShow(self, wait):
3358 """Wait for job results or only print the job IDs. 3359 3360 @type wait: boolean 3361 @param wait: whether to wait or not 3362 3363 """ 3364 if wait: 3365 return self.GetResults() 3366 else: 3367 if not self.jobs: 3368 self.SubmitPending() 3369 for _, status, result, name in self.jobs: 3370 if status: 3371 ToStdout("%s: %s", result, name) 3372 else: 3373 ToStderr("Failure for %s: %s", name, result) 3374 return [row[1:3] for row in self.jobs]
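A sketch running two jobs in parallel through JobExecutor; the instance names and the opcode are assumptions used only for illustration:

  je = JobExecutor(cl=GetClient())
  for instance in ["inst1.example.com", "inst2.example.com"]:
    je.QueueJob(instance, opcodes.OpInstanceStartup(instance_name=instance))
  for (success, result) in je.GetResults():   # same order as queued
    if not success:
      ToStderr("startup failed: %s", result)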
3375
3376 3377 -def FormatParameterDict(buf, param_dict, actual, level=1):
3378 """Formats a parameter dictionary. 3379 3380 @type buf: L{StringIO} 3381 @param buf: the buffer into which to write 3382 @type param_dict: dict 3383 @param param_dict: the explicitly set (own) parameters 3384 @type actual: dict 3385 @param actual: the current parameter set (including defaults) 3386 @param level: Level of indent 3387 3388 """ 3389 indent = " " * level 3390 3391 for key in sorted(actual): 3392 data = actual[key] 3393 buf.write("%s- %s:" % (indent, key)) 3394 3395 if isinstance(data, dict) and data: 3396 buf.write("\n") 3397 FormatParameterDict(buf, param_dict.get(key, {}), data, 3398 level=level + 1) 3399 else: 3400 val = param_dict.get(key, "default (%s)" % data) 3401 buf.write(" %s\n" % val)
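A sketch showing how explicitly set parameters and inherited defaults are rendered (the parameter names are illustrative); StringIO here is the module-level import from cStringIO:

  buf = StringIO()
  own = {"kernel_path": "/boot/vmlinuz"}            # explicitly set
  actual = {"kernel_path": "/boot/vmlinuz",         # own values plus defaults
            "root_path": "/dev/vda1"}
  FormatParameterDict(buf, own, actual)
  ToStdout(buf.getvalue().rstrip("\n"))
  # prints:
  #  - kernel_path: /boot/vmlinuz
  #  - root_path: default (/dev/vda1)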
3402
3403 3404 -def ConfirmOperation(names, list_type, text, extra=""):
3405 """Ask the user to confirm an operation on a list of list_type. 3406 3407 This function is used to request confirmation for doing an operation 3408 on a given list of list_type. 3409 3410 @type names: list 3411 @param names: the list of names that we display when 3412 we ask for confirmation 3413 @type list_type: str 3414 @param list_type: Human readable name for elements in the list (e.g. nodes) 3415 @type text: str 3416 @param text: the operation that the user should confirm 3417 @rtype: boolean 3418 @return: True or False depending on user's confirmation. 3419 3420 """ 3421 count = len(names) 3422 msg = ("The %s will operate on %d %s.\n%s" 3423 "Do you want to continue?" % (text, count, list_type, extra)) 3424 affected = (("\nAffected %s:\n" % list_type) + 3425 "\n".join([" %s" % name for name in names])) 3426 3427 choices = [("y", True, "Yes, execute the %s" % text), 3428 ("n", False, "No, abort the %s" % text)] 3429 3430 if count > 20: 3431 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type)) 3432 question = msg 3433 else: 3434 question = msg + affected 3435 3436 choice = AskUser(question, choices) 3437 if choice == "v": 3438 choices.pop(1) 3439 choice = AskUser(msg + affected, choices) 3440 return choice
3441
3442 3443 -def _MaybeParseUnit(elements):
3444 """Parses and returns a dict of potential values with units. 3445 3446 """ 3447 parsed = {} 3448 for k, v in elements.items(): 3449 if v == constants.VALUE_DEFAULT: 3450 parsed[k] = v 3451 else: 3452 parsed[k] = utils.ParseUnit(v) 3453 return parsed
3454
3455 3456 -def CreateIPolicyFromOpts(ispecs_mem_size=None, 3457 ispecs_cpu_count=None, 3458 ispecs_disk_count=None, 3459 ispecs_disk_size=None, 3460 ispecs_nic_count=None, 3461 ipolicy_disk_templates=None, 3462 ipolicy_vcpu_ratio=None, 3463 ipolicy_spindle_ratio=None, 3464 group_ipolicy=False, 3465 allowed_values=None, 3466 fill_all=False):
3467 """Creation of instance policy based on command line options. 3468 3469 @param fill_all: whether for cluster policies we should ensure that 3470 all values are filled 3471 3472 3473 """ 3474 try: 3475 if ispecs_mem_size: 3476 ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size) 3477 if ispecs_disk_size: 3478 ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size) 3479 except (TypeError, ValueError, errors.UnitParseError), err: 3480 raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size" 3481 " in policy: %s" % 3482 (ispecs_disk_size, ispecs_mem_size, err), 3483 errors.ECODE_INVAL) 3484 3485 # prepare ipolicy dict 3486 ipolicy_transposed = { 3487 constants.ISPEC_MEM_SIZE: ispecs_mem_size, 3488 constants.ISPEC_CPU_COUNT: ispecs_cpu_count, 3489 constants.ISPEC_DISK_COUNT: ispecs_disk_count, 3490 constants.ISPEC_DISK_SIZE: ispecs_disk_size, 3491 constants.ISPEC_NIC_COUNT: ispecs_nic_count, 3492 } 3493 3494 # first, check that the values given are correct 3495 if group_ipolicy: 3496 forced_type = TISPECS_GROUP_TYPES 3497 else: 3498 forced_type = TISPECS_CLUSTER_TYPES 3499 3500 for specs in ipolicy_transposed.values(): 3501 utils.ForceDictType(specs, forced_type, allowed_values=allowed_values) 3502 3503 # then transpose 3504 ipolicy_out = objects.MakeEmptyIPolicy() 3505 for name, specs in ipolicy_transposed.iteritems(): 3506 assert name in constants.ISPECS_PARAMETERS 3507 for key, val in specs.items(): # {min: .. ,max: .., std: ..} 3508 ipolicy_out[key][name] = val 3509 3510 # no filldict for non-dicts 3511 if not group_ipolicy and fill_all: 3512 if ipolicy_disk_templates is None: 3513 ipolicy_disk_templates = constants.DISK_TEMPLATES 3514 if ipolicy_vcpu_ratio is None: 3515 ipolicy_vcpu_ratio = \ 3516 constants.IPOLICY_DEFAULTS[constants.IPOLICY_VCPU_RATIO] 3517 if ipolicy_spindle_ratio is None: 3518 ipolicy_spindle_ratio = \ 3519 constants.IPOLICY_DEFAULTS[constants.IPOLICY_SPINDLE_RATIO] 3520 if ipolicy_disk_templates is not None: 3521 ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates) 3522 if ipolicy_vcpu_ratio is not None: 3523 ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio 3524 if ipolicy_spindle_ratio is not None: 3525 ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio 3526 3527 assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS) 3528 3529 return ipolicy_out
3530