Package ganeti :: Module cli
[hide private]
[frames] | [no frames]

Source Code for Module ganeti.cli

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. 
   5  # 
   6  # This program is free software; you can redistribute it and/or modify 
   7  # it under the terms of the GNU General Public License as published by 
   8  # the Free Software Foundation; either version 2 of the License, or 
   9  # (at your option) any later version. 
  10  # 
  11  # This program is distributed in the hope that it will be useful, but 
  12  # WITHOUT ANY WARRANTY; without even the implied warranty of 
  13  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
  14  # General Public License for more details. 
  15  # 
  16  # You should have received a copy of the GNU General Public License 
  17  # along with this program; if not, write to the Free Software 
  18  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
  19  # 02110-1301, USA. 
  20   
  21   
  22  """Module dealing with command line parsing""" 
  23   
  24   
  25  import sys 
  26  import textwrap 
  27  import os.path 
  28  import time 
  29  import logging 
  30  import errno 
  31  import itertools 
  32  import shlex 
  33  from cStringIO import StringIO 
  34   
  35  from ganeti import utils 
  36  from ganeti import errors 
  37  from ganeti import constants 
  38  from ganeti import opcodes 
  39  from ganeti import luxi 
  40  from ganeti import ssconf 
  41  from ganeti import rpc 
  42  from ganeti import ssh 
  43  from ganeti import compat 
  44  from ganeti import netutils 
  45  from ganeti import qlang 
  46  from ganeti import objects 
  47  from ganeti import pathutils 
  48   
  49  from optparse import (OptionParser, TitledHelpFormatter, 
  50                        Option, OptionValueError) 
  51   
  52   
  53  __all__ = [ 
  54    # Command line options 
  55    "ABSOLUTE_OPT", 
  56    "ADD_UIDS_OPT", 
  57    "ADD_RESERVED_IPS_OPT", 
  58    "ALLOCATABLE_OPT", 
  59    "ALLOC_POLICY_OPT", 
  60    "ALL_OPT", 
  61    "ALLOW_FAILOVER_OPT", 
  62    "AUTO_PROMOTE_OPT", 
  63    "AUTO_REPLACE_OPT", 
  64    "BACKEND_OPT", 
  65    "BLK_OS_OPT", 
  66    "CAPAB_MASTER_OPT", 
  67    "CAPAB_VM_OPT", 
  68    "CLEANUP_OPT", 
  69    "CLUSTER_DOMAIN_SECRET_OPT", 
  70    "CONFIRM_OPT", 
  71    "CP_SIZE_OPT", 
  72    "DEBUG_OPT", 
  73    "DEBUG_SIMERR_OPT", 
  74    "DISKIDX_OPT", 
  75    "DISK_OPT", 
  76    "DISK_PARAMS_OPT", 
  77    "DISK_TEMPLATE_OPT", 
  78    "DRAINED_OPT", 
  79    "DRY_RUN_OPT", 
  80    "DRBD_HELPER_OPT", 
  81    "DST_NODE_OPT", 
  82    "EARLY_RELEASE_OPT", 
  83    "ENABLED_HV_OPT", 
  84    "ENABLED_DISK_TEMPLATES_OPT", 
  85    "ERROR_CODES_OPT", 
  86    "FAILURE_ONLY_OPT", 
  87    "FIELDS_OPT", 
  88    "FILESTORE_DIR_OPT", 
  89    "FILESTORE_DRIVER_OPT", 
  90    "FORCE_FILTER_OPT", 
  91    "FORCE_OPT", 
  92    "FORCE_VARIANT_OPT", 
  93    "GATEWAY_OPT", 
  94    "GATEWAY6_OPT", 
  95    "GLOBAL_FILEDIR_OPT", 
  96    "HID_OS_OPT", 
  97    "GLOBAL_SHARED_FILEDIR_OPT", 
  98    "HVLIST_OPT", 
  99    "HVOPTS_OPT", 
 100    "HYPERVISOR_OPT", 
 101    "IALLOCATOR_OPT", 
 102    "DEFAULT_IALLOCATOR_OPT", 
 103    "IDENTIFY_DEFAULTS_OPT", 
 104    "IGNORE_CONSIST_OPT", 
 105    "IGNORE_ERRORS_OPT", 
 106    "IGNORE_FAILURES_OPT", 
 107    "IGNORE_OFFLINE_OPT", 
 108    "IGNORE_REMOVE_FAILURES_OPT", 
 109    "IGNORE_SECONDARIES_OPT", 
 110    "IGNORE_SIZE_OPT", 
 111    "INCLUDEDEFAULTS_OPT", 
 112    "INTERVAL_OPT", 
 113    "MAC_PREFIX_OPT", 
 114    "MAINTAIN_NODE_HEALTH_OPT", 
 115    "MASTER_NETDEV_OPT", 
 116    "MASTER_NETMASK_OPT", 
 117    "MC_OPT", 
 118    "MIGRATION_MODE_OPT", 
 119    "MODIFY_ETCHOSTS_OPT", 
 120    "NET_OPT", 
 121    "NETWORK_OPT", 
 122    "NETWORK6_OPT", 
 123    "NEW_CLUSTER_CERT_OPT", 
 124    "NEW_CLUSTER_DOMAIN_SECRET_OPT", 
 125    "NEW_CONFD_HMAC_KEY_OPT", 
 126    "NEW_RAPI_CERT_OPT", 
 127    "NEW_PRIMARY_OPT", 
 128    "NEW_SECONDARY_OPT", 
 129    "NEW_SPICE_CERT_OPT", 
 130    "NIC_PARAMS_OPT", 
 131    "NOCONFLICTSCHECK_OPT", 
 132    "NODE_FORCE_JOIN_OPT", 
 133    "NODE_LIST_OPT", 
 134    "NODE_PLACEMENT_OPT", 
 135    "NODEGROUP_OPT", 
 136    "NODE_PARAMS_OPT", 
 137    "NODE_POWERED_OPT", 
 138    "NODRBD_STORAGE_OPT", 
 139    "NOHDR_OPT", 
 140    "NOIPCHECK_OPT", 
 141    "NO_INSTALL_OPT", 
 142    "NONAMECHECK_OPT", 
 143    "NOLVM_STORAGE_OPT", 
 144    "NOMODIFY_ETCHOSTS_OPT", 
 145    "NOMODIFY_SSH_SETUP_OPT", 
 146    "NONICS_OPT", 
 147    "NONLIVE_OPT", 
 148    "NONPLUS1_OPT", 
 149    "NORUNTIME_CHGS_OPT", 
 150    "NOSHUTDOWN_OPT", 
 151    "NOSTART_OPT", 
 152    "NOSSH_KEYCHECK_OPT", 
 153    "NOVOTING_OPT", 
 154    "NO_REMEMBER_OPT", 
 155    "NWSYNC_OPT", 
 156    "OFFLINE_INST_OPT", 
 157    "ONLINE_INST_OPT", 
 158    "ON_PRIMARY_OPT", 
 159    "ON_SECONDARY_OPT", 
 160    "OFFLINE_OPT", 
 161    "OSPARAMS_OPT", 
 162    "OS_OPT", 
 163    "OS_SIZE_OPT", 
 164    "OOB_TIMEOUT_OPT", 
 165    "POWER_DELAY_OPT", 
 166    "PREALLOC_WIPE_DISKS_OPT", 
 167    "PRIMARY_IP_VERSION_OPT", 
 168    "PRIMARY_ONLY_OPT", 
 169    "PRINT_JOBID_OPT", 
 170    "PRIORITY_OPT", 
 171    "RAPI_CERT_OPT", 
 172    "READD_OPT", 
 173    "REASON_OPT", 
 174    "REBOOT_TYPE_OPT", 
 175    "REMOVE_INSTANCE_OPT", 
 176    "REMOVE_RESERVED_IPS_OPT", 
 177    "REMOVE_UIDS_OPT", 
 178    "RESERVED_LVS_OPT", 
 179    "RUNTIME_MEM_OPT", 
 180    "ROMAN_OPT", 
 181    "SECONDARY_IP_OPT", 
 182    "SECONDARY_ONLY_OPT", 
 183    "SELECT_OS_OPT", 
 184    "SEP_OPT", 
 185    "SHOWCMD_OPT", 
 186    "SHOW_MACHINE_OPT", 
 187    "SHUTDOWN_TIMEOUT_OPT", 
 188    "SINGLE_NODE_OPT", 
 189    "SPECS_CPU_COUNT_OPT", 
 190    "SPECS_DISK_COUNT_OPT", 
 191    "SPECS_DISK_SIZE_OPT", 
 192    "SPECS_MEM_SIZE_OPT", 
 193    "SPECS_NIC_COUNT_OPT", 
 194    "SPLIT_ISPECS_OPTS", 
 195    "IPOLICY_STD_SPECS_OPT", 
 196    "IPOLICY_DISK_TEMPLATES", 
 197    "IPOLICY_VCPU_RATIO", 
 198    "SPICE_CACERT_OPT", 
 199    "SPICE_CERT_OPT", 
 200    "SRC_DIR_OPT", 
 201    "SRC_NODE_OPT", 
 202    "SUBMIT_OPT", 
 203    "SUBMIT_OPTS", 
 204    "STARTUP_PAUSED_OPT", 
 205    "STATIC_OPT", 
 206    "SYNC_OPT", 
 207    "TAG_ADD_OPT", 
 208    "TAG_SRC_OPT", 
 209    "TIMEOUT_OPT", 
 210    "TO_GROUP_OPT", 
 211    "UIDPOOL_OPT", 
 212    "USEUNITS_OPT", 
 213    "USE_EXTERNAL_MIP_SCRIPT", 
 214    "USE_REPL_NET_OPT", 
 215    "VERBOSE_OPT", 
 216    "VG_NAME_OPT", 
 217    "WFSYNC_OPT", 
 218    "YES_DOIT_OPT", 
 219    "DISK_STATE_OPT", 
 220    "HV_STATE_OPT", 
 221    "IGNORE_IPOLICY_OPT", 
 222    "INSTANCE_POLICY_OPTS", 
 223    # Generic functions for CLI programs 
 224    "ConfirmOperation", 
 225    "CreateIPolicyFromOpts", 
 226    "GenericMain", 
 227    "GenericInstanceCreate", 
 228    "GenericList", 
 229    "GenericListFields", 
 230    "GetClient", 
 231    "GetOnlineNodes", 
 232    "JobExecutor", 
 233    "JobSubmittedException", 
 234    "ParseTimespec", 
 235    "RunWhileClusterStopped", 
 236    "SubmitOpCode", 
 237    "SubmitOrSend", 
 238    "UsesRPC", 
 239    # Formatting functions 
 240    "ToStderr", "ToStdout", 
 241    "FormatError", 
 242    "FormatQueryResult", 
 243    "FormatParamsDictInfo", 
 244    "FormatPolicyInfo", 
 245    "PrintIPolicyCommand", 
 246    "PrintGenericInfo", 
 247    "GenerateTable", 
 248    "AskUser", 
 249    "FormatTimestamp", 
 250    "FormatLogMessage", 
 251    # Tags functions 
 252    "ListTags", 
 253    "AddTags", 
 254    "RemoveTags", 
 255    # command line options support infrastructure 
 256    "ARGS_MANY_INSTANCES", 
 257    "ARGS_MANY_NODES", 
 258    "ARGS_MANY_GROUPS", 
 259    "ARGS_MANY_NETWORKS", 
 260    "ARGS_NONE", 
 261    "ARGS_ONE_INSTANCE", 
 262    "ARGS_ONE_NODE", 
 263    "ARGS_ONE_GROUP", 
 264    "ARGS_ONE_OS", 
 265    "ARGS_ONE_NETWORK", 
 266    "ArgChoice", 
 267    "ArgCommand", 
 268    "ArgFile", 
 269    "ArgGroup", 
 270    "ArgHost", 
 271    "ArgInstance", 
 272    "ArgJobId", 
 273    "ArgNetwork", 
 274    "ArgNode", 
 275    "ArgOs", 
 276    "ArgExtStorage", 
 277    "ArgSuggest", 
 278    "ArgUnknown", 
 279    "OPT_COMPL_INST_ADD_NODES", 
 280    "OPT_COMPL_MANY_NODES", 
 281    "OPT_COMPL_ONE_IALLOCATOR", 
 282    "OPT_COMPL_ONE_INSTANCE", 
 283    "OPT_COMPL_ONE_NODE", 
 284    "OPT_COMPL_ONE_NODEGROUP", 
 285    "OPT_COMPL_ONE_NETWORK", 
 286    "OPT_COMPL_ONE_OS", 
 287    "OPT_COMPL_ONE_EXTSTORAGE", 
 288    "cli_option", 
 289    "FixHvParams", 
 290    "SplitNodeOption", 
 291    "CalculateOSNames", 
 292    "ParseFields", 
 293    "COMMON_CREATE_OPTS", 
 294    ] 
 295   
 296  NO_PREFIX = "no_" 
 297  UN_PREFIX = "-" 
 298   
 299  #: Priorities (sorted) 
 300  _PRIORITY_NAMES = [ 
 301    ("low", constants.OP_PRIO_LOW), 
 302    ("normal", constants.OP_PRIO_NORMAL), 
 303    ("high", constants.OP_PRIO_HIGH), 
 304    ] 
 305   
 306  #: Priority dictionary for easier lookup 
 307  # TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once 
 308  # we migrate to Python 2.6 
 309  _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES) 
 310   
 311  # Query result status for clients 
 312  (QR_NORMAL, 
 313   QR_UNKNOWN, 
 314   QR_INCOMPLETE) = range(3) 
 315   
 316  #: Maximum batch size for ChooseJob 
 317  _CHOOSE_BATCH = 25 
 318   
 319   
 320  # constants used to create InstancePolicy dictionary 
 321  TISPECS_GROUP_TYPES = { 
 322    constants.ISPECS_MIN: constants.VTYPE_INT, 
 323    constants.ISPECS_MAX: constants.VTYPE_INT, 
 324    } 
 325   
 326  TISPECS_CLUSTER_TYPES = { 
 327    constants.ISPECS_MIN: constants.VTYPE_INT, 
 328    constants.ISPECS_MAX: constants.VTYPE_INT, 
 329    constants.ISPECS_STD: constants.VTYPE_INT, 
 330    } 
 331   
 332  #: User-friendly names for query2 field types 
 333  _QFT_NAMES = { 
 334    constants.QFT_UNKNOWN: "Unknown", 
 335    constants.QFT_TEXT: "Text", 
 336    constants.QFT_BOOL: "Boolean", 
 337    constants.QFT_NUMBER: "Number", 
 338    constants.QFT_UNIT: "Storage size", 
 339    constants.QFT_TIMESTAMP: "Timestamp", 
 340    constants.QFT_OTHER: "Custom", 
 341    } 
342 343 344 -class _Argument:
345 - def __init__(self, min=0, max=None): # pylint: disable=W0622
346 self.min = min 347 self.max = max
348
349 - def __repr__(self):
350 return ("<%s min=%s max=%s>" % 351 (self.__class__.__name__, self.min, self.max))
352
353 354 -class ArgSuggest(_Argument):
355 """Suggesting argument. 356 357 Value can be any of the ones passed to the constructor. 358 359 """ 360 # pylint: disable=W0622
361 - def __init__(self, min=0, max=None, choices=None):
362 _Argument.__init__(self, min=min, max=max) 363 self.choices = choices
364
365 - def __repr__(self):
366 return ("<%s min=%s max=%s choices=%r>" % 367 (self.__class__.__name__, self.min, self.max, self.choices))
368
369 370 -class ArgChoice(ArgSuggest):
371 """Choice argument. 372 373 Value can be any of the ones passed to the constructor. Like L{ArgSuggest}, 374 but value must be one of the choices. 375 376 """
377
378 379 -class ArgUnknown(_Argument):
380 """Unknown argument to program (e.g. determined at runtime). 381 382 """
383
384 385 -class ArgInstance(_Argument):
386 """Instances argument. 387 388 """
389
390 391 -class ArgNode(_Argument):
392 """Node argument. 393 394 """
395
396 397 -class ArgNetwork(_Argument):
398 """Network argument. 399 400 """
401
402 403 -class ArgGroup(_Argument):
404 """Node group argument. 405 406 """
407
408 409 -class ArgJobId(_Argument):
410 """Job ID argument. 411 412 """
413
414 415 -class ArgFile(_Argument):
416 """File path argument. 417 418 """
419
420 421 -class ArgCommand(_Argument):
422 """Command argument. 423 424 """
425
426 427 -class ArgHost(_Argument):
428 """Host argument. 429 430 """
431
432 433 -class ArgOs(_Argument):
434 """OS argument. 435 436 """
437
438 439 -class ArgExtStorage(_Argument):
440 """ExtStorage argument. 441 442 """
443 444 445 ARGS_NONE = [] 446 ARGS_MANY_INSTANCES = [ArgInstance()] 447 ARGS_MANY_NETWORKS = [ArgNetwork()] 448 ARGS_MANY_NODES = [ArgNode()] 449 ARGS_MANY_GROUPS = [ArgGroup()] 450 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)] 451 ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)] 452 ARGS_ONE_NODE = [ArgNode(min=1, max=1)] 453 # TODO 454 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)] 455 ARGS_ONE_OS = [ArgOs(min=1, max=1)]
456 457 458 -def _ExtractTagsObject(opts, args):
459 """Extract the tag type object. 460 461 Note that this function will modify its args parameter. 462 463 """ 464 if not hasattr(opts, "tag_type"): 465 raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject") 466 kind = opts.tag_type 467 if kind == constants.TAG_CLUSTER: 468 retval = kind, None 469 elif kind in (constants.TAG_NODEGROUP, 470 constants.TAG_NODE, 471 constants.TAG_NETWORK, 472 constants.TAG_INSTANCE): 473 if not args: 474 raise errors.OpPrereqError("no arguments passed to the command", 475 errors.ECODE_INVAL) 476 name = args.pop(0) 477 retval = kind, name 478 else: 479 raise errors.ProgrammerError("Unhandled tag type '%s'" % kind) 480 return retval
481
482 483 -def _ExtendTags(opts, args):
484 """Extend the args if a source file has been given. 485 486 This function will extend the tags with the contents of the file 487 passed in the 'tags_source' attribute of the opts parameter. A file 488 named '-' will be replaced by stdin. 489 490 """ 491 fname = opts.tags_source 492 if fname is None: 493 return 494 if fname == "-": 495 new_fh = sys.stdin 496 else: 497 new_fh = open(fname, "r") 498 new_data = [] 499 try: 500 # we don't use the nice 'new_data = [line.strip() for line in fh]' 501 # because of python bug 1633941 502 while True: 503 line = new_fh.readline() 504 if not line: 505 break 506 new_data.append(line.strip()) 507 finally: 508 new_fh.close() 509 args.extend(new_data)
510
511 512 -def ListTags(opts, args):
513 """List the tags on a given object. 514 515 This is a generic implementation that knows how to deal with all 516 three cases of tag objects (cluster, node, instance). The opts 517 argument is expected to contain a tag_type field denoting what 518 object type we work on. 519 520 """ 521 kind, name = _ExtractTagsObject(opts, args) 522 cl = GetClient(query=True) 523 result = cl.QueryTags(kind, name) 524 result = list(result) 525 result.sort() 526 for tag in result: 527 ToStdout(tag)
528
529 530 -def AddTags(opts, args):
531 """Add tags on a given object. 532 533 This is a generic implementation that knows how to deal with all 534 three cases of tag objects (cluster, node, instance). The opts 535 argument is expected to contain a tag_type field denoting what 536 object type we work on. 537 538 """ 539 kind, name = _ExtractTagsObject(opts, args) 540 _ExtendTags(opts, args) 541 if not args: 542 raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL) 543 op = opcodes.OpTagsSet(kind=kind, name=name, tags=args) 544 SubmitOrSend(op, opts)
545
546 547 -def RemoveTags(opts, args):
548 """Remove tags from a given object. 549 550 This is a generic implementation that knows how to deal with all 551 three cases of tag objects (cluster, node, instance). The opts 552 argument is expected to contain a tag_type field denoting what 553 object type we work on. 554 555 """ 556 kind, name = _ExtractTagsObject(opts, args) 557 _ExtendTags(opts, args) 558 if not args: 559 raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL) 560 op = opcodes.OpTagsDel(kind=kind, name=name, tags=args) 561 SubmitOrSend(op, opts)
562
563 564 -def check_unit(option, opt, value): # pylint: disable=W0613
565 """OptParsers custom converter for units. 566 567 """ 568 try: 569 return utils.ParseUnit(value) 570 except errors.UnitParseError, err: 571 raise OptionValueError("option %s: %s" % (opt, err)) 572
573 574 -def _SplitKeyVal(opt, data, parse_prefixes):
575 """Convert a KeyVal string into a dict. 576 577 This function will convert a key=val[,...] string into a dict. Empty 578 values will be converted specially: keys which have the prefix 'no_' 579 will have the value=False and the prefix stripped, keys with the prefix 580 "-" will have value=None and the prefix stripped, and the others will 581 have value=True. 582 583 @type opt: string 584 @param opt: a string holding the option name for which we process the 585 data, used in building error messages 586 @type data: string 587 @param data: a string of the format key=val,key=val,... 588 @type parse_prefixes: bool 589 @param parse_prefixes: whether to handle prefixes specially 590 @rtype: dict 591 @return: {key=val, key=val} 592 @raises errors.ParameterError: if there are duplicate keys 593 594 """ 595 kv_dict = {} 596 if data: 597 for elem in utils.UnescapeAndSplit(data, sep=","): 598 if "=" in elem: 599 key, val = elem.split("=", 1) 600 elif parse_prefixes: 601 if elem.startswith(NO_PREFIX): 602 key, val = elem[len(NO_PREFIX):], False 603 elif elem.startswith(UN_PREFIX): 604 key, val = elem[len(UN_PREFIX):], None 605 else: 606 key, val = elem, True 607 else: 608 raise errors.ParameterError("Missing value for key '%s' in option %s" % 609 (elem, opt)) 610 if key in kv_dict: 611 raise errors.ParameterError("Duplicate key '%s' in option %s" % 612 (key, opt)) 613 kv_dict[key] = val 614 return kv_dict
615
616 617 -def _SplitIdentKeyVal(opt, value, parse_prefixes):
618 """Helper function to parse "ident:key=val,key=val" options. 619 620 @type opt: string 621 @param opt: option name, used in error messages 622 @type value: string 623 @param value: expected to be in the format "ident:key=val,key=val,..." 624 @type parse_prefixes: bool 625 @param parse_prefixes: whether to handle prefixes specially (see 626 L{_SplitKeyVal}) 627 @rtype: tuple 628 @return: (ident, {key=val, key=val}) 629 @raises errors.ParameterError: in case of duplicates or other parsing errors 630 631 """ 632 if ":" not in value: 633 ident, rest = value, "" 634 else: 635 ident, rest = value.split(":", 1) 636 637 if parse_prefixes and ident.startswith(NO_PREFIX): 638 if rest: 639 msg = "Cannot pass options when removing parameter groups: %s" % value 640 raise errors.ParameterError(msg) 641 retval = (ident[len(NO_PREFIX):], False) 642 elif (parse_prefixes and ident.startswith(UN_PREFIX) and 643 (len(ident) <= len(UN_PREFIX) or not ident[len(UN_PREFIX)].isdigit())): 644 if rest: 645 msg = "Cannot pass options when removing parameter groups: %s" % value 646 raise errors.ParameterError(msg) 647 retval = (ident[len(UN_PREFIX):], None) 648 else: 649 kv_dict = _SplitKeyVal(opt, rest, parse_prefixes) 650 retval = (ident, kv_dict) 651 return retval
652
653 654 -def check_ident_key_val(option, opt, value): # pylint: disable=W0613
655 """Custom parser for ident:key=val,key=val options. 656 657 This will store the parsed values as a tuple (ident, {key: val}). As such, 658 multiple uses of this option via action=append is possible. 659 660 """ 661 return _SplitIdentKeyVal(opt, value, True) 662
663 664 -def check_key_val(option, opt, value): # pylint: disable=W0613
665 """Custom parser class for key=val,key=val options. 666 667 This will store the parsed values as a dict {key: val}. 668 669 """ 670 return _SplitKeyVal(opt, value, True) 671
672 673 -def _SplitListKeyVal(opt, value):
674 retval = {} 675 for elem in value.split("/"): 676 if not elem: 677 raise errors.ParameterError("Empty section in option '%s'" % opt) 678 (ident, valdict) = _SplitIdentKeyVal(opt, elem, False) 679 if ident in retval: 680 msg = ("Duplicated parameter '%s' in parsing %s: %s" % 681 (ident, opt, elem)) 682 raise errors.ParameterError(msg) 683 retval[ident] = valdict 684 return retval
685
686 687 -def check_multilist_ident_key_val(_, opt, value):
688 """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options. 689 690 @rtype: list of dictionary 691 @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}] 692 693 """ 694 retval = [] 695 for line in value.split("//"): 696 retval.append(_SplitListKeyVal(opt, line)) 697 return retval
698
699 700 -def check_bool(option, opt, value): # pylint: disable=W0613
701 """Custom parser for yes/no options. 702 703 This will store the parsed value as either True or False. 704 705 """ 706 value = value.lower() 707 if value == constants.VALUE_FALSE or value == "no": 708 return False 709 elif value == constants.VALUE_TRUE or value == "yes": 710 return True 711 else: 712 raise errors.ParameterError("Invalid boolean value '%s'" % value) 713
714 715 -def check_list(option, opt, value): # pylint: disable=W0613
716 """Custom parser for comma-separated lists. 717 718 """ 719 # we have to make this explicit check since "".split(",") is [""], 720 # not an empty list :( 721 if not value: 722 return [] 723 else: 724 return utils.UnescapeAndSplit(value) 725
726 727 -def check_maybefloat(option, opt, value): # pylint: disable=W0613
728 """Custom parser for float numbers which might be also defaults. 729 730 """ 731 value = value.lower() 732 733 if value == constants.VALUE_DEFAULT: 734 return value 735 else: 736 return float(value) 737 738 739 # completion_suggestion is normally a list. Using numeric values not evaluating 740 # to False for dynamic completion. 741 (OPT_COMPL_MANY_NODES, 742 OPT_COMPL_ONE_NODE, 743 OPT_COMPL_ONE_INSTANCE, 744 OPT_COMPL_ONE_OS, 745 OPT_COMPL_ONE_EXTSTORAGE, 746 OPT_COMPL_ONE_IALLOCATOR, 747 OPT_COMPL_ONE_NETWORK, 748 OPT_COMPL_INST_ADD_NODES, 749 OPT_COMPL_ONE_NODEGROUP) = range(100, 109) 750 751 OPT_COMPL_ALL = compat.UniqueFrozenset([ 752 OPT_COMPL_MANY_NODES, 753 OPT_COMPL_ONE_NODE, 754 OPT_COMPL_ONE_INSTANCE, 755 OPT_COMPL_ONE_OS, 756 OPT_COMPL_ONE_EXTSTORAGE, 757 OPT_COMPL_ONE_IALLOCATOR, 758 OPT_COMPL_ONE_NETWORK, 759 OPT_COMPL_INST_ADD_NODES, 760 OPT_COMPL_ONE_NODEGROUP, 761 ])
762 763 764 -class CliOption(Option):
765 """Custom option class for optparse. 766 767 """ 768 ATTRS = Option.ATTRS + [ 769 "completion_suggest", 770 ] 771 TYPES = Option.TYPES + ( 772 "multilistidentkeyval", 773 "identkeyval", 774 "keyval", 775 "unit", 776 "bool", 777 "list", 778 "maybefloat", 779 ) 780 TYPE_CHECKER = Option.TYPE_CHECKER.copy() 781 TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val 782 TYPE_CHECKER["identkeyval"] = check_ident_key_val 783 TYPE_CHECKER["keyval"] = check_key_val 784 TYPE_CHECKER["unit"] = check_unit 785 TYPE_CHECKER["bool"] = check_bool 786 TYPE_CHECKER["list"] = check_list 787 TYPE_CHECKER["maybefloat"] = check_maybefloat
788 789 790 # optparse.py sets make_option, so we do it for our own option class, too 791 cli_option = CliOption 792 793 794 _YORNO = "yes|no" 795 796 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count", 797 help="Increase debugging level") 798 799 NOHDR_OPT = cli_option("--no-headers", default=False, 800 action="store_true", dest="no_headers", 801 help="Don't display column headers") 802 803 SEP_OPT = cli_option("--separator", default=None, 804 action="store", dest="separator", 805 help=("Separator between output fields" 806 " (defaults to one space)")) 807 808 USEUNITS_OPT = cli_option("--units", default=None, 809 dest="units", choices=("h", "m", "g", "t"), 810 help="Specify units for output (one of h/m/g/t)") 811 812 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store", 813 type="string", metavar="FIELDS", 814 help="Comma separated list of output fields") 815 816 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true", 817 default=False, help="Force the operation") 818 819 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true", 820 default=False, help="Do not require confirmation") 821 822 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline", 823 action="store_true", default=False, 824 help=("Ignore offline nodes and do as much" 825 " as possible")) 826 827 TAG_ADD_OPT = cli_option("--tags", dest="tags", 828 default=None, help="Comma-separated list of instance" 829 " tags") 830 831 TAG_SRC_OPT = cli_option("--from", dest="tags_source", 832 default=None, help="File with tag names") 833 834 SUBMIT_OPT = cli_option("--submit", dest="submit_only", 835 default=False, action="store_true", 836 help=("Submit the job and return the job ID, but" 837 " don't wait for the job to finish")) 838 839 PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid", 840 default=False, action="store_true", 841 help=("Additionally print the job as first line" 842 " on stdout (for scripting).")) 
843 844 SYNC_OPT = cli_option("--sync", dest="do_locking", 845 default=False, action="store_true", 846 help=("Grab locks while doing the queries" 847 " in order to ensure more consistent results")) 848 849 DRY_RUN_OPT = cli_option("--dry-run", default=False, 850 action="store_true", 851 help=("Do not execute the operation, just run the" 852 " check steps and verify if it could be" 853 " executed")) 854 855 VERBOSE_OPT = cli_option("-v", "--verbose", default=False, 856 action="store_true", 857 help="Increase the verbosity of the operation") 858 859 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False, 860 action="store_true", dest="simulate_errors", 861 help="Debugging option that makes the operation" 862 " treat most runtime checks as failed") 863 864 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync", 865 default=True, action="store_false", 866 help="Don't wait for sync (DANGEROUS!)") 867 868 WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync", 869 default=False, action="store_true", 870 help="Wait for disks to sync") 871 872 ONLINE_INST_OPT = cli_option("--online", dest="online_inst", 873 action="store_true", default=False, 874 help="Enable offline instance") 875 876 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst", 877 action="store_true", default=False, 878 help="Disable down instance") 879 880 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template", 881 help=("Custom disk setup (%s)" % 882 utils.CommaJoin(constants.DISK_TEMPLATES)), 883 default=None, metavar="TEMPL", 884 choices=list(constants.DISK_TEMPLATES)) 885 886 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true", 887 help="Do not create any network cards for" 888 " the instance") 889 890 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir", 891 help="Relative path under default cluster-wide" 892 " file storage dir to store file-based disks", 893 default=None, metavar="<DIR>") 894 
895 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver", 896 help="Driver to use for image files", 897 default=None, metavar="<DRIVER>", 898 choices=list(constants.FILE_DRIVER)) 899 900 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>", 901 help="Select nodes for the instance automatically" 902 " using the <NAME> iallocator plugin", 903 default=None, type="string", 904 completion_suggest=OPT_COMPL_ONE_IALLOCATOR) 905 906 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator", 907 metavar="<NAME>", 908 help="Set the default instance" 909 " allocator plugin", 910 default=None, type="string", 911 completion_suggest=OPT_COMPL_ONE_IALLOCATOR) 912 913 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run", 914 metavar="<os>", 915 completion_suggest=OPT_COMPL_ONE_OS) 916 917 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams", 918 type="keyval", default={}, 919 help="OS parameters") 920 921 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant", 922 action="store_true", default=False, 923 help="Force an unknown variant") 924 925 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install", 926 action="store_true", default=False, 927 help="Do not install the OS (will" 928 " enable no-start)") 929 930 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes", 931 dest="allow_runtime_chgs", 932 default=True, action="store_false", 933 help="Don't allow runtime changes") 934 935 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams", 936 type="keyval", default={}, 937 help="Backend parameters") 938 939 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval", 940 default={}, dest="hvparams", 941 help="Hypervisor parameters") 942 943 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams", 944 help="Disk template parameters, in the format" 945 " template:option=value,option=value,...", 946 type="identkeyval", action="append", default=[]) 947 
948 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size", 949 type="keyval", default={}, 950 help="Memory size specs: list of key=value," 951 " where key is one of min, max, std" 952 " (in MB or using a unit)") 953 954 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count", 955 type="keyval", default={}, 956 help="CPU count specs: list of key=value," 957 " where key is one of min, max, std") 958 959 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count", 960 dest="ispecs_disk_count", 961 type="keyval", default={}, 962 help="Disk count specs: list of key=value," 963 " where key is one of min, max, std") 964 965 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size", 966 type="keyval", default={}, 967 help="Disk size specs: list of key=value," 968 " where key is one of min, max, std" 969 " (in MB or using a unit)") 970 971 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count", 972 type="keyval", default={}, 973 help="NIC count specs: list of key=value," 974 " where key is one of min, max, std") 975 976 IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs" 977 IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR, 978 dest="ipolicy_bounds_specs", 979 type="multilistidentkeyval", default=None, 980 help="Complete instance specs limits") 981 982 IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs" 983 IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR, 984 dest="ipolicy_std_specs", 985 type="keyval", default=None, 986 help="Complte standard instance specs") 987 988 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates", 989 dest="ipolicy_disk_templates", 990 type="list", default=None, 991 help="Comma-separated list of" 992 " enabled disk templates") 993 994 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio", 995 dest="ipolicy_vcpu_ratio", 996 type="maybefloat", default=None, 997 help="The maximum allowed vcpu-to-cpu ratio") 998 999 IPOLICY_SPINDLE_RATIO = 
cli_option("--ipolicy-spindle-ratio", 1000 dest="ipolicy_spindle_ratio", 1001 type="maybefloat", default=None, 1002 help=("The maximum allowed instances to" 1003 " spindle ratio")) 1004 1005 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor", 1006 help="Hypervisor and hypervisor options, in the" 1007 " format hypervisor:option=value,option=value,...", 1008 default=None, type="identkeyval") 1009 1010 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams", 1011 help="Hypervisor and hypervisor options, in the" 1012 " format hypervisor:option=value,option=value,...", 1013 default=[], action="append", type="identkeyval") 1014 1015 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True, 1016 action="store_false", 1017 help="Don't check that the instance's IP" 1018 " is alive") 1019 1020 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check", 1021 default=True, action="store_false", 1022 help="Don't check that the instance's name" 1023 " is resolvable") 1024 1025 NET_OPT = cli_option("--net", 1026 help="NIC parameters", default=[], 1027 dest="nics", action="append", type="identkeyval") 1028 1029 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[], 1030 dest="disks", action="append", type="identkeyval") 1031 1032 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None, 1033 help="Comma-separated list of disks" 1034 " indices to act on (e.g. 
0,2) (optional," 1035 " defaults to all disks)") 1036 1037 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size", 1038 help="Enforces a single-disk configuration using the" 1039 " given disk size, in MiB unless a suffix is used", 1040 default=None, type="unit", metavar="<size>") 1041 1042 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency", 1043 dest="ignore_consistency", 1044 action="store_true", default=False, 1045 help="Ignore the consistency of the disks on" 1046 " the secondary") 1047 1048 ALLOW_FAILOVER_OPT = cli_option("--allow-failover", 1049 dest="allow_failover", 1050 action="store_true", default=False, 1051 help="If migration is not possible fallback to" 1052 " failover") 1053 1054 NONLIVE_OPT = cli_option("--non-live", dest="live", 1055 default=True, action="store_false", 1056 help="Do a non-live migration (this usually means" 1057 " freeze the instance, save the state, transfer and" 1058 " only then resume running on the secondary node)") 1059 1060 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode", 1061 default=None, 1062 choices=list(constants.HT_MIGRATION_MODES), 1063 help="Override default migration mode (choose" 1064 " either live or non-live") 1065 1066 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node", 1067 help="Target node and optional secondary node", 1068 metavar="<pnode>[:<snode>]", 1069 completion_suggest=OPT_COMPL_INST_ADD_NODES) 1070 1071 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[], 1072 action="append", metavar="<node>", 1073 help="Use only this node (can be used multiple" 1074 " times, if not given defaults to all nodes)", 1075 completion_suggest=OPT_COMPL_ONE_NODE) 1076 1077 NODEGROUP_OPT_NAME = "--node-group" 1078 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME, 1079 dest="nodegroup", 1080 help="Node group (name or uuid)", 1081 metavar="<nodegroup>", 1082 default=None, type="string", 1083 completion_suggest=OPT_COMPL_ONE_NODEGROUP) 1084 1085 SINGLE_NODE_OPT = 
cli_option("-n", "--node", dest="node", help="Target node", 1086 metavar="<node>", 1087 completion_suggest=OPT_COMPL_ONE_NODE) 1088 1089 NOSTART_OPT = cli_option("--no-start", dest="start", default=True, 1090 action="store_false", 1091 help="Don't start the instance after creation") 1092 1093 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command", 1094 action="store_true", default=False, 1095 help="Show command instead of executing it") 1096 1097 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup", 1098 default=False, action="store_true", 1099 help="Instead of performing the migration/failover," 1100 " try to recover from a failed cleanup. This is safe" 1101 " to run even if the instance is healthy, but it" 1102 " will create extra replication traffic and " 1103 " disrupt briefly the replication (like during the" 1104 " migration/failover") 1105 1106 STATIC_OPT = cli_option("-s", "--static", dest="static", 1107 action="store_true", default=False, 1108 help="Only show configuration data, not runtime data") 1109 1110 ALL_OPT = cli_option("--all", dest="show_all", 1111 default=False, action="store_true", 1112 help="Show info on all instances on the cluster." 
1113 " This can take a long time to run, use wisely") 1114 1115 SELECT_OS_OPT = cli_option("--select-os", dest="select_os", 1116 action="store_true", default=False, 1117 help="Interactive OS reinstall, lists available" 1118 " OS templates for selection") 1119 1120 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures", 1121 action="store_true", default=False, 1122 help="Remove the instance from the cluster" 1123 " configuration even if there are failures" 1124 " during the removal process") 1125 1126 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures", 1127 dest="ignore_remove_failures", 1128 action="store_true", default=False, 1129 help="Remove the instance from the" 1130 " cluster configuration even if there" 1131 " are failures during the removal" 1132 " process") 1133 1134 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance", 1135 action="store_true", default=False, 1136 help="Remove the instance from the cluster") 1137 1138 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node", 1139 help="Specifies the new node for the instance", 1140 metavar="NODE", default=None, 1141 completion_suggest=OPT_COMPL_ONE_NODE) 1142 1143 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node", 1144 help="Specifies the new secondary node", 1145 metavar="NODE", default=None, 1146 completion_suggest=OPT_COMPL_ONE_NODE) 1147 1148 NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node", 1149 help="Specifies the new primary node", 1150 metavar="<node>", default=None, 1151 completion_suggest=OPT_COMPL_ONE_NODE) 1152 1153 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary", 1154 default=False, action="store_true", 1155 help="Replace the disk(s) on the primary" 1156 " node (applies only to internally mirrored" 1157 " disk templates, e.g. 
%s)" % 1158 utils.CommaJoin(constants.DTS_INT_MIRROR)) 1159 1160 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary", 1161 default=False, action="store_true", 1162 help="Replace the disk(s) on the secondary" 1163 " node (applies only to internally mirrored" 1164 " disk templates, e.g. %s)" % 1165 utils.CommaJoin(constants.DTS_INT_MIRROR)) 1166 1167 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote", 1168 default=False, action="store_true", 1169 help="Lock all nodes and auto-promote as needed" 1170 " to MC status") 1171 1172 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto", 1173 default=False, action="store_true", 1174 help="Automatically replace faulty disks" 1175 " (applies only to internally mirrored" 1176 " disk templates, e.g. %s)" % 1177 utils.CommaJoin(constants.DTS_INT_MIRROR)) 1178 1179 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size", 1180 default=False, action="store_true", 1181 help="Ignore current recorded size" 1182 " (useful for forcing activation when" 1183 " the recorded size is wrong)") 1184 1185 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node", 1186 metavar="<node>", 1187 completion_suggest=OPT_COMPL_ONE_NODE) 1188 1189 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory", 1190 metavar="<dir>") 1191 1192 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip", 1193 help="Specify the secondary ip for the node", 1194 metavar="ADDRESS", default=None) 1195 1196 READD_OPT = cli_option("--readd", dest="readd", 1197 default=False, action="store_true", 1198 help="Readd old node after replacing it") 1199 1200 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check", 1201 default=True, action="store_false", 1202 help="Disable SSH key fingerprint checking") 1203 1204 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join", 1205 default=False, action="store_true", 1206 help="Force the joining of a 
node") 1207 1208 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate", 1209 type="bool", default=None, metavar=_YORNO, 1210 help="Set the master_candidate flag on the node") 1211 1212 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO, 1213 type="bool", default=None, 1214 help=("Set the offline flag on the node" 1215 " (cluster does not communicate with offline" 1216 " nodes)")) 1217 1218 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO, 1219 type="bool", default=None, 1220 help=("Set the drained flag on the node" 1221 " (excluded from allocation operations)")) 1222 1223 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable", 1224 type="bool", default=None, metavar=_YORNO, 1225 help="Set the master_capable flag on the node") 1226 1227 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable", 1228 type="bool", default=None, metavar=_YORNO, 1229 help="Set the vm_capable flag on the node") 1230 1231 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable", 1232 type="bool", default=None, metavar=_YORNO, 1233 help="Set the allocatable flag on a volume") 1234 1235 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage", 1236 help="Disable support for lvm based instances" 1237 " (cluster-wide)", 1238 action="store_false", default=True) 1239 1240 ENABLED_HV_OPT = cli_option("--enabled-hypervisors", 1241 dest="enabled_hypervisors", 1242 help="Comma-separated list of hypervisors", 1243 type="string", default=None) 1244 1245 ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates", 1246 dest="enabled_disk_templates", 1247 help="Comma-separated list of " 1248 "disk templates", 1249 type="string", default=None) 1250 1251 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams", 1252 type="keyval", default={}, 1253 help="NIC parameters") 1254 1255 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None, 1256 
dest="candidate_pool_size", type="int", 1257 help="Set the candidate pool size") 1258 1259 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name", 1260 help=("Enables LVM and specifies the volume group" 1261 " name (cluster-wide) for disk allocation" 1262 " [%s]" % constants.DEFAULT_VG), 1263 metavar="VG", default=None) 1264 1265 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it", 1266 help="Destroy cluster", action="store_true") 1267 1268 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting", 1269 help="Skip node agreement check (dangerous)", 1270 action="store_true", default=False) 1271 1272 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix", 1273 help="Specify the mac prefix for the instance IP" 1274 " addresses, in the format XX:XX:XX", 1275 metavar="PREFIX", 1276 default=None) 1277 1278 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev", 1279 help="Specify the node interface (cluster-wide)" 1280 " on which the master IP address will be added" 1281 " (cluster init default: %s)" % 1282 constants.DEFAULT_BRIDGE, 1283 metavar="NETDEV", 1284 default=None) 1285 1286 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask", 1287 help="Specify the netmask of the master IP", 1288 metavar="NETMASK", 1289 default=None) 1290 1291 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script", 1292 dest="use_external_mip_script", 1293 help="Specify whether to run a" 1294 " user-provided script for the master" 1295 " IP address turnup and" 1296 " turndown operations", 1297 type="bool", metavar=_YORNO, default=None) 1298 1299 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir", 1300 help="Specify the default directory (cluster-" 1301 "wide) for storing the file-based disks [%s]" % 1302 pathutils.DEFAULT_FILE_STORAGE_DIR, 1303 metavar="DIR", 1304 default=None) 1305 1306 GLOBAL_SHARED_FILEDIR_OPT = cli_option( 1307 "--shared-file-storage-dir", 1308 
dest="shared_file_storage_dir", 1309 help="Specify the default directory (cluster-wide) for storing the" 1310 " shared file-based disks [%s]" % 1311 pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR, 1312 metavar="SHAREDDIR", default=None) 1313 1314 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts", 1315 help="Don't modify %s" % pathutils.ETC_HOSTS, 1316 action="store_false", default=True) 1317 1318 MODIFY_ETCHOSTS_OPT = \ 1319 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO, 1320 default=None, type="bool", 1321 help="Defines whether the cluster should autonomously modify" 1322 " and keep in sync the /etc/hosts file of the nodes") 1323 1324 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup", 1325 help="Don't initialize SSH keys", 1326 action="store_false", default=True) 1327 1328 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes", 1329 help="Enable parseable error messages", 1330 action="store_true", default=False) 1331 1332 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem", 1333 help="Skip N+1 memory redundancy tests", 1334 action="store_true", default=False) 1335 1336 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type", 1337 help="Type of reboot: soft/hard/full", 1338 default=constants.INSTANCE_REBOOT_HARD, 1339 metavar="<REBOOT>", 1340 choices=list(constants.REBOOT_TYPES)) 1341 1342 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries", 1343 dest="ignore_secondaries", 1344 default=False, action="store_true", 1345 help="Ignore errors from secondaries") 1346 1347 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown", 1348 action="store_false", default=True, 1349 help="Don't shutdown the instance (unsafe)") 1350 1351 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int", 1352 default=constants.DEFAULT_SHUTDOWN_TIMEOUT, 1353 help="Maximum time to wait") 1354 1355 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout", 1356 
dest="shutdown_timeout", type="int", 1357 default=constants.DEFAULT_SHUTDOWN_TIMEOUT, 1358 help="Maximum time to wait for instance" 1359 " shutdown") 1360 1361 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int", 1362 default=None, 1363 help=("Number of seconds between repetions of the" 1364 " command")) 1365 1366 EARLY_RELEASE_OPT = cli_option("--early-release", 1367 dest="early_release", default=False, 1368 action="store_true", 1369 help="Release the locks on the secondary" 1370 " node(s) early") 1371 1372 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate", 1373 dest="new_cluster_cert", 1374 default=False, action="store_true", 1375 help="Generate a new cluster certificate") 1376 1377 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert", 1378 default=None, 1379 help="File containing new RAPI certificate") 1380 1381 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert", 1382 default=None, action="store_true", 1383 help=("Generate a new self-signed RAPI" 1384 " certificate")) 1385 1386 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert", 1387 default=None, 1388 help="File containing new SPICE certificate") 1389 1390 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert", 1391 default=None, 1392 help="File containing the certificate of the CA" 1393 " which signed the SPICE certificate") 1394 1395 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate", 1396 dest="new_spice_cert", default=None, 1397 action="store_true", 1398 help=("Generate a new self-signed SPICE" 1399 " certificate")) 1400 1401 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key", 1402 dest="new_confd_hmac_key", 1403 default=False, action="store_true", 1404 help=("Create a new HMAC key for %s" % 1405 constants.CONFD)) 1406 1407 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret", 1408 dest="cluster_domain_secret", 1409 default=None, 1410 help=("Load new new cluster domain" 1411 
" secret from file")) 1412 1413 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret", 1414 dest="new_cluster_domain_secret", 1415 default=False, action="store_true", 1416 help=("Create a new cluster domain" 1417 " secret")) 1418 1419 USE_REPL_NET_OPT = cli_option("--use-replication-network", 1420 dest="use_replication_network", 1421 help="Whether to use the replication network" 1422 " for talking to the nodes", 1423 action="store_true", default=False) 1424 1425 MAINTAIN_NODE_HEALTH_OPT = \ 1426 cli_option("--maintain-node-health", dest="maintain_node_health", 1427 metavar=_YORNO, default=None, type="bool", 1428 help="Configure the cluster to automatically maintain node" 1429 " health, by shutting down unknown instances, shutting down" 1430 " unknown DRBD devices, etc.") 1431 1432 IDENTIFY_DEFAULTS_OPT = \ 1433 cli_option("--identify-defaults", dest="identify_defaults", 1434 default=False, action="store_true", 1435 help="Identify which saved instance parameters are equal to" 1436 " the current cluster defaults and set them as such, instead" 1437 " of marking them as overridden") 1438 1439 UIDPOOL_OPT = cli_option("--uid-pool", default=None, 1440 action="store", dest="uid_pool", 1441 help=("A list of user-ids or user-id" 1442 " ranges separated by commas")) 1443 1444 ADD_UIDS_OPT = cli_option("--add-uids", default=None, 1445 action="store", dest="add_uids", 1446 help=("A list of user-ids or user-id" 1447 " ranges separated by commas, to be" 1448 " added to the user-id pool")) 1449 1450 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None, 1451 action="store", dest="remove_uids", 1452 help=("A list of user-ids or user-id" 1453 " ranges separated by commas, to be" 1454 " removed from the user-id pool")) 1455 1456 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None, 1457 action="store", dest="reserved_lvs", 1458 help=("A comma-separated list of reserved" 1459 " logical volumes names, that will be" 1460 " ignored by cluster verify")) 
# Output formatting: render positive integers as roman numerals
ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

# Note: dest is "drbd_storage" with default True; passing the flag stores False
NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
  cli_option("--primary-ip-version", default=constants.IP4_VERSION,
             action="store", dest="primary_ip_version",
             metavar="%d|%d" % (constants.IP4_VERSION,
                                constants.IP6_VERSION),
             help="Cluster-wide IP version for primary IP")

# No explicit dest: optparse derives "show_machine_names" from the long option
SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in output")

# No explicit dest: optparse derives "failure_only" from the long option
FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show failures"
                                    " only (determined by the exit code)"))

# Free-form audit string attached to the submitted job (part of COMMON_OPTS)
REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Maps the symbolic priority name given on the command line to its
  numeric value and stores it under the option's destination.

  """
  setattr(parser.values, option.dest, _PRIONAME_TO_VALUE[value])
# Job priority: a "choice" option whose callback translates the symbolic
# name into a numeric priority via _PriorityOptionCb
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay", type="float",
                             default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be treated"
                                    " as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember",
                             dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

# "append": may be given multiple times, accumulating error codes
IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

# Negative flag: dest defaults to True, the option switches checking off
NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

INCLUDEDEFAULTS_OPT = cli_option("--include-defaults", dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# options related to asynchronous job handling

SUBMIT_OPTS = [
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  ]

# common options for creating instances. add and import then add their own
# specific ones.
#: Options shared by "gnt-instance add" and "gnt-instance import"; each
#: command appends its own specific options to this list
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NOCONFLICTSCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]

# instance policy split specs options
SPLIT_ISPECS_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  ]
1701 1702 1703 -class _ShowUsage(Exception):
1704 """Exception class for L{_ParseArgs}. 1705 1706 """
1707 - def __init__(self, exit_error):
1708 """Initializes instances of this class. 1709 1710 @type exit_error: bool 1711 @param exit_error: Whether to report failure on exit 1712 1713 """ 1714 Exception.__init__(self) 1715 self.exit_error = exit_error
1716
1717 1718 -class _ShowVersion(Exception):
1719 """Exception class for L{_ParseArgs}. 1720 1721 """
1722
def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) <= 1:
    # Neither a command nor an option was given
    raise _ShowUsage(exit_error=True)

  cmd = argv[1]

  # Global pseudo-commands are handled before command lookup
  if cmd == "--version":
    raise _ShowVersion()
  if cmd == "--help":
    raise _ShowUsage(exit_error=False)
  if cmd not in commands and cmd not in aliases:
    raise _ShowUsage(exit_error=True)

  # Resolve an alias to its canonical command name
  if cmd in aliases:
    target = aliases[cmd]
    if target not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, target))
    cmd = target

  # Default arguments may be injected from a per-command environment variable
  if cmd in env_override:
    env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
1782 1783 -def _FormatUsage(binary, commands):
1784 """Generates a nice description of all commands. 1785 1786 @param binary: Script name 1787 @param commands: Dictionary containing command definitions 1788 1789 """ 1790 # compute the max line length for cmd + usage 1791 mlen = min(60, max(map(len, commands))) 1792 1793 yield "Usage: %s {command} [options...] [argument...]" % binary 1794 yield "%s <command> --help to see details, or man %s" % (binary, binary) 1795 yield "" 1796 yield "Commands:" 1797 1798 # and format a nice command list 1799 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()): 1800 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen) 1801 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0)) 1802 for line in help_lines: 1803 yield " %-*s %s" % (mlen, "", line) 1804 1805 yield ""
1806
1807 1808 -def _CheckArguments(cmd, args_def, args):
1809 """Verifies the arguments using the argument definition. 1810 1811 Algorithm: 1812 1813 1. Abort with error if values specified by user but none expected. 1814 1815 1. For each argument in definition 1816 1817 1. Keep running count of minimum number of values (min_count) 1818 1. Keep running count of maximum number of values (max_count) 1819 1. If it has an unlimited number of values 1820 1821 1. Abort with error if it's not the last argument in the definition 1822 1823 1. If last argument has limited number of values 1824 1825 1. Abort with error if number of values doesn't match or is too large 1826 1827 1. Abort with error if user didn't pass enough values (min_count) 1828 1829 """ 1830 if args and not args_def: 1831 ToStderr("Error: Command %s expects no arguments", cmd) 1832 return False 1833 1834 min_count = None 1835 max_count = None 1836 check_max = None 1837 1838 last_idx = len(args_def) - 1 1839 1840 for idx, arg in enumerate(args_def): 1841 if min_count is None: 1842 min_count = arg.min 1843 elif arg.min is not None: 1844 min_count += arg.min 1845 1846 if max_count is None: 1847 max_count = arg.max 1848 elif arg.max is not None: 1849 max_count += arg.max 1850 1851 if idx == last_idx: 1852 check_max = (arg.max is not None) 1853 1854 elif arg.max is None: 1855 raise errors.ProgrammerError("Only the last argument can have max=None") 1856 1857 if check_max: 1858 # Command with exact number of arguments 1859 if (min_count is not None and max_count is not None and 1860 min_count == max_count and len(args) != min_count): 1861 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count) 1862 return False 1863 1864 # Command with limited number of arguments 1865 if max_count is not None and len(args) > max_count: 1866 ToStderr("Error: Command %s expects only %d argument(s)", 1867 cmd, max_count) 1868 return False 1869 1870 # Command with some required arguments 1871 if min_count is not None and len(args) < min_count: 1872 ToStderr("Error: Command %s 
expects at least %d argument(s)", 1873 cmd, min_count) 1874 return False 1875 1876 return True
1877
def SplitNodeOption(value):
  """Splits the value of a --node option.

  Returns a two-element sequence (primary, secondary); the secondary is
  C{None} when no colon separator is present.

  """
  if not value or ":" not in value:
    return (value, None)
  # NOTE: historically a list here (vs. the tuple above); callers unpack both
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    # No variants: the base name is the only valid name
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    # A leading "+" means: extend the default fields instead of replacing them
    return default + selected[1:].split(",")

  return selected.split(",")
# Decorator alias: presumably wraps a CLI function so the RPC layer is set
# up around the call (semantics defined in rpc.RunWithRPC, not visible here
# -- confirm in ganeti/rpc.py)
UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not perform the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # Default answer, used when no terminal interaction is possible
  answer = choices[-1][1]

  # Re-wrap the question text to 70 columns, keeping explicit newlines
  text = "\n".join(textwrap.fill(line, 70, replace_whitespace=False)
                   for line in text.splitlines())
  try:
    # Use open() instead of the deprecated Python 2-only file() builtin
    f = open("/dev/tty", "a+")
  except IOError:
    # No controlling terminal, fall back to the default answer
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict((entry[0], entry[1]) for entry in choices)
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      # Read at most one character plus newline
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        # Print the per-choice descriptions and ask again
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
class JobSubmittedException(Exception):
  """Signals that a job was submitted and the client should exit.

  Carries a single argument: the ID of the submitted job, which the
  handler is expected to print.

  This is not an error condition, just a structured way to exit from
  clients.

  """
def SendJob(ops, cl=None):
  """Submits a list of opcodes as a single job, without waiting for results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @return: the ID of the submitted job

  """
  client = GetClient() if cl is None else cl
  return client.SubmitJob(ops)
2011
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  Repeatedly waits for changes on the given job, forwarding log messages
  and status notifications to the reporting callbacks, until the job
  reaches a final state; then queries the job once more and either
  returns its results or raises an error describing the failure.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @return: the "opresult" list of the job, if it finished successfully
  @raise errors.JobLost: if the job cannot be found
  @raise errors.OpExecError: if the job was canceled or any opcode failed

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        # max() also copes with the initial None value of the serial
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    # NOTE(review): the loop only exits on an update WITHOUT log entries;
    # a final status delivered together with log messages causes one more
    # wait iteration before the job is considered finished.
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # The job failed; find the first failed opcode, remembering whether any
  # earlier opcode succeeded (in which case this is a partial failure)
  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      # Re-raises an encapsulated exception, if msg carries one
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
2089
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  Defines the data-retrieval interface the polling loop relies on;
  subclasses must override both query methods.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    Must be overridden by subclasses.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    Must be overridden by subclasses.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
2117
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Defines the progress-reporting interface used by the polling loop;
  subclasses must override both report methods.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    Must be overridden by subclasses.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    Must be overridden by subclasses.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
2144
class _LuxiJobPollCb(JobPollCbBase):
  """Data callbacks for L{GenericPollJob}, backed by a luxi client.

  Both query methods simply delegate to the wrapped client.

  """
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for all job queries

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
2167
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that forward log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable that receives one
        C{(timestamp, log_type, log_msg)} tuple per log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Forwards one log message to the feedback function.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Intentionally a no-op; idle updates are not forwarded.

    """
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that write to the standard output streams.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # One-time notification flags, so each notice is printed at most once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Prints one job log message on stdout, prefixed with its time.

    """
    when = time.ctime(utils.MergeTime(timestamp))
    ToStdout("%s %s", when, FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Prints a one-time notice for jobs stuck in queue or on locks.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True
    elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
2224
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-message log entries are stringified first; the result is always
  passed through L{utils.SafeEncode}.

  """
  text = log_msg if log_type == constants.ELOG_MESSAGE else str(log_msg)
  return utils.SafeEncode(text)
2234
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Polls a job until completion and returns its results.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @param feedback_fn: optional callable receiving log messages (mutually
      exclusive with C{reporter})
  @param reporter: optional L{JobPollReportCbBase} instance

  """
  if cl is None:
    cl = GetClient()

  if reporter is not None:
    if feedback_fn:
      raise errors.ProgrammerError("Can't specify reporter and feedback"
                                   " function")
  elif feedback_fn:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
  else:
    reporter = StdioJobPollReportCb()

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
2258
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy helper submitting one opcode and waiting for its result.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @param op: the opcode to run
  @param cl: luxi client to use; a new one is created when C{None}
  @param opts: parsed command line options, or C{None}
  @return: the result of the single submitted opcode

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)
  # getattr with a False default matches the old hasattr-and-test idiom
  if getattr(opts, "print_jobid", False):
    ToStdout("%d" % job_id)

  return PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                 reporter=reporter)[0]
2281
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  """
  if not (opts and opts.submit_only):
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

  job = [op]
  SetGenericOpcodeOpts(job, opts)
  job_id = SendJob(job, cl=cl)
  if opts.print_jobid:
    ToStdout("%d" % job_id)
  # Structured exit: the caller prints the job ID and terminates
  raise JobSubmittedException(job_id)
2304
def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail

  Builds the initial part of the reason trail, adding the user provided
  reason (if any) and the name of the command starting the operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  assert len(sys.argv) >= 2
  trail = []

  user_reason = opts.reason
  if user_reason:
    trail.append((constants.OPCODE_REASON_SRC_USER, user_reason,
                  utils.EpochNano()))

  client_source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT,
                             os.path.basename(sys.argv[0]))
  trail.append((client_source, sys.argv[1], utils.EpochNano()))
  op.reason = trail
2329
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    priority = getattr(options, "priority", None)
    if priority is not None:
      op.priority = priority
    _InitReasonTrail(op, options)
2351
def GetClient(query=False):
  """Connects to the a luxi socket and returns a client.

  @type query: boolean
  @param query: this signifies that the client will only be
      used for queries; if the build-time parameter
      enable-split-queries is enabled, then the client will be
      connected to the query socket instead of the masterd socket

  """
  override_socket = os.getenv(constants.LUXI_OVERRIDE, "")
  if override_socket:
    # The two symbolic override values map to well-known sockets; any
    # other value is taken as a literal socket path
    known_overrides = {
      constants.LUXI_OVERRIDE_MASTER: pathutils.MASTER_SOCKET,
      constants.LUXI_OVERRIDE_QUERY: pathutils.QUERY_SOCKET,
      }
    address = known_overrides.get(override_socket, override_socket)
  elif query and constants.ENABLE_SPLIT_QUERY:
    address = pathutils.QUERY_SOCKET
  else:
    address = None

  # TODO: Cache object?
  try:
    return luxi.Client(address=address)
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file; failing that, the cluster is not set up here
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster",
                                 errors.ECODE_INVAL)

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master, errors.ECODE_INVAL)
    raise
2396
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @rtype: tuple of (int, string)

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  # NOTE: branch order matters; the more specific classes are tested
  # before the errors.GenericError catch-all near the end
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    # Tailor the message to the socket the client tried to reach
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
    else:
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # Not an error: report the job ID and recommend a success exit code
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
2490
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
      for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
      override command line options; this can be used to pass
      options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of environment names which are allowed to submit
      default args for commands
  @return: the exit code of the command

  """
  # save the program name and the entire command line for later logging
  # NOTE(review): if sys.argv is empty, "logname" is never assigned here
  # but is used below in SetupLogging -- looks like a latent NameError;
  # confirm whether an empty sys.argv can occur in practice
  if sys.argv:
    binary = os.path.basename(sys.argv[0])
    if not binary:
      binary = sys.argv[0]

    if len(sys.argv) >= 2:
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
    else:
      logname = binary

    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  else:
    binary = "<unknown program>"
    cmdline = "<unknown>"

  if aliases is None:
    aliases = {}

  try:
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
                                       env_override)
  except _ShowVersion:
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    return constants.EXIT_SUCCESS
  except _ShowUsage, err:
    for line in _FormatUsage(binary, commands):
      ToStdout(line)

    if err.exit_error:
      return constants.EXIT_FAILURE
    else:
      return constants.EXIT_SUCCESS
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  # Apply caller-supplied option overrides after parsing
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.info("Command line: %s", cmdline)

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result
2577
2578 2579 -def ParseNicOption(optvalue):
2580 """Parses the value of the --net option(s). 2581 2582 """ 2583 try: 2584 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue) 2585 except (TypeError, ValueError), err: 2586 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err), 2587 errors.ECODE_INVAL) 2588 2589 nics = [{}] * nic_max 2590 for nidx, ndict in optvalue: 2591 nidx = int(nidx) 2592 2593 if not isinstance(ndict, dict): 2594 raise errors.OpPrereqError("Invalid nic/%d value: expected dict," 2595 " got %s" % (nidx, ndict), errors.ECODE_INVAL) 2596 2597 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES) 2598 2599 nics[nidx] = ndict 2600 2601 return nics
2602
def FixHvParams(hvparams):
  """Converts spaces in the usb_devices hvparam back into commas, in place.

  In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
  comma to space because commas cannot be accepted on the command line
  (they already act as the separator between different hvparams). Still,
  RAPI should be able to accept commas for backwards compatibility.
  Therefore, we convert spaces into commas here, and we keep the old
  parsing logic everywhere else.

  """
  if constants.HV_USB_DEVICES in hvparams:
    hvparams[constants.HV_USB_DEVICES] = \
      hvparams[constants.HV_USB_DEVICES].replace(" ", ",")
  # Without the usb_devices key there is nothing to fix up
2617
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code
  @raise errors.OpPrereqError: on inconsistent disk/NIC command line options

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  # Determine the NIC list: explicit --net options, explicitly none, or
  # (for creation only) a single default NIC
  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      # Legacy single-disk "-s" option becomes a one-entry disk list
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      # NOTE(review): "[{}] * disk_max" aliases one shared dict into every
      # unassigned slot (compare ParseNicOption); confirm unassigned disk
      # slots are never mutated downstream
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if constants.IDISK_SPINDLES in ddict:
          raise errors.OpPrereqError("spindles is not a valid option when"
                                     " adopting a disk", errors.ECODE_INVAL)
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        # Adopted disks keep their existing size; zero is a placeholder
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
  FixHvParams(hvparams)

  # Mode-specific opcode parameters
  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                conflicts_check=opts.conflicts_check,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                tags=tags,
                                no_install=no_install,
                                identify_defaults=identify_defaults,
                                ignore_ipolicy=opts.ignore_ipolicy)

  SubmitOrSend(op, opts)
  return 0
2761
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    # All online nodes except the master; daemons there are restarted
    # before the master's in Call() below
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command
    @raise errors.OpExecError: if the command exits with a failure

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    The watcher is blocked and every cluster daemon stopped before C{fn}
    runs; the nested try/finally blocks guarantee daemons are restarted
    (master node last) and the watcher released afterwards, even when
    C{fn} raises.

    @type fn: callable
    @param fn: Function to be called

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()
2857
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  helper = _RunWhileClusterStoppedHelper(feedback_fn, cluster_name,
                                         master_node, online_nodes)
  return helper.Call(fn, *args)
2885
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  NOTE: the rows of ``data`` are modified in place: unit fields are
  converted via L{utils.FormatUnit} and every value is stringified.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list of strings
  @return: the formatted table lines

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  # Build one %-format specifier per column; "%*s"/"%-*s" take the column
  # width as an extra argument (only used in smart/aligned mode)
  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    # Escape literal "%" in the separator before it becomes format text
    format_str = separator.replace("%", "%%").join(format_fields)

  # First pass: convert values in place and record maximum column widths
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    # Don't pad the last column unless it's right-aligned
    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
2993
2994 2995 -def _FormatBool(value):
2996 """Formats a boolean value as a string. 2997 2998 """ 2999 if value: 3000 return "Y" 3001 return "N"
#: Default formatting for query results; (callback, align right)
# NOTE: constants.QFT_UNIT is deliberately absent from this table: its
# formatter needs the runtime "unit" argument and is therefore built
# dynamically in _GetColumnFormatter (which asserts this invariant).
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  # Numbers are the only default-right-aligned kind
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
def _GetColumnFormatter(fdef, override, unit):
  """Select the formatting function for one query field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: per-field formatter overrides, indexed by field name,
      contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: unit used for fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: the function formatting a single value and whether the column
      should be right-aligned

  """
  custom = override.get(fdef.name, None)
  if custom is not None:
    return custom

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # The unit is only known at runtime, hence not in the static table
    return (lambda value: utils.FormatUnit(value, unit), True)

  try:
    return _DEFAULT_FORMAT_QUERY[fdef.kind]
  except KeyError:
    raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
3044
class _QueryColumnFormatter:
  """Callable wrapper formatting a single query field's values.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: formatting function for normal values
    @type status_fn: callable
    @param status_fn: callback invoked with every field's result status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Return the string representation of one (status, value) pair.

    """
    (status, value) = data

    # Let the caller account for this field's status
    self._status_fn(status)

    if status != constants.RS_NORMAL:
      assert value is None, \
        "Found value %r for abnormal status %s" % (value, status)
      return FormatResultError(status, self._verbose)

    return self._fn(value)
3081
def FormatResultError(status, verbose):
  """Return the text for a result status other than L{constants.RS_NORMAL}.

  @param status: the result status
  @type verbose: boolean
  @param verbose: whether to return the verbose text
  @return: text of result status

  """
  assert status != constants.RS_NORMAL, \
    "FormatResultError called with status equal to constants.RS_NORMAL"

  if status not in constants.RSS_DESCRIPTION:
    raise NotImplementedError("Unknown status %s" % status)

  (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  return verbose_text if verbose else normal_text
3102
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Format the data of an L{objects.QueryResponse} for display.

  @type result: L{objects.QueryResponse}
  @param result: result of a query operation
  @type unit: string
  @param unit: unit used for fields of type L{constants.QFT_UNIT}, see
      L{utils.text.FormatUnit}; chosen automatically if None
  @type format_override: dict
  @param format_override: per-field formatter overrides, indexed by field
      name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: string used to separate fields
  @type header: bool
  @param header: whether to output a header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @return: tuple of overall status and the formatted table lines

  """
  if unit is None:
    # Human-readable units interactively, megabytes for parseable output
    unit = "m" if separator else "h"

  if format_override is None:
    format_override = {}

  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    formatter = _QueryColumnFormatter(fn, _RecordStatus, verbose)
    columns.append(TableColumn(fdef.title, formatter, align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status; without any data rows, unknown fields can
  # only be detected through the field definitions
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for (key, count) in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Collect the fields of unknown type from C{fdefs}.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list

  """
  unknown = []
  for fdef in fdefs:
    if fdef.kind == constants.QFT_UNKNOWN:
      unknown.append(fdef)
  return unknown
3176
def _WarnUnknownFields(fdefs):
  """Emit a warning on stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown field was present

  """
  unknown = _GetUnknownFields(fdefs)
  if not unknown:
    return False

  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
  return True
3191
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: list of fields to query for
  @type names: list of strings
  @param names: names of items to query for (all items if empty)
  @type unit: string or None
  @param unit: unit used for fields of type L{constants.QFT_UNIT}; None
      selects automatically (human-readable for non-separator usage,
      otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: string used to separate fields
  @type header: bool
  @param header: whether to show the header row
  @param cl: luxi client to use; a new one is created if None
  @type format_override: dict
  @param format_override: dictionary for overriding field formatting
      functions, indexed by field name, contents like
      L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type force_filter: bool
  @param force_filter: whether to always treat names as a filter
  @type namefield: string
  @param namefield: name of the field to use for simple filters (see
      L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: query filter, combined with the name filter if both given
  @type isnumeric: bool
  @param isnumeric: whether the namefield's type is numeric, and therefore
      any simple filters built by namefield should use integer values

  """
  name_filter = qlang.MakeFilter(names or None, force_filter,
                                 namefield=namefield, isnumeric=isnumeric)

  if qfilter is None:
    qfilter = name_filter
  elif name_filter is not None:
    qfilter = [qlang.OP_AND, name_filter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
3263
def _FieldDescValues(fdef):
  """Build the description row for one query field.

  Helper for L{GenericListFields}.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list

  """
  kind_name = _QFT_NAMES.get(fdef.kind, fdef.kind)
  return [fdef.name, kind_name, fdef.title, fdef.doc]
3278
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: names of fields to query for; all fields if empty
  @type separator: string or None
  @param separator: string used to separate output columns
  @type header: bool
  @param header: whether to show the header row
  @param cl: luxi client to use; a new one is created if None

  """
  if cl is None:
    cl = GetClient()

  response = cl.QueryFields(resource, fields or None)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [TableColumn(title, str, False)
             for title in ("Name", "Type", "Title", "Description")]

  rows = [_FieldDescValues(fdef) for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
3318
class TableColumn:
  """Column description for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: column title
    @type fn: callable
    @param fn: function formatting a single cell value
    @type align_right: bool
    @param align_right: whether cell values are aligned on the right

    """
    self.align_right = align_right
    self.format = fn
    self.title = title
3338
3339 3340 -def _GetColFormatString(width, align_right):
3341 """Returns the format string for a field. 3342 3343 """ 3344 if align_right: 3345 sign = "" 3346 else: 3347 sign = "-" 3348 3349 return "%%%s%ss" % (sign, width)
3350
def FormatTable(rows, columns, header, separator):
  """Render rows of data as a table.

  @type rows: list of lists
  @param rows: row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: column descriptions
  @type header: bool
  @param header: whether to include a title row
  @type separator: string or None
  @param separator: if not None, string placed between columns instead of
      aligning them by width
  @rtype: list of strings
  @return: the formatted output lines

  """
  if header:
    data = [[col.title for col in columns]]
    widths = [len(col.title) for col in columns]
  else:
    data = []
    widths = [0] * len(columns)

  # Format every cell; track per-column widths only when aligning
  for row in rows:
    assert len(row) == len(columns)

    rendered = [col.format(value) for (value, col) in zip(row, columns)]

    if separator is None:
      widths = [max(old, len(value))
                for (old, value) in zip(widths, rendered)]

    data.append(rendered)

  if separator is not None:
    # No alignment needed when a separator is used
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid trailing spaces at the end of each line
    widths[-1] = 0

  fmt = " ".join([_GetColFormatString(width, col.align_right)
                  for (col, width) in zip(columns, widths)])

  return [fmt % tuple(row) for row in data]
3399
def FormatTimestamp(ts):
  """Render a timeval-style timestamp as text.

  @type ts: timestamp
  @param ts: a tuple/list of (seconds, microseconds)
  @rtype: string
  @return: the formatted time, or "?" if the input is malformed

  """
  if isinstance(ts, (tuple, list)) and len(ts) == 2:
    (sec, usecs) = ts
    return utils.FormatTime(sec, usecs=usecs)

  return "?"
3416
def ParseTimespec(value):
  """Parse a time specification into seconds.

  Recognized suffixes:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  A value without suffix is interpreted as seconds.

  @raise errors.OpPrereqError: if the specification is empty or invalid

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)

  multipliers = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }

  suffix = value[-1]
  if suffix in multipliers:
    factor = multipliers[suffix]
    value = value[:-1]
    if not value:
      # nothing left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
  else:
    factor = 1

  try:
    return int(value) * factor
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                               errors.ECODE_INVAL)
3461
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Return the names (or secondary IPs) of all online nodes.

  Unless C{nowarn} is set, a note listing skipped offline nodes is
  printed to stderr.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: suppress the note about skipped offline nodes
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  conditions = []

  if nodes:
    conditions.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    conditions.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                       [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    conditions.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if not conditions:
    final_filter = None
  elif len(conditions) == 1:
    final_filter = conditions[0]
  else:
    final_filter = [qlang.OP_AND] + conditions

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  # Each row is [(status, name), (status, offline), (status, sip)]
  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  fn = _GetSip if secondary_ips else _GetName
  return map(fn, online)
3540 3541 -def _ToStream(stream, txt, *args):
3542 """Write a message to a stream, bypassing the logging system 3543 3544 @type stream: file object 3545 @param stream: the file to which we should write 3546 @type txt: str 3547 @param txt: the message 3548 3549 """ 3550 try: 3551 if args: 3552 args = tuple(args) 3553 stream.write(txt % args) 3554 else: 3555 stream.write(txt) 3556 stream.write("\n") 3557 stream.flush() 3558 except IOError, err: 3559 if err.errno == errno.EPIPE: 3560 # our terminal went away, we'll exit 3561 sys.exit(constants.EXIT_FAILURE) 3562 else: 3563 raise
3564
def ToStdout(txt, *args):
  """Print a message to stdout, bypassing the logging system.

  Thin wrapper around L{_ToStream}.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
3576
def ToStderr(txt, *args):
  """Print a message to stderr, bypassing the logging system.

  Thin wrapper around L{_ToStream}.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
3588
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    """Initializes this class.

    @param cl: luxi client to use; a new one is created if None
    @type verbose: boolean
    @param verbose: whether GetResults() reports submitted job IDs
    @param opts: parsed options, passed to SetGenericOpcodeOpts() for
        every queued job
    @param feedback_fn: feedback function passed to PollJob()

    """
    # self.queue holds (index, name, ops) tuples not yet submitted;
    # self.jobs holds (index, submit status, job id, name) tuples
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    # Monotonic counter; its values allow GetResults() to restore the
    # original submission order
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    @return: C{fmt} interpolated with C{name} if the latter is non-empty,
        otherwise the empty string

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: bool
    @param each: if True, jobs are submitted one by one instead of in a
        single SubmitManyJobs call

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    # Pair each submission result with its queue entry (same order)
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    # Only query a bounded batch of jobs at a time
    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
3740
def FormatParamsDictInfo(param_dict, actual):
  """Recursively format a parameter dictionary against its full value set.

  @type param_dict: dict
  @param param_dict: the explicitly set parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  def _FormatValue(key, data):
    if isinstance(data, dict) and data:
      # Non-empty sub-dictionaries are formatted recursively
      return FormatParamsDictInfo(param_dict.get(key, {}), data)
    return str(param_dict.get(key, "default (%s)" % data))

  return dict((key, _FormatValue(key, data))
              for (key, data) in actual.items())
3761
def _FormatListInfoDefault(data, def_data):
  """Comma-join a list value, falling back to the defaults when unset.

  @param data: the list to format, or None to use the defaults
  @param def_data: the default list, used when C{data} is None
  @rtype: string

  """
  if data is None:
    return "default (%s)" % utils.CommaJoin(def_data)
  return utils.CommaJoin(data)
3769
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    # The cluster policy is self-contained; there is no separate
    # effective policy
    eff_ipolicy = custom_ipolicy

  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    minmax_source = custom_minmax
    has_own_specs = True
  else:
    minmax_source = eff_ipolicy[constants.ISPECS_MINMAX]
    has_own_specs = False

  minmax_out = []
  for (idx, minmax) in enumerate(minmax_source):
    pair = []
    for key in constants.ISPECS_MINMAX_KEYS:
      if has_own_specs:
        formatted = FormatParamsDictInfo(minmax[key], minmax[key])
      else:
        formatted = FormatParamsDictInfo({}, minmax[key])
      pair.append(("%s/%s" % (key, idx), formatted))
    minmax_out.append(pair)

  ret = [("bounds specs", minmax_out)]

  if iscluster:
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append((constants.ISPECS_STD,
                FormatParamsDictInfo(stdspecs, stdspecs)))

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )

  for key in constants.IPOLICY_PARAMETERS:
    ret.append(
      (key, str(custom_ipolicy.get(key, "default (%s)" % eff_ipolicy[key])))
      )

  return ret
3823
3824 3825 -def _PrintSpecsParameters(buf, specs):
3826 values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items())) 3827 buf.write(",".join(values))
3828
def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  if not isgroup:
    # Standard specs only exist above group level
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)

  first = True
  for minmax in ipolicy.get("minmax", []):
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    if not (minspecs and maxspecs):
      continue
    if first:
      buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
      first = False
    else:
      buf.write("//")
    buf.write("min:")
    _PrintSpecsParameters(buf, minspecs)
    buf.write("/max:")
    _PrintSpecsParameters(buf, maxspecs)
3863
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  Interactive confirmation before performing C{text} on the given items;
  long lists are initially hidden behind a "view" choice.

  @type names: list
  @param names: the list of names affected by the operation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  msg = ("The %s will operate on %d %s.\n%s"
         "Do you want to continue?" % (text, count, list_type, extra))
  affected = (("\nAffected %s:\n" % list_type) +
              "\n".join([" %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    # Too many entries to display inline; offer a "view" choice instead
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = msg
  else:
    question = msg + affected

  choice = AskUser(question, choices)
  if choice == "v":
    # The user asked to see the list; ask again with it shown
    choices.pop(1)
    choice = AskUser(msg + affected, choices)
  return choice
3902
def _MaybeParseUnit(elements):
  """Parse dict values through L{utils.ParseUnit}, keeping "default" as-is.

  @type elements: dict
  @param elements: mapping of names to potential values with units
  @rtype: dict

  """
  return dict((key,
               val if val == constants.VALUE_DEFAULT
               else utils.ParseUnit(val))
              for (key, val) in elements.items())
3915
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  """Fill an instance policy from the old-style split --specs-... options.

  @type ipolicy: dict
  @param ipolicy: policy dictionary, updated in place
  @param ispecs_mem_size: dict of memory sizes per spec key (may carry units)
  @param ispecs_cpu_count: dict of CPU counts per spec key
  @param ispecs_disk_count: dict of disk counts per spec key
  @param ispecs_disk_size: dict of disk sizes per spec key (may carry units)
  @param ispecs_nic_count: dict of NIC counts per spec key
  @type group_ipolicy: bool
  @param group_ipolicy: whether the policy is at group level
  @type fill_all: bool
  @param fill_all: whether missing values should be filled from the defaults

  """
  # Sizes may carry units; convert them to plain integers first
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

  # then transpose: from {param: {min/max/std: val}} to
  # {min/max/std: {param: val}}
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  minmax_out = {}
  for key in constants.ISPECS_MINMAX_KEYS:
    if fill_all:
      minmax_out[key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    else:
      minmax_out[key] = ispecs[key]
  # The split options can only describe a single min/max pair
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
  if fill_all:
    ipolicy[constants.ISPECS_STD] = \
      objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                       ispecs[constants.ISPECS_STD])
  else:
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
3974
def _ParseSpecUnit(spec, keyname):
  """Parse the size-like values (disk and memory) of an instance spec.

  @type spec: dict
  @param spec: the instance spec, possibly with unit-suffixed size values
  @type keyname: string
  @param keyname: name of the spec, used in error messages
  @rtype: dict
  @return: a copy of C{spec} with sizes converted to integers

  """
  parsed = spec.copy()
  for size_key in (constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE):
    if size_key not in parsed:
      continue
    try:
      parsed[size_key] = utils.ParseUnit(parsed[size_key])
    except (TypeError, ValueError, errors.UnitParseError) as err:
      raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                  " specs: %s" % (size_key, parsed[size_key],
                                                  keyname, err)),
                                 errors.ECODE_INVAL)
  return parsed
3987
def _ParseISpec(spec, keyname, required):
  """Parse and type-check one instance spec dictionary.

  @type spec: dict
  @param spec: the instance spec
  @type keyname: string
  @param keyname: name of the spec, used in error messages
  @type required: bool
  @param required: whether all spec parameters must be present
  @rtype: dict

  """
  parsed = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(parsed, constants.ISPECS_PARAMETER_TYPES)
  if required:
    missing = constants.ISPECS_PARAMETERS - frozenset(parsed.keys())
    if missing:
      raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                                 (keyname, utils.CommaJoin(missing)),
                                 errors.ECODE_INVAL)
  return parsed
3998
3999 4000 -def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
4001 ret = None 4002 if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and 4003 len(minmax_ispecs[0]) == 1): 4004 for (key, spec) in minmax_ispecs[0].items(): 4005 # This loop is executed exactly once 4006 if key in allowed_values and not spec: 4007 ret = key 4008 return ret
4009
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  """Fill an instance policy from the --ipolicy-...-specs style options.

  @type ipolicy_out: dict
  @param ipolicy_out: policy dictionary, updated in place
  @param minmax_ispecs: list of min/max spec pairs, or None
  @param std_ispecs: standard spec dictionary, or None
  @type group_ipolicy: bool
  @param group_ipolicy: whether the policy is at group level
  @param allowed_values: keywords accepted verbatim instead of spec values

  """
  def _ConvertPair(mmpair):
    converted = {}
    for (key, spec) in mmpair.items():
      if key not in constants.ISPECS_MINMAX_KEYS:
        raise errors.OpPrereqError("Invalid key in bounds instance"
                                   " specifications: %s" % key,
                                   errors.ECODE_INVAL)
      converted[key] = _ParseISpec(spec, key, True)
    return converted

  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    # An allowed keyword is stored verbatim
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    ipolicy_out[constants.ISPECS_MINMAX] = \
      [_ConvertPair(mmpair) for mmpair in minmax_ispecs]

  if std_ispecs is not None:
    assert not group_ipolicy  # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
4030
def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  The C{ispecs_*} arguments come from the split --specs-... options,
  while C{minmax_ispecs}/C{std_ispecs} come from the --ipolicy-...-specs
  options; the two families are mutually exclusive.

  @param group_ipolicy: whether the policy is at group level
  @param allowed_values: keywords accepted verbatim instead of real values
  @param fill_all: whether for cluster policies we should ensure that
      all values are filled
  @rtype: dict
  @return: the new instance policy

  """
  assert not (fill_all and allowed_values)

  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()
  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      # An allowed keyword is stored verbatim, not converted to a list
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out
4086
4087 4088 -def _SerializeGenericInfo(buf, data, level, afterkey=False):
4089 """Formatting core of L{PrintGenericInfo}. 4090 4091 @param buf: (string) stream to accumulate the result into 4092 @param data: data to format 4093 @type level: int 4094 @param level: depth in the data hierarchy, used for indenting 4095 @type afterkey: bool 4096 @param afterkey: True when we are in the middle of a line after a key (used 4097 to properly add newlines or indentation) 4098 4099 """ 4100 baseind = " " 4101 if isinstance(data, dict): 4102 if not data: 4103 buf.write("\n") 4104 else: 4105 if afterkey: 4106 buf.write("\n") 4107 doindent = True 4108 else: 4109 doindent = False 4110 for key in sorted(data): 4111 if doindent: 4112 buf.write(baseind * level) 4113 else: 4114 doindent = True 4115 buf.write(key) 4116 buf.write(": ") 4117 _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True) 4118 elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple): 4119 # list of tuples (an ordered dictionary) 4120 if afterkey: 4121 buf.write("\n") 4122 doindent = True 4123 else: 4124 doindent = False 4125 for (key, val) in data: 4126 if doindent: 4127 buf.write(baseind * level) 4128 else: 4129 doindent = True 4130 buf.write(key) 4131 buf.write(": ") 4132 _SerializeGenericInfo(buf, val, level + 1, afterkey=True) 4133 elif isinstance(data, list): 4134 if not data: 4135 buf.write("\n") 4136 else: 4137 if afterkey: 4138 buf.write("\n") 4139 doindent = True 4140 else: 4141 doindent = False 4142 for item in data: 4143 if doindent: 4144 buf.write(baseind * level) 4145 else: 4146 doindent = True 4147 buf.write("-") 4148 buf.write(baseind[1:]) 4149 _SerializeGenericInfo(buf, item, level + 1) 4150 else: 4151 # This branch should be only taken for strings, but it's practically 4152 # impossible to guarantee that no other types are produced somewhere 4153 buf.write(str(data)) 4154 buf.write("\n")
4155
def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of pairs (key, value), where key is a string and value is of
          any of the types listed here; it's a way to encode ordered
          dictionaries
        - lists of any of the types listed here
        - strings

  """
  out = StringIO()
  _SerializeGenericInfo(out, data, 0)
  # The serializer always terminates with a newline; drop it, as ToStdout
  # adds its own
  text = out.getvalue()
  ToStdout(text.rstrip("\n"))