Package ganeti :: Module cli
[hide private]
[frames] | [no frames]

Source Code for Module ganeti.cli

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30   
  31  """Module dealing with command line parsing""" 
  32   
  33   
  34  import sys 
  35  import textwrap 
  36  import os.path 
  37  import time 
  38  import logging 
  39  import errno 
  40  import itertools 
  41  import shlex 
  42  from cStringIO import StringIO 
  43   
  44  from ganeti import utils 
  45  from ganeti import errors 
  46  from ganeti import constants 
  47  from ganeti import opcodes 
  48  import ganeti.rpc.errors as rpcerr 
  49  import ganeti.rpc.node as rpc 
  50  from ganeti import ssh 
  51  from ganeti import compat 
  52  from ganeti import netutils 
  53  from ganeti import qlang 
  54  from ganeti import objects 
  55  from ganeti import pathutils 
  56  from ganeti import serializer 
  57   
  58  from ganeti.runtime import (GetClient) 
  59   
  60  from optparse import (OptionParser, TitledHelpFormatter, 
  61                        Option, OptionValueError) 
  62   
  63   
  64  __all__ = [ 
  65    # Command line options 
  66    "ABSOLUTE_OPT", 
  67    "ADD_UIDS_OPT", 
  68    "ADD_RESERVED_IPS_OPT", 
  69    "ALLOCATABLE_OPT", 
  70    "ALLOC_POLICY_OPT", 
  71    "ALL_OPT", 
  72    "ALLOW_FAILOVER_OPT", 
  73    "AUTO_PROMOTE_OPT", 
  74    "AUTO_REPLACE_OPT", 
  75    "BACKEND_OPT", 
  76    "BLK_OS_OPT", 
  77    "CAPAB_MASTER_OPT", 
  78    "CAPAB_VM_OPT", 
  79    "CLEANUP_OPT", 
  80    "CLUSTER_DOMAIN_SECRET_OPT", 
  81    "CONFIRM_OPT", 
  82    "CP_SIZE_OPT", 
  83    "COMPRESSION_TOOLS_OPT", 
  84    "DEBUG_OPT", 
  85    "DEBUG_SIMERR_OPT", 
  86    "DISKIDX_OPT", 
  87    "DISK_OPT", 
  88    "DISK_PARAMS_OPT", 
  89    "DISK_TEMPLATE_OPT", 
  90    "DRAINED_OPT", 
  91    "DRY_RUN_OPT", 
  92    "DRBD_HELPER_OPT", 
  93    "DST_NODE_OPT", 
  94    "EARLY_RELEASE_OPT", 
  95    "ENABLED_HV_OPT", 
  96    "ENABLED_DISK_TEMPLATES_OPT", 
  97    "ENABLED_USER_SHUTDOWN_OPT", 
  98    "ERROR_CODES_OPT", 
  99    "FAILURE_ONLY_OPT", 
 100    "FIELDS_OPT", 
 101    "FILESTORE_DIR_OPT", 
 102    "FILESTORE_DRIVER_OPT", 
 103    "FORCE_FAILOVER_OPT", 
 104    "FORCE_FILTER_OPT", 
 105    "FORCE_OPT", 
 106    "FORCE_VARIANT_OPT", 
 107    "GATEWAY_OPT", 
 108    "GATEWAY6_OPT", 
 109    "GLOBAL_FILEDIR_OPT", 
 110    "HID_OS_OPT", 
 111    "GLOBAL_GLUSTER_FILEDIR_OPT", 
 112    "GLOBAL_SHARED_FILEDIR_OPT", 
 113    "HOTPLUG_OPT", 
 114    "HOTPLUG_IF_POSSIBLE_OPT", 
 115    "HVLIST_OPT", 
 116    "HVOPTS_OPT", 
 117    "HYPERVISOR_OPT", 
 118    "IALLOCATOR_OPT", 
 119    "DEFAULT_IALLOCATOR_OPT", 
 120    "DEFAULT_IALLOCATOR_PARAMS_OPT", 
 121    "IDENTIFY_DEFAULTS_OPT", 
 122    "IGNORE_CONSIST_OPT", 
 123    "IGNORE_ERRORS_OPT", 
 124    "IGNORE_FAILURES_OPT", 
 125    "IGNORE_OFFLINE_OPT", 
 126    "IGNORE_REMOVE_FAILURES_OPT", 
 127    "IGNORE_SECONDARIES_OPT", 
 128    "IGNORE_SIZE_OPT", 
 129    "INCLUDEDEFAULTS_OPT", 
 130    "INTERVAL_OPT", 
 131    "INSTALL_IMAGE_OPT", 
 132    "INSTANCE_COMMUNICATION_OPT", 
 133    "INSTANCE_COMMUNICATION_NETWORK_OPT", 
 134    "MAC_PREFIX_OPT", 
 135    "MAINTAIN_NODE_HEALTH_OPT", 
 136    "MASTER_NETDEV_OPT", 
 137    "MASTER_NETMASK_OPT", 
 138    "MAX_TRACK_OPT", 
 139    "MC_OPT", 
 140    "MIGRATION_MODE_OPT", 
 141    "MODIFY_ETCHOSTS_OPT", 
 142    "NET_OPT", 
 143    "NETWORK_OPT", 
 144    "NETWORK6_OPT", 
 145    "NEW_CLUSTER_CERT_OPT", 
 146    "NEW_NODE_CERT_OPT", 
 147    "NEW_CLUSTER_DOMAIN_SECRET_OPT", 
 148    "NEW_CONFD_HMAC_KEY_OPT", 
 149    "NEW_RAPI_CERT_OPT", 
 150    "NEW_PRIMARY_OPT", 
 151    "NEW_SECONDARY_OPT", 
 152    "NEW_SPICE_CERT_OPT", 
 153    "NIC_PARAMS_OPT", 
 154    "NOCONFLICTSCHECK_OPT", 
 155    "NODE_FORCE_JOIN_OPT", 
 156    "NODE_LIST_OPT", 
 157    "NODE_PLACEMENT_OPT", 
 158    "NODEGROUP_OPT", 
 159    "NODE_PARAMS_OPT", 
 160    "NODE_POWERED_OPT", 
 161    "NOHDR_OPT", 
 162    "NOIPCHECK_OPT", 
 163    "NO_INSTALL_OPT", 
 164    "NONAMECHECK_OPT", 
 165    "NOMODIFY_ETCHOSTS_OPT", 
 166    "NOMODIFY_SSH_SETUP_OPT", 
 167    "NONICS_OPT", 
 168    "NONLIVE_OPT", 
 169    "NONPLUS1_OPT", 
 170    "NORUNTIME_CHGS_OPT", 
 171    "NOSHUTDOWN_OPT", 
 172    "NOSTART_OPT", 
 173    "NOSSH_KEYCHECK_OPT", 
 174    "NOVOTING_OPT", 
 175    "NO_REMEMBER_OPT", 
 176    "NWSYNC_OPT", 
 177    "OFFLINE_INST_OPT", 
 178    "ONLINE_INST_OPT", 
 179    "ON_PRIMARY_OPT", 
 180    "ON_SECONDARY_OPT", 
 181    "OFFLINE_OPT", 
 182    "OS_OPT", 
 183    "OSPARAMS_OPT", 
 184    "OSPARAMS_PRIVATE_OPT", 
 185    "OSPARAMS_SECRET_OPT", 
 186    "OS_SIZE_OPT", 
 187    "OOB_TIMEOUT_OPT", 
 188    "POWER_DELAY_OPT", 
 189    "PREALLOC_WIPE_DISKS_OPT", 
 190    "PRIMARY_IP_VERSION_OPT", 
 191    "PRIMARY_ONLY_OPT", 
 192    "PRINT_JOBID_OPT", 
 193    "PRIORITY_OPT", 
 194    "RAPI_CERT_OPT", 
 195    "READD_OPT", 
 196    "REASON_OPT", 
 197    "REBOOT_TYPE_OPT", 
 198    "REMOVE_INSTANCE_OPT", 
 199    "REMOVE_RESERVED_IPS_OPT", 
 200    "REMOVE_UIDS_OPT", 
 201    "RESERVED_LVS_OPT", 
 202    "RQL_OPT", 
 203    "RUNTIME_MEM_OPT", 
 204    "ROMAN_OPT", 
 205    "SECONDARY_IP_OPT", 
 206    "SECONDARY_ONLY_OPT", 
 207    "SELECT_OS_OPT", 
 208    "SEP_OPT", 
 209    "SHOWCMD_OPT", 
 210    "SHOW_MACHINE_OPT", 
 211    "COMPRESS_OPT", 
 212    "TRANSPORT_COMPRESSION_OPT", 
 213    "SHUTDOWN_TIMEOUT_OPT", 
 214    "SINGLE_NODE_OPT", 
 215    "SPECS_CPU_COUNT_OPT", 
 216    "SPECS_DISK_COUNT_OPT", 
 217    "SPECS_DISK_SIZE_OPT", 
 218    "SPECS_MEM_SIZE_OPT", 
 219    "SPECS_NIC_COUNT_OPT", 
 220    "SPLIT_ISPECS_OPTS", 
 221    "IPOLICY_STD_SPECS_OPT", 
 222    "IPOLICY_DISK_TEMPLATES", 
 223    "IPOLICY_VCPU_RATIO", 
 224    "IPOLICY_SPINDLE_RATIO", 
 225    "SEQUENTIAL_OPT", 
 226    "SPICE_CACERT_OPT", 
 227    "SPICE_CERT_OPT", 
 228    "SRC_DIR_OPT", 
 229    "SRC_NODE_OPT", 
 230    "SUBMIT_OPT", 
 231    "SUBMIT_OPTS", 
 232    "STARTUP_PAUSED_OPT", 
 233    "STATIC_OPT", 
 234    "SYNC_OPT", 
 235    "TAG_ADD_OPT", 
 236    "TAG_SRC_OPT", 
 237    "TIMEOUT_OPT", 
 238    "TO_GROUP_OPT", 
 239    "UIDPOOL_OPT", 
 240    "USEUNITS_OPT", 
 241    "USE_EXTERNAL_MIP_SCRIPT", 
 242    "USE_REPL_NET_OPT", 
 243    "VERBOSE_OPT", 
 244    "VG_NAME_OPT", 
 245    "WFSYNC_OPT", 
 246    "YES_DOIT_OPT", 
 247    "ZEROING_IMAGE_OPT", 
 248    "ZERO_FREE_SPACE_OPT", 
 249    "HELPER_STARTUP_TIMEOUT_OPT", 
 250    "HELPER_SHUTDOWN_TIMEOUT_OPT", 
 251    "ZEROING_TIMEOUT_FIXED_OPT", 
 252    "ZEROING_TIMEOUT_PER_MIB_OPT", 
 253    "DISK_STATE_OPT", 
 254    "HV_STATE_OPT", 
 255    "IGNORE_IPOLICY_OPT", 
 256    "INSTANCE_POLICY_OPTS", 
 257    # Generic functions for CLI programs 
 258    "ConfirmOperation", 
 259    "CreateIPolicyFromOpts", 
 260    "GenericMain", 
 261    "GenericInstanceCreate", 
 262    "GenericList", 
 263    "GenericListFields", 
 264    "GetClient", 
 265    "GetOnlineNodes", 
 266    "GetNodesSshPorts", 
 267    "JobExecutor", 
 268    "JobSubmittedException", 
 269    "ParseTimespec", 
 270    "RunWhileClusterStopped", 
 271    "RunWhileDaemonsStopped", 
 272    "SubmitOpCode", 
 273    "SubmitOpCodeToDrainedQueue", 
 274    "SubmitOrSend", 
 275    "UsesRPC", 
 276    # Formatting functions 
 277    "ToStderr", "ToStdout", 
 278    "ToStdoutAndLoginfo", 
 279    "FormatError", 
 280    "FormatQueryResult", 
 281    "FormatParamsDictInfo", 
 282    "FormatPolicyInfo", 
 283    "PrintIPolicyCommand", 
 284    "PrintGenericInfo", 
 285    "GenerateTable", 
 286    "AskUser", 
 287    "FormatTimestamp", 
 288    "FormatLogMessage", 
 289    # Tags functions 
 290    "ListTags", 
 291    "AddTags", 
 292    "RemoveTags", 
 293    # command line options support infrastructure 
 294    "ARGS_MANY_INSTANCES", 
 295    "ARGS_MANY_NODES", 
 296    "ARGS_MANY_GROUPS", 
 297    "ARGS_MANY_NETWORKS", 
 298    "ARGS_NONE", 
 299    "ARGS_ONE_INSTANCE", 
 300    "ARGS_ONE_NODE", 
 301    "ARGS_ONE_GROUP", 
 302    "ARGS_ONE_OS", 
 303    "ARGS_ONE_NETWORK", 
 304    "ArgChoice", 
 305    "ArgCommand", 
 306    "ArgFile", 
 307    "ArgGroup", 
 308    "ArgHost", 
 309    "ArgInstance", 
 310    "ArgJobId", 
 311    "ArgNetwork", 
 312    "ArgNode", 
 313    "ArgOs", 
 314    "ArgExtStorage", 
 315    "ArgSuggest", 
 316    "ArgUnknown", 
 317    "OPT_COMPL_INST_ADD_NODES", 
 318    "OPT_COMPL_MANY_NODES", 
 319    "OPT_COMPL_ONE_IALLOCATOR", 
 320    "OPT_COMPL_ONE_INSTANCE", 
 321    "OPT_COMPL_ONE_NODE", 
 322    "OPT_COMPL_ONE_NODEGROUP", 
 323    "OPT_COMPL_ONE_NETWORK", 
 324    "OPT_COMPL_ONE_OS", 
 325    "OPT_COMPL_ONE_EXTSTORAGE", 
 326    "cli_option", 
 327    "FixHvParams", 
 328    "SplitNodeOption", 
 329    "CalculateOSNames", 
 330    "ParseFields", 
 331    "COMMON_CREATE_OPTS", 
 332    ] 
 333   
# Key prefix in "key=val" style options meaning "set <key> to False"
# (the prefix is stripped from the resulting key, see _SplitKeyVal)
NO_PREFIX = "no_"
# Key prefix meaning "set <key> to None" (i.e. remove/reset the value)
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients: normal result, unknown field requested,
# or the daemon could not compute all requested data
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary; group-level policies
# have no "std" values (those exist only at cluster level)
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_NUMBER_FLOAT: "Floating-point number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }
class _Argument(object):
  """Base class for positional argument descriptions.

  Records how many times an argument may be repeated on the command
  line: at least C{min} times and, unless C{max} is None (unlimited),
  at most C{max} times.

  """
  def __init__(self, min=0, max=None): # pylint: disable=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return "<%s min=%s max=%s>" % (self.__class__.__name__,
                                   self.min, self.max)
391
class ArgSuggest(_Argument):
  """Argument carrying a list of suggested values.

  The values given to the constructor are offered (e.g. for shell
  completion) but, unlike L{ArgChoice}, not enforced.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return "<%s min=%s max=%s choices=%r>" % (self.__class__.__name__,
                                              self.min, self.max,
                                              self.choices)
407
class ArgChoice(ArgSuggest):
  """Choice argument.

  Like L{ArgSuggest} the value can be any of the ones passed to the
  constructor, but here the value must actually be one of the choices.

  """


class ArgUnknown(_Argument):
  """Argument whose meaning is not known statically (e.g. determined at
  runtime).

  """


class ArgInstance(_Argument):
  """Argument naming one or more instances.

  """


class ArgNode(_Argument):
  """Argument naming a node.

  """


class ArgNetwork(_Argument):
  """Argument naming a network.

  """


class ArgGroup(_Argument):
  """Argument naming a node group.

  """


class ArgJobId(_Argument):
  """Argument holding a job ID.

  """


class ArgFile(_Argument):
  """Argument holding a file path.

  """


class ArgCommand(_Argument):
  """Argument holding a command.

  """


class ArgHost(_Argument):
  """Argument naming a host.

  """


class ArgOs(_Argument):
  """Argument naming an OS.

  """


class ArgExtStorage(_Argument):
  """Argument naming an ExtStorage provider.

  """


# Ready-made argument lists for the common command signatures
ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag target (kind, name) from options and arguments.

  Note that this function will modify its args parameter: for
  non-cluster targets the object name is popped from the front of the
  argument list.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, no name needed
    return (kind, "")
  if kind in (constants.TAG_NODEGROUP,
              constants.TAG_NODE,
              constants.TAG_NETWORK,
              constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    return (kind, args.pop(0))
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
520
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @raises EnvironmentError: if the tags source file cannot be opened

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
    # stdin is not ours to close; closing it would break any later
    # reads from stdin in the same process
    close_fh = False
  else:
    new_fh = open(fname, "r")
    close_fh = True
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    if close_fh:
      new_fh.close()
  args.extend(new_data)
549
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  client = GetClient()
  for tag in sorted(client.QueryTags(kind, name)):
    ToStdout(tag)
567
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  # tags may additionally come from a file given via --from
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  SubmitOrSend(opcodes.OpTagsSet(kind=kind, name=name, tags=args), opts)
584
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  (kind, name) = _ExtractTagsObject(opts, args)
  # tags may additionally come from a file given via --from
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  SubmitOrSend(opcodes.OpTagsDel(kind=kind, name=name, tags=args), opts)
601
def check_unit(option, opt, value): # pylint: disable=W0613
  """OptParse's custom converter for units.

  @type opt: string
  @param opt: the option name, used only in error messages
  @type value: string
  @param value: the value to parse, e.g. "512M" or "1G"
  @rtype: int
  @return: the value converted to mebibytes
  @raises OptionValueError: if the value cannot be parsed as a unit

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    # re-raise as the optparse-native exception so optparse reports it
    raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data, parse_prefixes):
  """Convert a KeyVal string into a dict.

  This function will convert a key=val[,...] string into a dict. Empty
  values will be converted specially: keys which have the prefix 'no_'
  will have the value=False and the prefix stripped, keys with the prefix
  "-" will have value=None and the prefix stripped, and the others will
  have value=True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially
  @rtype: dict
  @return: {key=val, key=val}
  @raises errors.ParameterError: if there are duplicate keys

  """
  parsed = {}
  if not data:
    return parsed
  for elem in utils.UnescapeAndSplit(data, sep=","):
    if "=" in elem:
      (key, val) = elem.split("=", 1)
    elif not parse_prefixes:
      raise errors.ParameterError("Missing value for key '%s' in option %s" %
                                  (elem, opt))
    elif elem.startswith(NO_PREFIX):
      (key, val) = (elem[len(NO_PREFIX):], False)
    elif elem.startswith(UN_PREFIX):
      (key, val) = (elem[len(UN_PREFIX):], None)
    else:
      (key, val) = (elem, True)
    if key in parsed:
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                  (key, opt))
    parsed[key] = val
  return parsed
654
def _SplitIdentKeyVal(opt, value, parse_prefixes):
  """Helper function to parse "ident:key=val,key=val" options.

  @type opt: string
  @param opt: option name, used in error messages
  @type value: string
  @param value: expected to be in the format "ident:key=val,key=val,..."
  @type parse_prefixes: bool
  @param parse_prefixes: whether to handle prefixes specially (see
      L{_SplitKeyVal})
  @rtype: tuple
  @return: (ident, {key=val, key=val})
  @raises errors.ParameterError: in case of duplicates or other parsing errors

  """
  # partition() yields an empty rest when there is no ":" separator
  (ident, _, rest) = value.partition(":")

  if parse_prefixes and ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    return (ident[len(NO_PREFIX):], False)

  # "-<ident>" means removal, but only when what follows the prefix is
  # not a digit (to keep negative-looking identifiers usable)
  is_removal = (parse_prefixes and ident.startswith(UN_PREFIX) and
                (len(ident) <= len(UN_PREFIX) or
                 not ident[len(UN_PREFIX)].isdigit()))
  if is_removal:
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    return (ident[len(UN_PREFIX):], None)

  return (ident, _SplitKeyVal(opt, rest, parse_prefixes))
691
def check_ident_key_val(option, opt, value): # pylint: disable=W0613
  """Custom optparse parser for "ident:key=val,key=val" option values.

  The parsed value is stored as a tuple (ident, {key: val}); because a
  tuple is returned, the option can be used with action=append.

  """
  return _SplitIdentKeyVal(opt, value, True)
def check_key_val(option, opt, value): # pylint: disable=W0613
  """Custom optparse parser for "key=val,key=val" option values.

  The parsed value is stored as a dict {key: val}.

  """
  return _SplitKeyVal(opt, value, True)
def check_key_private_val(option, opt, value): # pylint: disable=W0613
  """Custom optparse parser for private/secret "key=val,key=val" values.

  The parsed value is stored as a L{serializer.PrivateDict} {key: val},
  so the values are not leaked into logs.

  """
  return serializer.PrivateDict(_SplitKeyVal(opt, value, True))
def _SplitListKeyVal(opt, value):
  """Parse a "/"-separated list of "ident:key=val,..." groups into a dict.

  @type opt: string
  @param opt: option name, used in error messages
  @rtype: dict
  @return: {ident: {key: val, ...}, ...}
  @raises errors.ParameterError: on empty sections or duplicated idents

  """
  groups = {}
  for section in value.split("/"):
    if not section:
      raise errors.ParameterError("Empty section in option '%s'" % opt)
    (ident, valdict) = _SplitIdentKeyVal(opt, section, False)
    if ident in groups:
      raise errors.ParameterError("Duplicated parameter '%s' in parsing %s: %s" %
                                  (ident, opt, section))
    groups[ident] = valdict
  return groups
733
def check_multilist_ident_key_val(_, opt, value):
  """Custom parser for "ident:key=val,key=val/ident:key=val//ident:.." options.

  @rtype: list of dictionary
  @return: [{ident: {key: val, key: val}, ident: {key: val}}, {ident:..}]

  """
  # "//" separates the outer list entries, "/" the inner groups
  return [_SplitListKeyVal(opt, chunk) for chunk in value.split("//")]
746
def check_bool(option, opt, value): # pylint: disable=W0613
  """Custom optparse parser for yes/no option values.

  The parsed value is stored as either True or False.

  """
  value = value.lower()
  if value in (constants.VALUE_FALSE, "no"):
    return False
  if value in (constants.VALUE_TRUE, "yes"):
    return True
  raise errors.ParameterError("Invalid boolean value '%s'" % value)
def check_list(option, opt, value): # pylint: disable=W0613
  """Custom optparse parser for comma-separated lists.

  """
  # the explicit check matters: "".split(",") is [""], not an empty list
  if value:
    return utils.UnescapeAndSplit(value)
  return []
def check_maybefloat(option, opt, value): # pylint: disable=W0613
  """Custom optparse parser for float values that may also be "default".

  """
  value = value.lower()
  if value != constants.VALUE_DEFAULT:
    return float(value)
  return value


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_EXTSTORAGE,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_ONE_NETWORK,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 109)

OPT_COMPL_ALL = compat.UniqueFrozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_EXTSTORAGE,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_ONE_NETWORK,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Extends the stock optparse Option with a completion_suggest attribute
  (used for shell completion) and with the Ganeti-specific value types
  registered above (key=val dicts, units, booleans, lists, ...).

  """
  # extra recognized keyword argument for cli_option(...)
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  # additional values accepted for the type="..." keyword argument
  TYPES = Option.TYPES + (
    "multilistidentkeyval",
    "identkeyval",
    "keyval",
    "keyprivateval",
    "unit",
    "bool",
    "list",
    "maybefloat",
    )
  # map each new type to its converter function defined in this module
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["multilistidentkeyval"] = check_multilist_ident_key_val
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["keyprivateval"] = check_key_private_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
  TYPE_CHECKER["list"] = check_list
  TYPE_CHECKER["maybefloat"] = check_maybefloat
# optparse.py sets make_option, so we do it for our own option class, too
cli_option = CliOption


# help-text shorthand for boolean options taking "yes|no" values
_YORNO = "yes|no"

# Generic options shared by most client scripts

DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
                       help="Increase debugging level")

NOHDR_OPT = cli_option("--no-headers", default=False,
                       action="store_true", dest="no_headers",
                       help="Don't display column headers")

SEP_OPT = cli_option("--separator", default=None,
                     action="store", dest="separator",
                     help=("Separator between output fields"
                           " (defaults to one space)"))

USEUNITS_OPT = cli_option("--units", default=None,
                          dest="units", choices=("h", "m", "g", "t"),
                          help="Specify units for output (one of h/m/g/t)")

FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
                        type="string", metavar="FIELDS",
                        help="Comma separated list of output fields")

FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
                       default=False, help="Force the operation")

CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
                         default=False, help="Do not require confirmation")

IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
                                action="store_true", default=False,
                                help=("Ignore offline nodes and do as much"
                                      " as possible"))

TAG_ADD_OPT = cli_option("--tags", dest="tags",
                         default=None, help="Comma-separated list of instance"
                         " tags")

TAG_SRC_OPT = cli_option("--from", dest="tags_source",
                         default=None, help="File with tag names")

# Job submission options (see SubmitOrSend)

SUBMIT_OPT = cli_option("--submit", dest="submit_only",
                        default=False, action="store_true",
                        help=("Submit the job and return the job ID, but"
                              " don't wait for the job to finish"))

PRINT_JOBID_OPT = cli_option("--print-jobid", dest="print_jobid",
                             default=False, action="store_true",
                             help=("Additionally print the job as first line"
                                   " on stdout (for scripting)."))
893 894 SEQUENTIAL_OPT = cli_option("--sequential", dest="sequential", 895 default=False, action="store_true", 896 help=("Execute all resulting jobs sequentially")) 897 898 SYNC_OPT = cli_option("--sync", dest="do_locking", 899 default=False, action="store_true", 900 help=("Grab locks while doing the queries" 901 " in order to ensure more consistent results")) 902 903 DRY_RUN_OPT = cli_option("--dry-run", default=False, 904 action="store_true", 905 help=("Do not execute the operation, just run the" 906 " check steps and verify if it could be" 907 " executed")) 908 909 VERBOSE_OPT = cli_option("-v", "--verbose", default=False, 910 action="store_true", 911 help="Increase the verbosity of the operation") 912 913 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False, 914 action="store_true", dest="simulate_errors", 915 help="Debugging option that makes the operation" 916 " treat most runtime checks as failed") 917 918 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync", 919 default=True, action="store_false", 920 help="Don't wait for sync (DANGEROUS!)") 921 922 WFSYNC_OPT = cli_option("--wait-for-sync", dest="wait_for_sync", 923 default=False, action="store_true", 924 help="Wait for disks to sync") 925 926 ONLINE_INST_OPT = cli_option("--online", dest="online_inst", 927 action="store_true", default=False, 928 help="Enable offline instance") 929 930 OFFLINE_INST_OPT = cli_option("--offline", dest="offline_inst", 931 action="store_true", default=False, 932 help="Disable down instance") 933 934 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template", 935 help=("Custom disk setup (%s)" % 936 utils.CommaJoin(constants.DISK_TEMPLATES)), 937 default=None, metavar="TEMPL", 938 choices=list(constants.DISK_TEMPLATES)) 939 940 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true", 941 help="Do not create any network cards for" 942 " the instance") 943 944 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", 
dest="file_storage_dir", 945 help="Relative path under default cluster-wide" 946 " file storage dir to store file-based disks", 947 default=None, metavar="<DIR>") 948 949 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver", 950 help="Driver to use for image files", 951 default=None, metavar="<DRIVER>", 952 choices=list(constants.FILE_DRIVER)) 953 954 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>", 955 help="Select nodes for the instance automatically" 956 " using the <NAME> iallocator plugin", 957 default=None, type="string", 958 completion_suggest=OPT_COMPL_ONE_IALLOCATOR) 959 960 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator", 961 metavar="<NAME>", 962 help="Set the default instance" 963 " allocator plugin", 964 default=None, type="string", 965 completion_suggest=OPT_COMPL_ONE_IALLOCATOR) 966 967 DEFAULT_IALLOCATOR_PARAMS_OPT = cli_option("--default-iallocator-params", 968 dest="default_iallocator_params", 969 help="iallocator template" 970 " parameters, in the format" 971 " template:option=value," 972 " option=value,...", 973 type="keyval", 974 default=None) 975 976 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run", 977 metavar="<os>", 978 completion_suggest=OPT_COMPL_ONE_OS) 979 980 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams", 981 type="keyval", default={}, 982 help="OS parameters") 983 984 OSPARAMS_PRIVATE_OPT = cli_option("--os-parameters-private", 985 dest="osparams_private", 986 type="keyprivateval", 987 default=serializer.PrivateDict(), 988 help="Private OS parameters" 989 " (won't be logged)") 990 991 OSPARAMS_SECRET_OPT = cli_option("--os-parameters-secret", 992 dest="osparams_secret", 993 type="keyprivateval", 994 default=serializer.PrivateDict(), 995 help="Secret OS parameters (won't be logged or" 996 " saved; you must supply these for every" 997 " operation.)") 998 999 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant", 1000 
action="store_true", default=False, 1001 help="Force an unknown variant") 1002 1003 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install", 1004 action="store_true", default=False, 1005 help="Do not install the OS (will" 1006 " enable no-start)") 1007 1008 NORUNTIME_CHGS_OPT = cli_option("--no-runtime-changes", 1009 dest="allow_runtime_chgs", 1010 default=True, action="store_false", 1011 help="Don't allow runtime changes") 1012 1013 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams", 1014 type="keyval", default={}, 1015 help="Backend parameters") 1016 1017 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval", 1018 default={}, dest="hvparams", 1019 help="Hypervisor parameters") 1020 1021 DISK_PARAMS_OPT = cli_option("-D", "--disk-parameters", dest="diskparams", 1022 help="Disk template parameters, in the format" 1023 " template:option=value,option=value,...", 1024 type="identkeyval", action="append", default=[]) 1025 1026 SPECS_MEM_SIZE_OPT = cli_option("--specs-mem-size", dest="ispecs_mem_size", 1027 type="keyval", default={}, 1028 help="Memory size specs: list of key=value," 1029 " where key is one of min, max, std" 1030 " (in MB or using a unit)") 1031 1032 SPECS_CPU_COUNT_OPT = cli_option("--specs-cpu-count", dest="ispecs_cpu_count", 1033 type="keyval", default={}, 1034 help="CPU count specs: list of key=value," 1035 " where key is one of min, max, std") 1036 1037 SPECS_DISK_COUNT_OPT = cli_option("--specs-disk-count", 1038 dest="ispecs_disk_count", 1039 type="keyval", default={}, 1040 help="Disk count specs: list of key=value," 1041 " where key is one of min, max, std") 1042 1043 SPECS_DISK_SIZE_OPT = cli_option("--specs-disk-size", dest="ispecs_disk_size", 1044 type="keyval", default={}, 1045 help="Disk size specs: list of key=value," 1046 " where key is one of min, max, std" 1047 " (in MB or using a unit)") 1048 1049 SPECS_NIC_COUNT_OPT = cli_option("--specs-nic-count", dest="ispecs_nic_count", 1050 type="keyval", 
default={}, 1051 help="NIC count specs: list of key=value," 1052 " where key is one of min, max, std") 1053 1054 IPOLICY_BOUNDS_SPECS_STR = "--ipolicy-bounds-specs" 1055 IPOLICY_BOUNDS_SPECS_OPT = cli_option(IPOLICY_BOUNDS_SPECS_STR, 1056 dest="ipolicy_bounds_specs", 1057 type="multilistidentkeyval", default=None, 1058 help="Complete instance specs limits") 1059 1060 IPOLICY_STD_SPECS_STR = "--ipolicy-std-specs" 1061 IPOLICY_STD_SPECS_OPT = cli_option(IPOLICY_STD_SPECS_STR, 1062 dest="ipolicy_std_specs", 1063 type="keyval", default=None, 1064 help="Complete standard instance specs") 1065 1066 IPOLICY_DISK_TEMPLATES = cli_option("--ipolicy-disk-templates", 1067 dest="ipolicy_disk_templates", 1068 type="list", default=None, 1069 help="Comma-separated list of" 1070 " enabled disk templates") 1071 1072 IPOLICY_VCPU_RATIO = cli_option("--ipolicy-vcpu-ratio", 1073 dest="ipolicy_vcpu_ratio", 1074 type="maybefloat", default=None, 1075 help="The maximum allowed vcpu-to-cpu ratio") 1076 1077 IPOLICY_SPINDLE_RATIO = cli_option("--ipolicy-spindle-ratio", 1078 dest="ipolicy_spindle_ratio", 1079 type="maybefloat", default=None, 1080 help=("The maximum allowed instances to" 1081 " spindle ratio")) 1082 1083 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor", 1084 help="Hypervisor and hypervisor options, in the" 1085 " format hypervisor:option=value,option=value,...", 1086 default=None, type="identkeyval") 1087 1088 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams", 1089 help="Hypervisor and hypervisor options, in the" 1090 " format hypervisor:option=value,option=value,...", 1091 default=[], action="append", type="identkeyval") 1092 1093 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True, 1094 action="store_false", 1095 help="Don't check that the instance's IP" 1096 " is alive") 1097 1098 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check", 1099 default=True, action="store_false", 1100 
help="Don't check that the instance's name" 1101 " is resolvable") 1102 1103 NET_OPT = cli_option("--net", 1104 help="NIC parameters", default=[], 1105 dest="nics", action="append", type="identkeyval") 1106 1107 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[], 1108 dest="disks", action="append", type="identkeyval") 1109 1110 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None, 1111 help="Comma-separated list of disks" 1112 " indices to act on (e.g. 0,2) (optional," 1113 " defaults to all disks)") 1114 1115 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size", 1116 help="Enforces a single-disk configuration using the" 1117 " given disk size, in MiB unless a suffix is used", 1118 default=None, type="unit", metavar="<size>") 1119 1120 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency", 1121 dest="ignore_consistency", 1122 action="store_true", default=False, 1123 help="Ignore the consistency of the disks on" 1124 " the secondary") 1125 1126 ALLOW_FAILOVER_OPT = cli_option("--allow-failover", 1127 dest="allow_failover", 1128 action="store_true", default=False, 1129 help="If migration is not possible fallback to" 1130 " failover") 1131 1132 FORCE_FAILOVER_OPT = cli_option("--force-failover", 1133 dest="force_failover", 1134 action="store_true", default=False, 1135 help="Do not use migration, always use" 1136 " failover") 1137 1138 NONLIVE_OPT = cli_option("--non-live", dest="live", 1139 default=True, action="store_false", 1140 help="Do a non-live migration (this usually means" 1141 " freeze the instance, save the state, transfer and" 1142 " only then resume running on the secondary node)") 1143 1144 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode", 1145 default=None, 1146 choices=list(constants.HT_MIGRATION_MODES), 1147 help="Override default migration mode (choose" 1148 " either live or non-live") 1149 1150 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node", 1151 help="Target node and optional 
secondary node", 1152 metavar="<pnode>[:<snode>]", 1153 completion_suggest=OPT_COMPL_INST_ADD_NODES) 1154 1155 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[], 1156 action="append", metavar="<node>", 1157 help="Use only this node (can be used multiple" 1158 " times, if not given defaults to all nodes)", 1159 completion_suggest=OPT_COMPL_ONE_NODE) 1160 1161 NODEGROUP_OPT_NAME = "--node-group" 1162 NODEGROUP_OPT = cli_option("-g", NODEGROUP_OPT_NAME, 1163 dest="nodegroup", 1164 help="Node group (name or uuid)", 1165 metavar="<nodegroup>", 1166 default=None, type="string", 1167 completion_suggest=OPT_COMPL_ONE_NODEGROUP) 1168 1169 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node", 1170 metavar="<node>", 1171 completion_suggest=OPT_COMPL_ONE_NODE) 1172 1173 NOSTART_OPT = cli_option("--no-start", dest="start", default=True, 1174 action="store_false", 1175 help="Don't start the instance after creation") 1176 1177 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command", 1178 action="store_true", default=False, 1179 help="Show command instead of executing it") 1180 1181 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup", 1182 default=False, action="store_true", 1183 help="Instead of performing the migration/failover," 1184 " try to recover from a failed cleanup. This is safe" 1185 " to run even if the instance is healthy, but it" 1186 " will create extra replication traffic and " 1187 " disrupt briefly the replication (like during the" 1188 " migration/failover") 1189 1190 STATIC_OPT = cli_option("-s", "--static", dest="static", 1191 action="store_true", default=False, 1192 help="Only show configuration data, not runtime data") 1193 1194 ALL_OPT = cli_option("--all", dest="show_all", 1195 default=False, action="store_true", 1196 help="Show info on all instances on the cluster." 
1197 " This can take a long time to run, use wisely") 1198 1199 SELECT_OS_OPT = cli_option("--select-os", dest="select_os", 1200 action="store_true", default=False, 1201 help="Interactive OS reinstall, lists available" 1202 " OS templates for selection") 1203 1204 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures", 1205 action="store_true", default=False, 1206 help="Remove the instance from the cluster" 1207 " configuration even if there are failures" 1208 " during the removal process") 1209 1210 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures", 1211 dest="ignore_remove_failures", 1212 action="store_true", default=False, 1213 help="Remove the instance from the" 1214 " cluster configuration even if there" 1215 " are failures during the removal" 1216 " process") 1217 1218 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance", 1219 action="store_true", default=False, 1220 help="Remove the instance from the cluster") 1221 1222 DST_NODE_OPT = cli_option("-n", "--target-node", dest="dst_node", 1223 help="Specifies the new node for the instance", 1224 metavar="NODE", default=None, 1225 completion_suggest=OPT_COMPL_ONE_NODE) 1226 1227 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node", 1228 help="Specifies the new secondary node", 1229 metavar="NODE", default=None, 1230 completion_suggest=OPT_COMPL_ONE_NODE) 1231 1232 NEW_PRIMARY_OPT = cli_option("--new-primary", dest="new_primary_node", 1233 help="Specifies the new primary node", 1234 metavar="<node>", default=None, 1235 completion_suggest=OPT_COMPL_ONE_NODE) 1236 1237 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary", 1238 default=False, action="store_true", 1239 help="Replace the disk(s) on the primary" 1240 " node (applies only to internally mirrored" 1241 " disk templates, e.g. 
%s)" % 1242 utils.CommaJoin(constants.DTS_INT_MIRROR)) 1243 1244 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary", 1245 default=False, action="store_true", 1246 help="Replace the disk(s) on the secondary" 1247 " node (applies only to internally mirrored" 1248 " disk templates, e.g. %s)" % 1249 utils.CommaJoin(constants.DTS_INT_MIRROR)) 1250 1251 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote", 1252 default=False, action="store_true", 1253 help="Lock all nodes and auto-promote as needed" 1254 " to MC status") 1255 1256 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto", 1257 default=False, action="store_true", 1258 help="Automatically replace faulty disks" 1259 " (applies only to internally mirrored" 1260 " disk templates, e.g. %s)" % 1261 utils.CommaJoin(constants.DTS_INT_MIRROR)) 1262 1263 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size", 1264 default=False, action="store_true", 1265 help="Ignore current recorded size" 1266 " (useful for forcing activation when" 1267 " the recorded size is wrong)") 1268 1269 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node", 1270 metavar="<node>", 1271 completion_suggest=OPT_COMPL_ONE_NODE) 1272 1273 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory", 1274 metavar="<dir>") 1275 1276 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip", 1277 help="Specify the secondary ip for the node", 1278 metavar="ADDRESS", default=None) 1279 1280 READD_OPT = cli_option("--readd", dest="readd", 1281 default=False, action="store_true", 1282 help="Readd old node after replacing it") 1283 1284 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check", 1285 default=True, action="store_false", 1286 help="Disable SSH key fingerprint checking") 1287 1288 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join", 1289 default=False, action="store_true", 1290 help="Force the joining of a 
node") 1291 1292 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate", 1293 type="bool", default=None, metavar=_YORNO, 1294 help="Set the master_candidate flag on the node") 1295 1296 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO, 1297 type="bool", default=None, 1298 help=("Set the offline flag on the node" 1299 " (cluster does not communicate with offline" 1300 " nodes)")) 1301 1302 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO, 1303 type="bool", default=None, 1304 help=("Set the drained flag on the node" 1305 " (excluded from allocation operations)")) 1306 1307 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable", 1308 type="bool", default=None, metavar=_YORNO, 1309 help="Set the master_capable flag on the node") 1310 1311 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable", 1312 type="bool", default=None, metavar=_YORNO, 1313 help="Set the vm_capable flag on the node") 1314 1315 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable", 1316 type="bool", default=None, metavar=_YORNO, 1317 help="Set the allocatable flag on a volume") 1318 1319 ENABLED_HV_OPT = cli_option("--enabled-hypervisors", 1320 dest="enabled_hypervisors", 1321 help="Comma-separated list of hypervisors", 1322 type="string", default=None) 1323 1324 ENABLED_DISK_TEMPLATES_OPT = cli_option("--enabled-disk-templates", 1325 dest="enabled_disk_templates", 1326 help="Comma-separated list of " 1327 "disk templates", 1328 type="string", default=None) 1329 1330 ENABLED_USER_SHUTDOWN_OPT = cli_option("--user-shutdown", 1331 default=None, 1332 dest="enabled_user_shutdown", 1333 help="Whether user shutdown is enabled", 1334 type="bool") 1335 1336 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams", 1337 type="keyval", default={}, 1338 help="NIC parameters") 1339 1340 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None, 1341 dest="candidate_pool_size", 
type="int", 1342 help="Set the candidate pool size") 1343 1344 RQL_OPT = cli_option("--max-running-jobs", dest="max_running_jobs", 1345 type="int", help="Set the maximal number of jobs to " 1346 "run simultaneously") 1347 1348 MAX_TRACK_OPT = cli_option("--max-tracked-jobs", dest="max_tracked_jobs", 1349 type="int", help="Set the maximal number of jobs to " 1350 "be tracked simultaneously for " 1351 "scheduling") 1352 1353 COMPRESSION_TOOLS_OPT = \ 1354 cli_option("--compression-tools", 1355 dest="compression_tools", type="string", default=None, 1356 help="Comma-separated list of compression tools which are" 1357 " allowed to be used by Ganeti in various operations") 1358 1359 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name", 1360 help=("Enables LVM and specifies the volume group" 1361 " name (cluster-wide) for disk allocation" 1362 " [%s]" % constants.DEFAULT_VG), 1363 metavar="VG", default=None) 1364 1365 YES_DOIT_OPT = cli_option("--yes-do-it", "--ya-rly", dest="yes_do_it", 1366 help="Destroy cluster", action="store_true") 1367 1368 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting", 1369 help="Skip node agreement check (dangerous)", 1370 action="store_true", default=False) 1371 1372 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix", 1373 help="Specify the mac prefix for the instance IP" 1374 " addresses, in the format XX:XX:XX", 1375 metavar="PREFIX", 1376 default=None) 1377 1378 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev", 1379 help="Specify the node interface (cluster-wide)" 1380 " on which the master IP address will be added" 1381 " (cluster init default: %s)" % 1382 constants.DEFAULT_BRIDGE, 1383 metavar="NETDEV", 1384 default=None) 1385 1386 MASTER_NETMASK_OPT = cli_option("--master-netmask", dest="master_netmask", 1387 help="Specify the netmask of the master IP", 1388 metavar="NETMASK", 1389 default=None) 1390 1391 USE_EXTERNAL_MIP_SCRIPT = cli_option("--use-external-mip-script", 1392 
dest="use_external_mip_script", 1393 help="Specify whether to run a" 1394 " user-provided script for the master" 1395 " IP address turnup and" 1396 " turndown operations", 1397 type="bool", metavar=_YORNO, default=None) 1398 1399 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir", 1400 help="Specify the default directory (cluster-" 1401 "wide) for storing the file-based disks [%s]" % 1402 pathutils.DEFAULT_FILE_STORAGE_DIR, 1403 metavar="DIR", 1404 default=None) 1405 1406 GLOBAL_SHARED_FILEDIR_OPT = cli_option( 1407 "--shared-file-storage-dir", 1408 dest="shared_file_storage_dir", 1409 help="Specify the default directory (cluster-wide) for storing the" 1410 " shared file-based disks [%s]" % 1411 pathutils.DEFAULT_SHARED_FILE_STORAGE_DIR, 1412 metavar="SHAREDDIR", default=None) 1413 1414 GLOBAL_GLUSTER_FILEDIR_OPT = cli_option( 1415 "--gluster-storage-dir", 1416 dest="gluster_storage_dir", 1417 help="Specify the default directory (cluster-wide) for mounting Gluster" 1418 " file systems [%s]" % 1419 pathutils.DEFAULT_GLUSTER_STORAGE_DIR, 1420 metavar="GLUSTERDIR", 1421 default=pathutils.DEFAULT_GLUSTER_STORAGE_DIR) 1422 1423 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts", 1424 help="Don't modify %s" % pathutils.ETC_HOSTS, 1425 action="store_false", default=True) 1426 1427 MODIFY_ETCHOSTS_OPT = \ 1428 cli_option("--modify-etc-hosts", dest="modify_etc_hosts", metavar=_YORNO, 1429 default=None, type="bool", 1430 help="Defines whether the cluster should autonomously modify" 1431 " and keep in sync the /etc/hosts file of the nodes") 1432 1433 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup", 1434 help="Don't initialize SSH keys", 1435 action="store_false", default=True) 1436 1437 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes", 1438 help="Enable parseable error messages", 1439 action="store_true", default=False) 1440 1441 NONPLUS1_OPT = cli_option("--no-nplus1-mem", 
dest="skip_nplusone_mem", 1442 help="Skip N+1 memory redundancy tests", 1443 action="store_true", default=False) 1444 1445 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type", 1446 help="Type of reboot: soft/hard/full", 1447 default=constants.INSTANCE_REBOOT_HARD, 1448 metavar="<REBOOT>", 1449 choices=list(constants.REBOOT_TYPES)) 1450 1451 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries", 1452 dest="ignore_secondaries", 1453 default=False, action="store_true", 1454 help="Ignore errors from secondaries") 1455 1456 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown", 1457 action="store_false", default=True, 1458 help="Don't shutdown the instance (unsafe)") 1459 1460 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int", 1461 default=constants.DEFAULT_SHUTDOWN_TIMEOUT, 1462 help="Maximum time to wait") 1463 1464 COMPRESS_OPT = cli_option("--compress", dest="compress", 1465 type="string", default=constants.IEC_NONE, 1466 help="The compression mode to use") 1467 1468 TRANSPORT_COMPRESSION_OPT = \ 1469 cli_option("--transport-compression", dest="transport_compression", 1470 type="string", default=constants.IEC_NONE, 1471 help="The compression mode to use during transport") 1472 1473 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout", 1474 dest="shutdown_timeout", type="int", 1475 default=constants.DEFAULT_SHUTDOWN_TIMEOUT, 1476 help="Maximum time to wait for instance" 1477 " shutdown") 1478 1479 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int", 1480 default=None, 1481 help=("Number of seconds between repetions of the" 1482 " command")) 1483 1484 EARLY_RELEASE_OPT = cli_option("--early-release", 1485 dest="early_release", default=False, 1486 action="store_true", 1487 help="Release the locks on the secondary" 1488 " node(s) early") 1489 1490 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate", 1491 dest="new_cluster_cert", 1492 default=False, action="store_true", 1493 help="Generate a new cluster 
certificate") 1494 1495 NEW_NODE_CERT_OPT = cli_option( 1496 "--new-node-certificates", dest="new_node_cert", default=False, 1497 action="store_true", help="Generate new node certificates (for all nodes)") 1498 1499 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert", 1500 default=None, 1501 help="File containing new RAPI certificate") 1502 1503 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert", 1504 default=None, action="store_true", 1505 help=("Generate a new self-signed RAPI" 1506 " certificate")) 1507 1508 SPICE_CERT_OPT = cli_option("--spice-certificate", dest="spice_cert", 1509 default=None, 1510 help="File containing new SPICE certificate") 1511 1512 SPICE_CACERT_OPT = cli_option("--spice-ca-certificate", dest="spice_cacert", 1513 default=None, 1514 help="File containing the certificate of the CA" 1515 " which signed the SPICE certificate") 1516 1517 NEW_SPICE_CERT_OPT = cli_option("--new-spice-certificate", 1518 dest="new_spice_cert", default=None, 1519 action="store_true", 1520 help=("Generate a new self-signed SPICE" 1521 " certificate")) 1522 1523 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key", 1524 dest="new_confd_hmac_key", 1525 default=False, action="store_true", 1526 help=("Create a new HMAC key for %s" % 1527 constants.CONFD)) 1528 1529 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret", 1530 dest="cluster_domain_secret", 1531 default=None, 1532 help=("Load new new cluster domain" 1533 " secret from file")) 1534 1535 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret", 1536 dest="new_cluster_domain_secret", 1537 default=False, action="store_true", 1538 help=("Create a new cluster domain" 1539 " secret")) 1540 1541 USE_REPL_NET_OPT = cli_option("--use-replication-network", 1542 dest="use_replication_network", 1543 help="Whether to use the replication network" 1544 " for talking to the nodes", 1545 action="store_true", default=False) 1546 1547 
# Cluster maintenance and misc options.
MAINTAIN_NODE_HEALTH_OPT = \
  cli_option("--maintain-node-health", dest="maintain_node_health",
             metavar=_YORNO, default=None, type="bool",
             help="Configure the cluster to automatically maintain node"
             " health, by shutting down unknown instances, shutting down"
             " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
  cli_option("--identify-defaults", dest="identify_defaults",
             default=False, action="store_true",
             help="Identify which saved instance parameters are equal to"
             " the current cluster defaults and set them as such, instead"
             " of marking them as overridden")

# User-id pool management (lists/ranges are parsed later, not here).
UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman", dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

PRIMARY_IP_VERSION_OPT = \
  cli_option("--primary-ip-version", default=constants.IP4_VERSION,
             action="store", dest="primary_ip_version",
             metavar="%d|%d" % (constants.IP4_VERSION,
                                constants.IP6_VERSION),
             help="Cluster-wide IP version for primary IP")

SHOW_MACHINE_OPT = cli_option("-M", "--show-machine-names", default=False,
                              action="store_true",
                              help="Show machine name for every line in"
                              " output")

FAILURE_ONLY_OPT = cli_option("--failure-only", default=False,
                              action="store_true",
                              help=("Hide successful results and show"
                                    " failures only (determined by the exit"
                                    " code)"))

# Free-form audit-trail string attached to the submitted job.
REASON_OPT = cli_option("--reason", default=None,
                        help="The reason for executing the command")
def _PriorityOptionCb(option, _, value, parser):
  """Callback for processing C{--priority} option.

  Translates the symbolic priority name given on the command line into
  its numeric value (via the module-level C{_PRIONAME_TO_VALUE} map) and
  stores it on the parser's values object.

  """
  setattr(parser.values, option.dest, _PRIONAME_TO_VALUE[value])
# Opcode priority; the callback maps the symbolic name to its numeric value.
PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name
                                           for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          action="callback", type="choice",
                          callback=_PriorityOptionCb,
                          help="Priority for opcode processing")

OPPORTUNISTIC_OPT = cli_option("--opportunistic-locking",
                               dest="opportunistic_locking",
                               action="store_true", default=False,
                               help="Opportunistically acquire locks")

# OS flag options.
HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))

NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
                             type="keyval", default=None,
                             help="Node parameters")

ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
                              action="store", metavar="POLICY", default=None,
                              help="Allocation policy for the node group")

NODE_POWERED_OPT = cli_option("--node-powered", default=None,
                              type="bool", metavar=_YORNO,
                              dest="node_powered",
                              help="Specify if the SoR for node is powered")

OOB_TIMEOUT_OPT = cli_option("--oob-timeout", dest="oob_timeout", type="int",
                             default=constants.OOB_TIMEOUT,
                             help="Maximum time to wait for out-of-band"
                             " helper")

POWER_DELAY_OPT = cli_option("--power-delay", dest="power_delay",
                             type="float", default=constants.OOB_POWER_DELAY,
                             help="Time in seconds to wait between power-ons")

FORCE_FILTER_OPT = cli_option("-F", "--filter", dest="force_filter",
                              action="store_true", default=False,
                              help=("Whether command argument should be"
                                    " treated as filter"))

NO_REMEMBER_OPT = cli_option("--no-remember", dest="no_remember",
                             action="store_true", default=False,
                             help="Perform but do not record the change"
                             " in the configuration")

PRIMARY_ONLY_OPT = cli_option("-p", "--primary-only",
                              default=False, action="store_true",
                              help="Evacuate primary instances only")

SECONDARY_ONLY_OPT = cli_option("-s", "--secondary-only",
                                default=False, action="store_true",
                                help="Evacuate secondary instances only"
                                " (applies only to internally mirrored"
                                " disk templates, e.g. %s)" %
                                utils.CommaJoin(constants.DTS_INT_MIRROR))

STARTUP_PAUSED_OPT = cli_option("--paused", dest="startup_paused",
                                action="store_true", default=False,
                                help="Pause instance at startup")

TO_GROUP_OPT = cli_option("--to", dest="to", metavar="<group>",
                          help="Destination node group (name or uuid)",
                          default=None, action="append",
                          completion_suggest=OPT_COMPL_ONE_NODEGROUP)

IGNORE_ERRORS_OPT = cli_option("-I", "--ignore-errors", default=[],
                               action="append", dest="ignore_errors",
                               choices=list(constants.CV_ALL_ECODES_STRINGS),
                               help="Error code to be ignored")

DISK_STATE_OPT = cli_option("--disk-state", default=[], dest="disk_state",
                            action="append",
                            help=("Specify disk state information in the"
                                  " format"
                                  " storage_type/identifier:option=value,...;"
                                  " note this is unused for now"),
                            type="identkeyval")

HV_STATE_OPT = cli_option("--hypervisor-state", default=[], dest="hv_state",
                          action="append",
                          help=("Specify hypervisor state information in the"
                                " format hypervisor:option=value,...;"
                                " note this is unused for now"),
                          type="identkeyval")

IGNORE_IPOLICY_OPT = cli_option("--ignore-ipolicy", dest="ignore_ipolicy",
                                action="store_true", default=False,
                                help="Ignore instance policy violations")

RUNTIME_MEM_OPT = cli_option("-m", "--runtime-memory", dest="runtime_mem",
                             help="Sets the instance's runtime memory,"
                             " ballooning it up or down to the new value",
                             default=None, type="unit", metavar="<size>")

ABSOLUTE_OPT = cli_option("--absolute", dest="absolute",
                          action="store_true", default=False,
                          help="Marks the grow as absolute instead of the"
                          " (default) relative mode")

# Network management options.
NETWORK_OPT = cli_option("--network",
                         action="store", default=None, dest="network",
                         help="IP network in CIDR notation")

GATEWAY_OPT = cli_option("--gateway",
                         action="store", default=None, dest="gateway",
                         help="IP address of the router (gateway)")

ADD_RESERVED_IPS_OPT = cli_option("--add-reserved-ips",
                                  action="store", default=None,
                                  dest="add_reserved_ips",
                                  help="Comma-separated list of"
                                  " reserved IPs to add")

REMOVE_RESERVED_IPS_OPT = cli_option("--remove-reserved-ips",
                                     action="store", default=None,
                                     dest="remove_reserved_ips",
                                     help="Comma-delimited list of"
                                     " reserved IPs to remove")

NETWORK6_OPT = cli_option("--network6",
                          action="store", default=None, dest="network6",
                          help="IP network in CIDR notation")

GATEWAY6_OPT = cli_option("--gateway6",
                          action="store", default=None, dest="gateway6",
                          help="IP6 address of the router (gateway)")

NOCONFLICTSCHECK_OPT = cli_option("--no-conflicts-check",
                                  dest="conflicts_check",
                                  default=True,
                                  action="store_false",
                                  help="Don't check for conflicting IPs")

INCLUDEDEFAULTS_OPT = cli_option("--include-defaults",
                                 dest="include_defaults",
                                 default=False, action="store_true",
                                 help="Include default values")

HOTPLUG_OPT = cli_option("--hotplug", dest="hotplug",
                         action="store_true", default=False,
                         help="Hotplug supported devices (NICs and Disks)")
HOTPLUG_IF_POSSIBLE_OPT = cli_option("--hotplug-if-possible",
                                     dest="hotplug_if_possible",
                                     action="store_true", default=False,
                                     help="Hotplug devices in case"
                                     " hotplug is supported")

INSTALL_IMAGE_OPT = \
  cli_option("--install-image",
             dest="install_image",
             action="store",
             type="string",
             default=None,
             help="The OS image to use for running the OS scripts safely")

INSTANCE_COMMUNICATION_OPT = \
  cli_option("-c", "--communication",
             dest="instance_communication",
             help=constants.INSTANCE_COMMUNICATION_DOC,
             type="bool")

INSTANCE_COMMUNICATION_NETWORK_OPT = \
  cli_option("--instance-communication-network",
             dest="instance_communication_network",
             type="string",
             help="Set the network name for instance communication")

# Options for the disk-zeroing helper VM used during exports.
ZEROING_IMAGE_OPT = \
  cli_option("--zeroing-image",
             dest="zeroing_image", action="store", default=None,
             help="The OS image to use to zero instance disks")

ZERO_FREE_SPACE_OPT = \
  cli_option("--zero-free-space",
             dest="zero_free_space", action="store_true", default=False,
             help="Whether to zero the free space on the disks of the "
             "instance prior to the export")

HELPER_STARTUP_TIMEOUT_OPT = \
  cli_option("--helper-startup-timeout",
             dest="helper_startup_timeout", action="store", type="int",
             help="Startup timeout for the helper VM")

HELPER_SHUTDOWN_TIMEOUT_OPT = \
  cli_option("--helper-shutdown-timeout",
             dest="helper_shutdown_timeout", action="store", type="int",
             help="Shutdown timeout for the helper VM")

ZEROING_TIMEOUT_FIXED_OPT = \
  cli_option("--zeroing-timeout-fixed",
             dest="zeroing_timeout_fixed", action="store", type="int",
             help="The fixed amount of time to wait before assuming that the "
             "zeroing failed")

ZEROING_TIMEOUT_PER_MIB_OPT = \
  cli_option("--zeroing-timeout-per-mib",
             dest="zeroing_timeout_per_mib", action="store", type="float",
             help="The amount of time to wait per MiB of data to zero, in "
             "addition to the fixed timeout")

#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT, REASON_OPT]

# options related to asynchronous job handling

SUBMIT_OPTS = [
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  ]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NOCONFLICTSCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OSPARAMS_PRIVATE_OPT,
  OSPARAMS_SECRET_OPT,
  OS_SIZE_OPT,
  OPPORTUNISTIC_OPT,
  SUBMIT_OPT,
  PRINT_JOBID_OPT,
  TAG_ADD_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]

# common instance policy options
INSTANCE_POLICY_OPTS = [
  IPOLICY_BOUNDS_SPECS_OPT,
  IPOLICY_DISK_TEMPLATES,
  IPOLICY_VCPU_RATIO,
  IPOLICY_SPINDLE_RATIO,
  ]

# instance policy split specs options
SPLIT_ISPECS_OPTS = [
  SPECS_CPU_COUNT_OPT,
  SPECS_DISK_COUNT_OPT,
  SPECS_DISK_SIZE_OPT,
  SPECS_MEM_SIZE_OPT,
  SPECS_NIC_COUNT_OPT,
  ]
class _ShowUsage(Exception):
  """Exception raised by L{_ParseArgs} to request a usage display.

  """
  def __init__(self, exit_error):
    """Initializes instances of this class.

    @type exit_error: bool
    @param exit_error: Whether to report failure on exit

    """
    super(_ShowUsage, self).__init__()
    # Remembered so the top-level handler can pick the exit code.
    self.exit_error = exit_error
1906 1907 -class _ShowVersion(Exception):
1908 """Exception class for L{_ParseArgs}. 1909 1910 """
1911
def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  # every override must name a real command, and aliases may not shadow them
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    # e.g. binary "gnt-instance", cmd "add" -> env var GNT_INSTANCE_ADD;
    # its (shell-split) contents are inserted right after the command name
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  # a None triple tells the caller the argument check already printed an error
  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
1971 1972 -def _FormatUsage(binary, commands):
1973 """Generates a nice description of all commands. 1974 1975 @param binary: Script name 1976 @param commands: Dictionary containing command definitions 1977 1978 """ 1979 # compute the max line length for cmd + usage 1980 mlen = min(60, max(map(len, commands))) 1981 1982 yield "Usage: %s {command} [options...] [argument...]" % binary 1983 yield "%s <command> --help to see details, or man %s" % (binary, binary) 1984 yield "" 1985 yield "Commands:" 1986 1987 # and format a nice command list 1988 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()): 1989 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen) 1990 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0)) 1991 for line in help_lines: 1992 yield " %-*s %s" % (mlen, "", line) 1993 1994 yield ""
1995
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

    1. Abort with error if values specified by user but none expected.

    1. For each argument in definition

      1. Keep running count of minimum number of values (min_count)
      1. Keep running count of maximum number of values (max_count)
      1. If it has an unlimited number of values

        1. Abort with error if it's not the last argument in the definition

    1. If last argument has limited number of values

      1. Abort with error if number of values doesn't match or is too large

    1. Abort with error if user didn't pass enough values (min_count)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    # accumulate the total minimum/maximum over all argument definitions;
    # None means "no bound seen yet"
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      # only enforce upper bounds if the last argument is itself bounded
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
def SplitNodeOption(value):
  """Splits the value of a --node option.

  Accepts either "pnode" or "pnode:snode"; when no secondary node is
  given, C{None} is returned in its place.

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: comma-separated user selection; a leading "+" means
      "append these fields to the defaults"
  @type default: list
  @param default: Default fields

  """
  if selected is None:
    return default

  if not selected.startswith("+"):
    return selected.split(",")

  return default + selected[1:].split(",")
2111 2112 2113 UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do no do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty, we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # the last choice doubles as the default/fallback answer
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    # Python 2 built-in file(); "a+" so we can both prompt and read
    f = file("/dev/tty", "a+")
  except IOError:
    # no controlling terminal: fall back to the default answer
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      # read at most 2 bytes: one answer character plus the newline
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        # print the per-choice help and re-prompt
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
class JobSubmittedException(Exception):
  """Raised when a job was submitted and the client should exit.

  Carries a single argument, the ID of the submitted job, which the
  handler is expected to print.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Submits a list of opcodes as one job, without waiting for results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @return: the ID of the submitted job

  """
  if cl is None:
    cl = GetClient()

  return cl.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        # Python 2: max(None, serial) == serial, so the first message works
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    # NOTE: this elif belongs to "if log_entries" above — the loop only
    # breaks on a terminal status once all pending log entries are drained
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # find the first failed opcode and raise a matching exception; remember
  # whether any earlier opcode succeeded to report a partial failure
  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase(object):
  """Base class defining the data callbacks used by L{GenericPollJob}.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    Must be implemented by subclasses.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
class JobPollReportCbBase(object):
  """Base class defining the reporting callbacks for L{GenericPollJob}.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    Must be implemented by subclasses.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  """Data callbacks delegating every query to a luxi client.

  """
  def __init__(self, cl):
    """Initializes this class.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job via the luxi client.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks forwarding log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable receiving (timestamp, type, message)
        tuples

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Forwards a log message to the feedback function.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks printing job progress to standard output/error.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # remember which one-off notifications were already printed
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Prints a timestamped job log message to stdout.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Prints a one-time notice for queued/lock-waiting jobs.

    """
    if status is None:
      return

    if not self.notified_queued and status == constants.JOB_STATUS_QUEUED:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True
    elif (not self.notified_waitlock
          and status == constants.JOB_STATUS_WAITING):
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-message entries are converted to their string representation
  before being made safe for output.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)

  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Polls until the given job finishes and returns its result.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created

  """
  if cl is None:
    cl = GetClient()

  if reporter is not None:
    if feedback_fn:
      raise errors.ProgrammerError("Can't specify reporter and feedback"
                                   " function")
  elif feedback_fn:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
  else:
    reporter = StdioJobPollReportCb()

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode and wait for its result.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)
  # honour --print-jobid when the option exists and is set
  if getattr(opts, "print_jobid", False):
    ToStdout("%d" % job_id)

  results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                    reporter=reporter)

  return results[0]
def SubmitOpCodeToDrainedQueue(op):
  """Forcefully insert a job in the queue, even if it is drained.

  @return: the result of the first (and only) opcode of the job

  """
  client = GetClient()
  job_id = client.SubmitJobToDrainedQueue([op])
  results = PollJob(job_id, cl=client)
  return results[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  """
  if not (opts and opts.submit_only):
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

  # submit-only mode: send the job and report its ID via an exception
  ops = [op]
  SetGenericOpcodeOpts(ops, opts)
  job_id = SendJob(ops, cl=cl)
  if opts.print_jobid:
    ToStdout("%d" % job_id)
  raise JobSubmittedException(job_id)
def _InitReasonTrail(op, opts):
  """Attaches the initial reason trail to an opcode.

  The trail starts with the user-provided reason (when given), followed
  by an entry naming the client binary and the invoked command.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  assert len(sys.argv) >= 2

  trail = []
  if opts.reason:
    trail.append((constants.OPCODE_REASON_SRC_USER,
                  opts.reason,
                  utils.EpochNano()))

  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  trail.append((source, sys.argv[1], utils.EpochNano()))
  op.reason = trail
def SetGenericOpcodeOpts(opcode_list, options):
  """Applies generic command line options to opcodes, in place.

  Copies the debug level, dry-run flag, priority and reason trail from
  the parsed options onto every opcode in the list.

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return

  for opcode in opcode_list:
    opcode.debug_level = options.debug
    if hasattr(options, "dry_run"):
      opcode.dry_run = options.dry_run
    if getattr(options, "priority", None) is not None:
      opcode.priority = options.priority
    _InitReasonTrail(opcode, options)
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  # NOTE: ordering matters — specific errors.* subclasses must be tested
  # before the errors.GenericError catch-all near the end
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write(" node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write(" node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # differentiate resolving our own hostname from resolving others
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, rpcerr.NoMasterError):
    # the socket path tells us which daemon was unreachable
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
    else:
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, rpcerr.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, rpcerr.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, rpcerr.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not an error: report the job ID and recommend a clean exit
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
      for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
      override command line options; this can be used to pass
      options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of environment names which are allowed to submit
      default args for commands

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0])
    if not binary:
      binary = sys.argv[0]

    if len(sys.argv) >= 2:
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
    else:
      logname = binary

    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  else:
    # NOTE(review): logname is never assigned on this branch, so the
    # SetupLogging call below would raise NameError — presumably sys.argv
    # is never empty in practice; confirm before relying on this path
    binary = "<unknown program>"
    cmdline = "<unknown>"

  if aliases is None:
    aliases = {}

  try:
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
                                       env_override)
  except _ShowVersion:
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    return constants.EXIT_SUCCESS
  except _ShowUsage, err:
    for line in _FormatUsage(binary, commands):
      ToStdout(line)

    if err.exit_error:
      return constants.EXIT_FAILURE
    else:
      return constants.EXIT_SUCCESS
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  # script-supplied values take precedence over parsed options
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.debug("Command line: %s", cmdline)

  try:
    result = func(options, args)
  except (errors.GenericError, rpcerr.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result
2732 2733 -def ParseNicOption(optvalue):
2734 """Parses the value of the --net option(s). 2735 2736 """ 2737 try: 2738 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue) 2739 except (TypeError, ValueError), err: 2740 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err), 2741 errors.ECODE_INVAL) 2742 2743 nics = [{}] * nic_max 2744 for nidx, ndict in optvalue: 2745 nidx = int(nidx) 2746 2747 if not isinstance(ndict, dict): 2748 raise errors.OpPrereqError("Invalid nic/%d value: expected dict," 2749 " got %s" % (nidx, ndict), errors.ECODE_INVAL) 2750 2751 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES) 2752 2753 nics[nidx] = ndict 2754 2755 return nics
2756
def FixHvParams(hvparams):
  """Normalizes the separator in the usb_devices hvparam, in place.

  In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed
  from comma to space because commas cannot be accepted on the command
  line (they already act as the separator between different hvparams).
  Still, RAPI should be able to accept commas for backwards
  compatibility. Therefore, we convert spaces into commas here, and we
  keep the old parsing logic everywhere else.

  """
  if constants.HV_USB_DEVICES in hvparams:
    devices = hvparams[constants.HV_USB_DEVICES]
    hvparams[constants.HV_USB_DEVICES] = devices.replace(" ", ",")
  # otherwise: no usb_devices, no modification required
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed", errors.ECODE_INVAL)
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified",
                                 errors.ECODE_INVAL)
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option", errors.ECODE_INVAL)
    if opts.sd_size is not None:
      # legacy "-s SIZE" shorthand: translate into a single-disk spec
      opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                   errors.ECODE_INVAL)
      # NOTE(review): list multiplication makes every unspecified slot
      # share ONE dict object (same latent aliasing as in ParseNicOption);
      # consider "[{} for _ in range(disk_max)]" — confirm nothing mutates
      # the placeholder dicts downstream
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      elif constants.IDISK_SIZE in ddict:
        if constants.IDISK_ADOPT in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx, errors.ECODE_INVAL)
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)
      elif constants.IDISK_ADOPT in ddict:
        if constants.IDISK_SPINDLES in ddict:
          raise errors.OpPrereqError("spindles is not a valid option when"
                                     " adopting a disk", errors.ECODE_INVAL)
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import", errors.ECODE_INVAL)
        # adopted disks carry no explicit size
        ddict[constants.IDISK_SIZE] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx, errors.ECODE_INVAL)
      if constants.IDISK_SPINDLES in ddict:
        ddict[constants.IDISK_SPINDLES] = int(ddict[constants.IDISK_SPINDLES])

      disks[didx] = ddict

  if opts.tags is not None:
    tags = opts.tags.split(",")
  else:
    tags = []

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
  FixHvParams(hvparams)

  osparams_private = opts.osparams_private or serializer.PrivateDict()
  osparams_secret = opts.osparams_secret or serializer.PrivateDict()

  helper_startup_timeout = opts.helper_startup_timeout
  helper_shutdown_timeout = opts.helper_shutdown_timeout

  # mode-specific opcode parameters: create honours the user's start/OS
  # options, while import takes them from the exported instance
  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
    compress = constants.IEC_NONE
    if opts.instance_communication is None:
      instance_communication = False
    else:
      instance_communication = opts.instance_communication
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
    compress = opts.compress
    instance_communication = False
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpInstanceCreate(
    instance_name=instance,
    disks=disks,
    disk_template=opts.disk_template,
    nics=nics,
    conflicts_check=opts.conflicts_check,
    pnode=pnode, snode=snode,
    ip_check=opts.ip_check,
    name_check=opts.name_check,
    wait_for_sync=opts.wait_for_sync,
    file_storage_dir=opts.file_storage_dir,
    file_driver=opts.file_driver,
    iallocator=opts.iallocator,
    hypervisor=hypervisor,
    hvparams=hvparams,
    beparams=opts.beparams,
    osparams=opts.osparams,
    osparams_private=osparams_private,
    osparams_secret=osparams_secret,
    mode=mode,
    opportunistic_locking=opts.opportunistic_locking,
    start=start,
    os_type=os_type,
    force_variant=force_variant,
    src_node=src_node,
    src_path=src_path,
    compress=compress,
    tags=tags,
    no_install=no_install,
    identify_defaults=identify_defaults,
    ignore_ipolicy=opts.ignore_ipolicy,
    instance_communication=instance_communication,
    helper_startup_timeout=helper_startup_timeout,
    helper_shutdown_timeout=helper_shutdown_timeout)

  SubmitOrSend(op, opts)
  return 0
class _RunWhileDaemonsStoppedHelper(object):
  """Helper class for L{RunWhileDaemonsStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node,
               online_nodes, ssh_ports, exclude_daemons, debug,
               verbose):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes
    @type ssh_ports: list
    @param ssh_ports: List of SSH ports of online nodes
    @type exclude_daemons: list of string
    @param exclude_daemons: list of daemons that will be restarted on master
                            after all others are shutdown
    @type debug: boolean
    @param debug: show debug output
    @type verbose: boolean
    @param verbose: show verbose output

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes
    # Map node name -> SSH port; assumes ssh_ports is parallel to online_nodes
    self.ssh_ports = dict(zip(online_nodes, ssh_ports))

    self.ssh = ssh.SshRunner(self.cluster_name)

    # All online nodes except the master; used to restart the cluster in the
    # right order (master node last)
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    self.exclude_daemons = exclude_daemons
    self.debug = debug
    self.verbose = verbose

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name; None (or the master node's name) runs the
        command locally instead of via SSH
    @type cmd: list
    @param cmd: Command
    @raise errors.OpExecError: if the command exits with a non-zero status

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd),
                            port=self.ssh_ports[node_name])

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    The watcher is paused, master daemons are stopped first (so no new jobs
    come in), then all daemons on all nodes; daemons in
    C{self.exclude_daemons} are restarted on the master only.  On the way out
    everything is restarted (master node last) and the watcher is resumed.

    @type fn: callable
    @param fn: Function to be called; receives this helper as first argument

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
          # Starting any daemons listed as exception
          if node_name == self.master_node:
            for daemon in self.exclude_daemons:
              self.feedback_fn("Starting daemon '%s' on %s" % (daemon,
                                                               node_name))
              self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start", daemon])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          # Stopping any daemons listed as exception.
          # This might look unnecessary, but it makes sure that daemon-util
          # starts all daemons in the right order.
          if node_name == self.master_node:
            # NOTE(review): reverse() mutates the caller-supplied list in
            # place, so a second Call() would see the reversed order
            self.exclude_daemons.reverse()
            for daemon in self.exclude_daemons:
              self.feedback_fn("Stopping daemon '%s' on %s" % (daemon,
                                                               node_name))
              self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop", daemon])
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])

    finally:
      # Resume watcher
      watcher_block.Close()
3068
def RunWhileDaemonsStopped(feedback_fn, exclude_daemons, fn, *args, **kwargs):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type exclude_daemons: list of string
  @param exclude_daemons: list of daemons that stopped, but immediately
                          restarted on the master to be available when calling
                          'fn'. If None, all daemons will be stopped and none
                          will be started before calling 'fn'.
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  cl = GetClient()

  (cluster_name, master_node) = \
    cl.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=cl)
  ssh_ports = GetNodesSshPorts(online_nodes, cl)

  # Don't keep a reference to the client. The master daemon will go away.
  del cl

  assert master_node in online_nodes

  helper = _RunWhileDaemonsStoppedHelper(
    feedback_fn, cluster_name, master_node, online_nodes, ssh_ports,
    [] if exclude_daemons is None else exclude_daemons,
    kwargs.get("debug", False), kwargs.get("verbose", False))
  return helper.Call(fn, *args)
3108
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Runs a function with every cluster daemon stopped.

  Thin convenience wrapper over L{RunWhileDaemonsStopped} that excludes no
  daemons from the shutdown.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  RunWhileDaemonsStopped(feedback_fn, None, fn, *args)
3120
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  Note: the rows in C{data} are modified in place (unit fields are
  replaced by their formatted values and every cell is converted to a
  string).

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list of strings
  @return: the formatted table rows (header row first, if requested)

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)  # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields)  # pylint: disable=W0142

  # Build one printf-style conversion per field; "%*s" takes the width as a
  # separate argument, which is only needed for the aligned (smart) layout
  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    # mlens tracks the maximum observed width per column
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    # Escape "%" in the separator so it survives the later "%"-interpolation
    format_str = separator.replace("%", "%%").join(format_fields)

  # First pass: format unit fields, stringify every cell (in place) and
  # collect column widths
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          # non-numeric values are passed through unchanged
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        # headers may be wider than any data cell
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      # zero width for the last left-aligned column avoids trailing spaces
      mlens[-1] = 0

  # Second pass: render the rows; None rows become all-dash placeholders
  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
3228
3229 3230 -def _FormatBool(value):
3231 """Formats a boolean value as a string. 3232 3233 """ 3234 if value: 3235 return "Y" 3236 return "N"
3237 3238 3239 #: Default formatting for query results; (callback, align right) 3240 _DEFAULT_FORMAT_QUERY = { 3241 constants.QFT_TEXT: (str, False), 3242 constants.QFT_BOOL: (_FormatBool, False), 3243 constants.QFT_NUMBER: (str, True), 3244 constants.QFT_NUMBER_FLOAT: (str, True), 3245 constants.QFT_TIMESTAMP: (utils.FormatTime, False), 3246 constants.QFT_OTHER: (str, False), 3247 constants.QFT_UNKNOWN: (str, False), 3248 }
def _GetColumnFormatter(fdef, override, unit):
  """Returns formatting function for a field.

  @type fdef: L{objects.QueryFieldDefinition}
  @type override: dict
  @param override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT}
  @rtype: tuple; (callable, bool)
  @return: Returns the function to format a value (takes one parameter) and a
    boolean for aligning the value on the right-hand side

  """
  custom = override.get(fdef.name, None)
  if custom is not None:
    return custom

  assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY

  if fdef.kind == constants.QFT_UNIT:
    # The unit is only known at runtime, hence the closure
    return (lambda value: utils.FormatUnit(value, unit), True)

  default = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None)
  if default is None:
    raise NotImplementedError("Can't format column type '%s'" % fdef.kind)

  return default
3280
class _QueryColumnFormatter(object):
  """Callable class for formatting fields of a query.

  """
  def __init__(self, fn, status_fn, verbose):
    """Initializes this class.

    @type fn: callable
    @param fn: Formatting function
    @type status_fn: callable
    @param status_fn: Function to report fields' status
    @type verbose: boolean
    @param verbose: whether to use verbose field descriptions or not

    """
    self._fn = fn
    self._status_fn = status_fn
    self._verbose = verbose

  def __call__(self, data):
    """Returns a field's string representation.

    """
    (status, value) = data

    # Report status
    self._status_fn(status)

    if status != constants.RS_NORMAL:
      assert value is None, \
        "Found value %r for abnormal status %s" % (value, status)
      return FormatResultError(status, self._verbose)

    return self._fn(value)
3317
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status
  @raise NotImplementedError: for unrecognized status values

  """
  assert status != constants.RS_NORMAL, \
    "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    (verbose_text, normal_text) = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  return verbose_text if verbose else normal_text
3338
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: tuple; (one of L{QR_NORMAL}, L{QR_UNKNOWN} or L{QR_INCOMPLETE},
    list of strings)

  """
  if unit is None:
    # Humans get human-readable units, machine-parseable output megabytes
    unit = "m" if separator else "h"

  if format_override is None:
    format_override = {}

  # Per-status counters, filled in as a side effect of formatting each cell
  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  abnormal = sum(count for (key, count) in stats.items()
                 if key != constants.RS_NORMAL)

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif abnormal:
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
def _GetUnknownFields(fdefs):
  """Returns the subset of C{fdefs} whose type is unknown.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: list of L{objects.QueryFieldDefinition}

  """
  return [fd for fd in fdefs if fd.kind == constants.QFT_UNKNOWN]
3412
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were present

  """
  unknown = _GetUnknownFields(fdefs)
  if not unknown:
    return False

  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
  return True
3427
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that
  @rtype: int
  @return: L{constants.EXIT_SUCCESS} or L{constants.EXIT_UNKNOWN_FIELD}

  """
  name_filter = qlang.MakeFilter(names or None, force_filter,
                                 namefield=namefield, isnumeric=isnumeric)

  # Combine the name-based filter with any explicitly passed filter
  if qfilter is None:
    qfilter = name_filter
  elif name_filter is not None:
    qfilter = [qlang.OP_AND, name_filter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
3499
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list
  @return: [name, human-readable type name, title, documentation]

  """
  kind_name = _QFT_NAMES.get(fdef.kind, fdef.kind)
  return [fdef.name, kind_name, fdef.title, fdef.doc]
3514
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @rtype: int
  @return: L{constants.EXIT_SUCCESS} or L{constants.EXIT_UNKNOWN_FIELD}

  """
  if cl is None:
    cl = GetClient()

  # An empty field list means "all fields"
  response = cl.QueryFields(resource, fields or None)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [TableColumn(title, str, False)
             for title in ("Name", "Type", "Title", "Description")]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
3554
class TableColumn(object):
  """Describes a column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Formatting function; receives one cell value, returns a string
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.title = title
    # Note: intentionally named "format" for use as col.format(value)
    self.format = fn
    self.align_right = align_right
3574
3575 3576 -def _GetColFormatString(width, align_right):
3577 """Returns the format string for a field. 3578 3579 """ 3580 if align_right: 3581 sign = "" 3582 else: 3583 sign = "-" 3584 3585 return "%%%s%ss" % (sign, width)
3586
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns; None means aligned
      columns separated by a single space
  @rtype: list of strings

  """
  if header:
    data = [[col.title for col in columns]]
  else:
    data = []

  # Format every cell first; alignment widths can only be known afterwards
  for row in rows:
    assert len(row) == len(columns)
    data.append([col.format(value) for value, col in zip(row, columns)])

  if separator is not None:
    # With an explicit separator no alignment is done
    return [separator.join(row) for row in data]

  # Width of each column is the widest formatted cell (header included)
  colwidth = [max([len(row[idx]) for row in data] or [0])
              for idx in range(len(columns))]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join(_GetColFormatString(width, col.align_right)
                 for col, width in zip(columns, colwidth))

  return [fmt % tuple(row) for row in data]
3635
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds
  @rtype: string
  @return: a string with the formatted timestamp, or "?" for malformed input

  """
  if isinstance(ts, (tuple, list)) and len(ts) == 2:
    (sec, usecs) = ts
    return utils.FormatTime(sec, usecs=usecs)

  return "?"
3652
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes will be recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: day
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @raise errors.OpPrereqError: for empty or unparseable specifications

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)

  suffix_map = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }

  multiplier = suffix_map.get(value[-1])
  if multiplier is None:
    # No recognized suffix: the whole string is a number of seconds
    multiplier = 1
  else:
    value = value[:-1]
    if not value:  # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)

  try:
    return int(value) * multiplier
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                               errors.ECODE_INVAL)
3697
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
      offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
      offline nodes that are skipped; if this parameter is True the
      note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
      names, useful for doing network traffic over the replication interface
      (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
      (useful in coordination with secondary_ips where we cannot check our
      node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  # Build up a list of filter conditions, combined with AND below
  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # match either group name or group UUID
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                    [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  # Each row is a list of (status, value) pairs, one pair per queried field,
  # in the order requested above: name, offline, sip

  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
def GetNodesSshPorts(nodes, cl):
  """Retrieves SSH ports of given nodes.

  @param nodes: the names of nodes
  @type nodes: a list of strings
  @param cl: a client to use for the query
  @type cl: L{ganeti.luxi.Client}
  @return: the list of SSH ports corresponding to the nodes
  @rtype: a list of ints

  """
  # QueryNodes returns one single-field row per node; unwrap the port value.
  # (The previous docstring claimed "a list of tuples", which was wrong.)
  return [row[0] for row in cl.QueryNodes(names=nodes,
                                          fields=["ndp/ssh_port"],
                                          use_locking=False)]
3791
3792 3793 -def _ToStream(stream, txt, *args):
3794 """Write a message to a stream, bypassing the logging system 3795 3796 @type stream: file object 3797 @param stream: the file to which we should write 3798 @type txt: str 3799 @param txt: the message 3800 3801 """ 3802 try: 3803 if args: 3804 args = tuple(args) 3805 stream.write(txt % args) 3806 else: 3807 stream.write(txt) 3808 stream.write("\n") 3809 stream.flush() 3810 except IOError, err: 3811 if err.errno == errno.EPIPE: 3812 # our terminal went away, we'll exit 3813 sys.exit(constants.EXIT_FAILURE) 3814 else: 3815 raise
3816
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  Thin wrapper over L{_ToStream} bound to C{sys.stdout}.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stdout, txt, *args)
3828
def ToStdoutAndLoginfo(txt, *args):
  """Write a message to stdout and additionally log it at INFO level.

  @type txt: str
  @param txt: the message, %-interpolated with C{args} by both sinks

  """
  ToStdout(txt, *args)
  logging.info(txt, *args)
3834
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  Thin wrapper over L{_ToStream} bound to C{sys.stderr}.

  @type txt: str
  @param txt: the message

  """
  _ToStream(sys.stderr, txt, *args)
3846
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  Internal bookkeeping uses two lists of tuples:
    - C{self.queue}: (ordering index, name, ops) for not-yet-submitted jobs
    - C{self.jobs}: (ordering index, submit success, job id, name) for
      submitted ones

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    """Initializes this class.

    @param cl: luxi client to use, or None to create one
    @type verbose: boolean
    @param verbose: whether to print submitted job IDs
    @param opts: generic opcode options applied on submission
    @param feedback_fn: feedback function passed to L{PollJob}

    """
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    # monotonically increasing index to restore submission order in results
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    Returns C{fmt % name} if name is set, the empty string otherwise.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: boolean
    @param each: if True, submit jobs one by one instead of in one batch

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    Removes the chosen entry from C{self.jobs} and returns it; falls back to
    the first job if all are still pending.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      # for failed submissions, jid holds the error message
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, rpcerr.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      # (submit success, job id or error) pairs
      return [row[1:3] for row in self.jobs]
3998
def FormatParamsDictInfo(param_dict, actual, roman=False):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
      formatted string or a dictionary containing formatted strings

  """
  def _FormatOne(key, data):
    # non-empty sub-dictionaries are formatted recursively
    if isinstance(data, dict) and data:
      return FormatParamsDictInfo(param_dict.get(key, {}), data, roman)
    # otherwise show the custom value if set, else mark the effective
    # value as a default
    default_str = "default (%s)" % compat.TryToRoman(data, roman)
    return str(compat.TryToRoman(param_dict.get(key, default_str), roman))

  return dict((key, _FormatOne(key, data))
              for (key, data) in actual.items())
4020
def _FormatListInfoDefault(data, def_data):
  """Format a list of values, falling back to a default marker.

  @param data: the custom list of values, or None if not set
  @param def_data: the default list, shown as "default (...)" when
      no custom value is present
  @rtype: string

  """
  if data is None:
    return "default (%s)" % utils.CommaJoin(def_data)
  return utils.CommaJoin(data)
4028
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster, roman=False):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
      cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @type roman: bool
  @param roman: whether to print the values in roman numerals
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    # the cluster policy has no higher-level defaults: it is its own
    # effective policy
    eff_ipolicy = custom_ipolicy

  minmax_out = []
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    # custom bounds present: show them as-is
    for (k, minmax) in enumerate(custom_minmax):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo(minmax[key], minmax[key], roman))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  else:
    # no custom bounds: show the effective ones, all marked as defaults
    # (empty dict as the "own" parameters)
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo({}, minmax[key], roman))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  ret = [("bounds specs", minmax_out)]

  if iscluster:
    # "std" specs are only shown at cluster level
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs, roman))
      )

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  to_roman = compat.TryToRoman
  # scalar policy parameters: custom value if set, else the effective
  # value marked as default
  ret.extend([
    (key, str(to_roman(custom_ipolicy.get(key,
                                          "default (%s)" % eff_ipolicy[key]),
                       roman)))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret
4087
4088 4089 -def _PrintSpecsParameters(buf, specs):
4090 values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items())) 4091 buf.write(",".join(values))
4092
def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  # "std" specs exist only above group level
  if not isgroup:
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)

  first = True
  for minmax in ipolicy.get("minmax", []):
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    # only complete min/max pairs can be expressed as an option
    if not (minspecs and maxspecs):
      continue
    # the option name goes before the first pair; later pairs are
    # separated by "//"
    buf.write((" %s " % IPOLICY_BOUNDS_SPECS_STR) if first else "//")
    first = False
    buf.write("min:")
    _PrintSpecsParameters(buf, minspecs)
    buf.write("/max:")
    _PrintSpecsParameters(buf, maxspecs)
4127
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
      we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  headline = ("The %s will operate on %d %s.\n%s"
              "Do you want to continue?" % (text, len(names), list_type,
                                            extra))
  name_list = (("\nAffected %s:\n" % list_type) +
               "\n".join([" %s" % name for name in names]))

  answers = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if len(names) > 20:
    # for long lists, offer a "view" choice instead of dumping the names
    answers.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    answer = AskUser(headline, answers)
  else:
    answer = AskUser(headline + name_list, answers)

  if answer == "v":
    # show the full list and ask again, without the "view" choice
    answers.pop(1)
    answer = AskUser(headline + name_list, answers)
  return answer
4166
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  Values equal to the special "default" marker are kept as-is; all
  others are converted via L{utils.ParseUnit}.

  @type elements: dict
  @param elements: mapping of keys to raw (possibly unit-suffixed) values
  @rtype: dict

  """
  return dict((key,
               value if value == constants.VALUE_DEFAULT
               else utils.ParseUnit(value))
              for (key, value) in elements.items())
4179
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  """Fill an instance policy from the legacy per-parameter specs options.

  Each ispecs_* argument is a dictionary keyed by min/max/std (see the
  transposition below); the result is stored into C{ipolicy} under
  L{constants.ISPECS_MINMAX} and L{constants.ISPECS_STD}.

  @param ipolicy: the policy dictionary to fill (modified in place)
  @type group_ipolicy: bool
  @param group_ipolicy: whether this is a group-level policy (affects
      which value types are accepted)
  @type fill_all: bool
  @param fill_all: whether missing values should be filled from the
      cluster-wide defaults

  """
  try:
    # memory and disk sizes may carry unit suffixes
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

  # then transpose: from {param: {min: .., max: .., std: ..}} to
  # {min: {param: ..}, max: {param: ..}, std: {param: ..}}
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items(): # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  minmax_out = {}
  for key in constants.ISPECS_MINMAX_KEYS:
    if fill_all:
      # complete any missing min/max values from the built-in defaults
      minmax_out[key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    else:
      minmax_out[key] = ispecs[key]
  # the split options can only express a single min/max pair
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
  if fill_all:
    ipolicy[constants.ISPECS_STD] = \
      objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                       ispecs[constants.ISPECS_STD])
  else:
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
4238
def _ParseSpecUnit(spec, keyname):
  """Parse the size values of an instance spec, converting unit suffixes.

  Returns a copy of C{spec} in which the disk-size and memory-size
  entries (if present) have been converted via L{utils.ParseUnit}.

  @type spec: dict
  @param spec: the instance spec to parse
  @type keyname: string
  @param keyname: name of the spec (only used in error messages)
  @rtype: dict
  @raise errors.OpPrereqError: if a size value cannot be parsed

  """
  ret = spec.copy()
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
    if k in ret:
      try:
        ret[k] = utils.ParseUnit(ret[k])
      except (TypeError, ValueError, errors.UnitParseError), err:
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                    " specs: %s" % (k, ret[k], keyname, err)),
                                   errors.ECODE_INVAL)
  return ret
4251
def _ParseISpec(spec, keyname, required):
  """Parse and validate one instance spec dictionary.

  @type spec: dict
  @param spec: the raw spec (values may carry unit suffixes)
  @type keyname: string
  @param keyname: name of the spec, used in error messages
  @type required: bool
  @param required: whether all spec parameters must be present
  @rtype: dict
  @raise errors.OpPrereqError: on missing parameters (when required) or
      invalid values

  """
  parsed = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(parsed, constants.ISPECS_PARAMETER_TYPES)
  if required:
    missing = constants.ISPECS_PARAMETERS - frozenset(parsed.keys())
    if missing:
      raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                                 (keyname, utils.CommaJoin(missing)),
                                 errors.ECODE_INVAL)
  return parsed
4262
4263 4264 -def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
4265 ret = None 4266 if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and 4267 len(minmax_ispecs[0]) == 1): 4268 for (key, spec) in minmax_ispecs[0].items(): 4269 # This loop is executed exactly once 4270 if key in allowed_values and not spec: 4271 ret = key 4272 return ret
4273
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  """Fill an instance policy from complete min/max/std spec options.

  @param ipolicy_out: the policy dictionary to fill (modified in place)
  @param minmax_ispecs: list of min/max spec dictionaries, or None
  @param std_ispecs: dictionary of standard specs, or None
  @type group_ipolicy: bool
  @param group_ipolicy: whether this is a group-level policy
  @param allowed_values: special keywords accepted instead of real specs
  @raise errors.OpPrereqError: on invalid spec contents

  """
  def _ParseMinMaxPair(mmpair):
    # validate the keys and parse each of the min/max sub-specs
    pair_out = {}
    for (key, spec) in mmpair.items():
      if key not in constants.ISPECS_MINMAX_KEYS:
        msg = "Invalid key in bounds instance specifications: %s" % key
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
      pair_out[key] = _ParseISpec(spec, key, True)
    return pair_out

  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    # a special keyword (e.g. "default") replaces the whole bounds list
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    ipolicy_out[constants.ISPECS_MINMAX] = [_ParseMinMaxPair(mmpair)
                                            for mmpair in minmax_ispecs]
  if std_ispecs is not None:
    assert not group_ipolicy # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
4294
def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  There are two mutually exclusive ways of specifying specs: the legacy
  split --specs-xxx options (one ispecs_* dict per parameter) and the
  newer --ipolicy-xxx-specs options (minmax_ispecs/std_ispecs with
  complete spec dictionaries).

  @param minmax_ispecs: list of min/max spec dictionaries, or None
  @param std_ispecs: dictionary of standard specs, or None
  @param ipolicy_disk_templates: allowed disk templates, or None
  @param ipolicy_vcpu_ratio: vcpu ratio policy value, or None
  @param ipolicy_spindle_ratio: spindle ratio policy value, or None
  @type group_ipolicy: bool
  @param group_ipolicy: whether the policy is created at group level
  @param allowed_values: special keywords accepted verbatim instead of
      parsed values
  @param fill_all: whether for cluster policies we should ensure that
      all values are filled
  @rtype: dict
  @return: the new instance policy

  """
  assert not (fill_all and allowed_values)

  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  # the two families of spec options cannot be mixed
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()
  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    # a special keyword (from allowed_values) is stored as-is, a real
    # template list is stored as a list
    if allowed_values and ipolicy_disk_templates in allowed_values:
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    # complete the cluster policy with the built-in defaults
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out
4350
4351 4352 -def _NotAContainer(data):
4353 """ Checks whether the input is not a container data type. 4354 4355 @rtype: bool 4356 4357 """ 4358 return not (isinstance(data, (list, dict, tuple)))
4359
4360 4361 -def _GetAlignmentMapping(data):
4362 """ Returns info about alignment if present in an encoded ordered dictionary. 4363 4364 @type data: list of tuple 4365 @param data: The encoded ordered dictionary, as defined in 4366 L{_SerializeGenericInfo}. 4367 @rtype: dict of any to int 4368 @return: The dictionary mapping alignment groups to the maximum length of the 4369 dictionary key found in the group. 4370 4371 """ 4372 alignment_map = {} 4373 for entry in data: 4374 if len(entry) > 2: 4375 group_key = entry[2] 4376 key_length = len(entry[0]) 4377 if group_key in alignment_map: 4378 alignment_map[group_key] = max(alignment_map[group_key], key_length) 4379 else: 4380 alignment_map[group_key] = key_length 4381 4382 return alignment_map
4383
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  Recursively writes a YAML-like rendering of C{data} into C{buf},
  dispatching on the data's type.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        # the key is already on this line; start the dict on a new one
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        # the first entry may continue the current line; all following
        # entries start indented on their own lines
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    # the tuples may have two or three members - key, value, and alignment group
    # if the alignment group is present, align all values sharing the same group
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False

    alignment_mapping = _GetAlignmentMapping(data)
    for entry in data:
      key, val = entry[0:2]
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      if len(entry) > 2:
        # pad up to the longest key in this alignment group so the
        # values line up in a column
        max_key_length = alignment_mapping[entry[2]]
        buf.write(" " * (max_key_length - len(key)))
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, tuple) and all(map(_NotAContainer, data)):
    # tuples with simple content are serialized as inline lists
    buf.write("[%s]\n" % utils.CommaJoin(data))
  elif isinstance(data, list) or isinstance(data, tuple):
    # lists and tuples
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        # YAML-style "- item" bullets
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")
4464
def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of tuples (key, value) or (key, value, alignment_group), where
          key is a string, value is of any of the types listed here, and
          alignment_group can be any hashable value; it's a way to encode
          ordered dictionaries; any entries sharing the same alignment group
          are aligned by appending whitespace before the value as needed
        - lists of any of the types listed here
        - strings

  """
  out = StringIO()
  _SerializeGenericInfo(out, data, 0)
  # strip trailing newlines before handing the result to ToStdout
  serialized = out.getvalue()
  ToStdout(serialized.rstrip("\n"))
4487