Package ganeti :: Module cli
[hide private]
[frames] | [no frames]

Source Code for Module ganeti.cli

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc. 
   5  # 
   6  # This program is free software; you can redistribute it and/or modify 
   7  # it under the terms of the GNU General Public License as published by 
   8  # the Free Software Foundation; either version 2 of the License, or 
   9  # (at your option) any later version. 
  10  # 
  11  # This program is distributed in the hope that it will be useful, but 
  12  # WITHOUT ANY WARRANTY; without even the implied warranty of 
  13  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
  14  # General Public License for more details. 
  15  # 
  16  # You should have received a copy of the GNU General Public License 
  17  # along with this program; if not, write to the Free Software 
  18  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
  19  # 02110-1301, USA. 
  20   
  21   
  22  """Module dealing with command line parsing""" 
  23   
  24   
  25  import sys 
  26  import textwrap 
  27  import os.path 
  28  import time 
  29  import logging 
  30  import errno 
  31  from cStringIO import StringIO 
  32   
  33  from ganeti import utils 
  34  from ganeti import errors 
  35  from ganeti import constants 
  36  from ganeti import opcodes 
  37  from ganeti import luxi 
  38  from ganeti import ssconf 
  39  from ganeti import rpc 
  40  from ganeti import ssh 
  41  from ganeti import compat 
  42  from ganeti import netutils 
  43  from ganeti import qlang 
  44   
  45  from optparse import (OptionParser, TitledHelpFormatter, 
  46                        Option, OptionValueError) 
  47   
  48   
# Names exported on "from ganeti.cli import *"; keeping the list explicit
# documents the module's public API (everything else is internal)
__all__ = [
  # Command line options
  "ADD_UIDS_OPT",
  "ALLOCATABLE_OPT",
  "ALLOC_POLICY_OPT",
  "ALL_OPT",
  "AUTO_PROMOTE_OPT",
  "AUTO_REPLACE_OPT",
  "BACKEND_OPT",
  "BLK_OS_OPT",
  "CAPAB_MASTER_OPT",
  "CAPAB_VM_OPT",
  "CLEANUP_OPT",
  "CLUSTER_DOMAIN_SECRET_OPT",
  "CONFIRM_OPT",
  "CP_SIZE_OPT",
  "DEBUG_OPT",
  "DEBUG_SIMERR_OPT",
  "DISKIDX_OPT",
  "DISK_OPT",
  "DISK_TEMPLATE_OPT",
  "DRAINED_OPT",
  "DRY_RUN_OPT",
  "DRBD_HELPER_OPT",
  "EARLY_RELEASE_OPT",
  "ENABLED_HV_OPT",
  "ERROR_CODES_OPT",
  "FIELDS_OPT",
  "FILESTORE_DIR_OPT",
  "FILESTORE_DRIVER_OPT",
  "FORCE_OPT",
  "FORCE_VARIANT_OPT",
  "GLOBAL_FILEDIR_OPT",
  "HID_OS_OPT",
  "HVLIST_OPT",
  "HVOPTS_OPT",
  "HYPERVISOR_OPT",
  "IALLOCATOR_OPT",
  "DEFAULT_IALLOCATOR_OPT",
  "IDENTIFY_DEFAULTS_OPT",
  "IGNORE_CONSIST_OPT",
  "IGNORE_FAILURES_OPT",
  "IGNORE_OFFLINE_OPT",
  "IGNORE_REMOVE_FAILURES_OPT",
  "IGNORE_SECONDARIES_OPT",
  "IGNORE_SIZE_OPT",
  "INTERVAL_OPT",
  "MAC_PREFIX_OPT",
  "MAINTAIN_NODE_HEALTH_OPT",
  "MASTER_NETDEV_OPT",
  "MC_OPT",
  "MIGRATION_MODE_OPT",
  "NET_OPT",
  "NEW_CLUSTER_CERT_OPT",
  "NEW_CLUSTER_DOMAIN_SECRET_OPT",
  "NEW_CONFD_HMAC_KEY_OPT",
  "NEW_RAPI_CERT_OPT",
  "NEW_SECONDARY_OPT",
  "NIC_PARAMS_OPT",
  "NODE_FORCE_JOIN_OPT",
  "NODE_LIST_OPT",
  "NODE_PLACEMENT_OPT",
  "NODEGROUP_OPT",
  "NODE_PARAMS_OPT",
  "NODE_POWERED_OPT",
  "NODRBD_STORAGE_OPT",
  "NOHDR_OPT",
  "NOIPCHECK_OPT",
  "NO_INSTALL_OPT",
  "NONAMECHECK_OPT",
  "NOLVM_STORAGE_OPT",
  "NOMODIFY_ETCHOSTS_OPT",
  "NOMODIFY_SSH_SETUP_OPT",
  "NONICS_OPT",
  "NONLIVE_OPT",
  "NONPLUS1_OPT",
  "NOSHUTDOWN_OPT",
  "NOSTART_OPT",
  "NOSSH_KEYCHECK_OPT",
  "NOVOTING_OPT",
  "NO_REMEMBER_OPT",
  "NWSYNC_OPT",
  "ON_PRIMARY_OPT",
  "ON_SECONDARY_OPT",
  "OFFLINE_OPT",
  "OSPARAMS_OPT",
  "OS_OPT",
  "OS_SIZE_OPT",
  "PREALLOC_WIPE_DISKS_OPT",
  "PRIMARY_IP_VERSION_OPT",
  "PRIORITY_OPT",
  "RAPI_CERT_OPT",
  "READD_OPT",
  "REBOOT_TYPE_OPT",
  "REMOVE_INSTANCE_OPT",
  "REMOVE_UIDS_OPT",
  "RESERVED_LVS_OPT",
  "ROMAN_OPT",
  "SECONDARY_IP_OPT",
  "SELECT_OS_OPT",
  "SEP_OPT",
  "SHOWCMD_OPT",
  "SHUTDOWN_TIMEOUT_OPT",
  "SINGLE_NODE_OPT",
  "SRC_DIR_OPT",
  "SRC_NODE_OPT",
  "SUBMIT_OPT",
  "STATIC_OPT",
  "SYNC_OPT",
  "TAG_SRC_OPT",
  "TIMEOUT_OPT",
  "UIDPOOL_OPT",
  "USEUNITS_OPT",
  "USE_REPL_NET_OPT",
  "VERBOSE_OPT",
  "VG_NAME_OPT",
  "YES_DOIT_OPT",
  # Generic functions for CLI programs
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "SubmitOpCode",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "FormatError",
  "FormatQueryResult",
  "FormatParameterDict",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNode",
  "ArgOs",
  "ArgSuggest",
  "ArgUnknown",
  "OPT_COMPL_INST_ADD_NODES",
  "OPT_COMPL_MANY_NODES",
  "OPT_COMPL_ONE_IALLOCATOR",
  "OPT_COMPL_ONE_INSTANCE",
  "OPT_COMPL_ONE_NODE",
  "OPT_COMPL_ONE_NODEGROUP",
  "OPT_COMPL_ONE_OS",
  "cli_option",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  "COMMON_CREATE_OPTS",
  ]
 226   
# Prefix for option keys given without a value; "no_foo" is parsed by
# _SplitKeyVal as key "foo" with value False
NO_PREFIX = "no_"

# Prefix for option keys given without a value; "-foo" is parsed by
# _SplitKeyVal as key "foo" with value None
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)

# Query result status for clients
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)
 247   
class _Argument:
  """Base class describing one positional command line argument.

  Holds only the allowed multiplicity: at least C{min} and (unless
  C{max} is None, meaning unlimited) at most C{max} occurrences.

  """
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    # "min"/"max" shadow the builtins, but they are part of the keyword
    # interface used by the ARGS_* definitions below, so they stay
    self.min = min
    self.max = max

  def __repr__(self):
    details = (self.__class__.__name__, self.min, self.max)
    return "<%s min=%s max=%s>" % details
class ArgSuggest(_Argument):
  """Argument with a list of suggested values.

  The value may be anything; the entries passed as C{choices} are only
  suggestions (contrast with L{ArgChoice}, where the value must be one
  of them).

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    details = (self.__class__.__name__, self.min, self.max, self.choices)
    return "<%s min=%s max=%s choices=%r>" % details
272 273
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices. (The class body is empty; the
  restriction is presumably enforced by the consumer of these definitions.)

  """
281 282
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  Marker subclass of L{_Argument}; carries no behaviour of its own.

  """
287 288
class ArgInstance(_Argument):
  """Instances argument.

  Marker subclass of L{_Argument}; carries no behaviour of its own.

  """
293 294
class ArgNode(_Argument):
  """Node argument.

  Marker subclass of L{_Argument}; carries no behaviour of its own.

  """
299 300
class ArgGroup(_Argument):
  """Node group argument.

  Marker subclass of L{_Argument}; carries no behaviour of its own.

  """
305 306
class ArgJobId(_Argument):
  """Job ID argument.

  Marker subclass of L{_Argument}; carries no behaviour of its own.

  """
311 312
class ArgFile(_Argument):
  """File path argument.

  Marker subclass of L{_Argument}; carries no behaviour of its own.

  """
317 318
class ArgCommand(_Argument):
  """Command argument.

  Marker subclass of L{_Argument}; carries no behaviour of its own.

  """
323 324
class ArgHost(_Argument):
  """Host argument.

  Marker subclass of L{_Argument}; carries no behaviour of its own.

  """
329 330
class ArgOs(_Argument):
  """OS argument.

  Marker subclass of L{_Argument}; carries no behaviour of its own.

  """
#: Predefined positional-argument specifications, built from the Arg*
#: classes above, for reuse in the per-script command tables
ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
# TODO
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Reads opts.tag_type to decide which kind of object the tags apply to
  and returns a (kind, name) tuple. For node and instance tags the name
  is popped off the front of args, so this function modifies its args
  parameter.

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    return kind, kind
  if kind in (constants.TAG_NODE, constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    return kind, args.pop(0)
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
367 368
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  Appends to args the stripped lines of the file named by the
  'tags_source' attribute of opts; a file named '-' is replaced by
  stdin, and a value of None means no extension takes place.

  """
  source = opts.tags_source
  if source is None:
    return
  if source == "-":
    stream = sys.stdin
  else:
    stream = open(source, "r")
  extra = []
  try:
    # read via repeated readline() calls instead of iterating over the
    # file object directly, because of python bug 1633941
    for line in iter(stream.readline, ""):
      extra.append(line.strip())
  finally:
    stream.close()
  args.extend(extra)
396 397
def ListTags(opts, args):
  """List the tags on a given object.

  Generic implementation covering all three tag object kinds (cluster,
  node, instance); the opts argument must carry a tag_type field that
  selects the object type. Tags are printed one per line, sorted.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  for tag in sorted(cl.QueryTags(kind, name)):
    ToStdout(tag)
414 415
def AddTags(opts, args):
  """Add tags on a given object.

  Generic implementation covering all three tag object kinds (cluster,
  node, instance); the opts argument must carry a tag_type field that
  selects the object type, and opts.tags_source may name a file with
  additional tags.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  SubmitOpCode(opcodes.OpTagsSet(kind=kind, name=name, tags=args), opts=opts)
431 432
def RemoveTags(opts, args):
  """Remove tags from a given object.

  Generic implementation covering all three tag object kinds (cluster,
  node, instance); the opts argument must carry a tag_type field that
  selects the object type, and opts.tags_source may name a file with
  additional tags.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  SubmitOpCode(opcodes.OpTagsDel(kind=kind, name=name, tags=args), opts=opts)
448 449
def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParsers custom converter for units.

  @param option: the optparse Option instance (unused)
  @type opt: string
  @param opt: the option name, used only in error messages
  @type value: string
  @param value: the value to convert
  @return: the result of L{utils.ParseUnit} on the value
  @raises OptionValueError: if the value cannot be parsed as a unit

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  Parses a key=val[,...] string into a dict. Elements without an "="
  are handled specially: a key with the 'no_' prefix gets value False
  (prefix stripped), a key with the '-' prefix gets value None (prefix
  stripped), and any other bare key gets value True.

  @type opt: string
  @param opt: a string holding the option name for which we process the
      data, used in building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: the parsed {key: val} mapping
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if not data:
    return kv_dict
  for elem in utils.UnescapeAndSplit(data, sep=","):
    if "=" in elem:
      key, val = elem.split("=", 1)
    elif elem.startswith(NO_PREFIX):
      key, val = elem[len(NO_PREFIX):], False
    elif elem.startswith(UN_PREFIX):
      key, val = elem[len(UN_PREFIX):], None
    else:
      key, val = elem, True
    if key in kv_dict:
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                  (key, opt))
    kv_dict[key] = val
  return kv_dict
495 496
def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible. An ident
  carrying the 'no_' ('-') prefix and no options yields the stripped ident
  paired with False (None, respectively).

  """
  if ":" in value:
    ident, rest = value.split(":", 1)
  else:
    ident, rest = value, ''

  # "no_" removal maps to False, "-" removal maps to None; the first
  # matching prefix wins, mirroring the original if/elif ordering
  for prefix, marker in ((NO_PREFIX, False), (UN_PREFIX, None)):
    if ident.startswith(prefix):
      if rest:
        raise errors.ParameterError(
          "Cannot pass options when removing parameter groups: %s" % value)
      return (ident[len(prefix):], marker)

  return (ident, _SplitKeyVal(opt, rest))
def check_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  @param option: the optparse Option instance (unused)
  @type opt: string
  @param opt: the option name, forwarded to L{_SplitKeyVal} for error
      messages
  @type value: string
  @param value: a string of the format key=val,key=val,...
  @rtype: dict
  @raises errors.ParameterError: if there are duplicate keys

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  @param option: the optparse Option instance (unused)
  @type opt: string
  @param opt: the option name (unused)
  @type value: string
  @param value: constants.VALUE_TRUE/"yes" or constants.VALUE_FALSE/"no",
      case-insensitive
  @rtype: bool
  @raises errors.ParameterError: for any other value

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

#: All known dynamic-completion markers, for membership tests
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Extends the stock optparse Option with:
    - an extra "completion_suggest" attribute (see the OPT_COMPL_*
      constants above)
    - the extra types "identkeyval", "keyval", "unit" and "bool", each
      backed by the corresponding check_* converter in this module

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
588 589 590 # optparse.py sets make_option, so we do it for our own option class, too 591 cli_option = CliOption 592 593 594 _YORNO = "yes|no" 595 596 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count", 597 help="Increase debugging level") 598 599 NOHDR_OPT = cli_option("--no-headers", default=False, 600 action="store_true", dest="no_headers", 601 help="Don't display column headers") 602 603 SEP_OPT = cli_option("--separator", default=None, 604 action="store", dest="separator", 605 help=("Separator between output fields" 606 " (defaults to one space)")) 607 608 USEUNITS_OPT = cli_option("--units", default=None, 609 dest="units", choices=('h', 'm', 'g', 't'), 610 help="Specify units for output (one of h/m/g/t)") 611 612 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store", 613 type="string", metavar="FIELDS", 614 help="Comma separated list of output fields") 615 616 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true", 617 default=False, help="Force the operation") 618 619 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true", 620 default=False, help="Do not require confirmation") 621 622 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline", 623 action="store_true", default=False, 624 help=("Ignore offline nodes and do as much" 625 " as possible")) 626 627 TAG_SRC_OPT = cli_option("--from", dest="tags_source", 628 default=None, help="File with tag names") 629 630 SUBMIT_OPT = cli_option("--submit", dest="submit_only", 631 default=False, action="store_true", 632 help=("Submit the job and return the job ID, but" 633 " don't wait for the job to finish")) 634 635 SYNC_OPT = cli_option("--sync", dest="do_locking", 636 default=False, action="store_true", 637 help=("Grab locks while doing the queries" 638 " in order to ensure more consistent results")) 639 640 DRY_RUN_OPT = cli_option("--dry-run", default=False, 641 action="store_true", 642 help=("Do not execute the operation, just 
run the" 643 " check steps and verify it it could be" 644 " executed")) 645 646 VERBOSE_OPT = cli_option("-v", "--verbose", default=False, 647 action="store_true", 648 help="Increase the verbosity of the operation") 649 650 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False, 651 action="store_true", dest="simulate_errors", 652 help="Debugging option that makes the operation" 653 " treat most runtime checks as failed") 654 655 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync", 656 default=True, action="store_false", 657 help="Don't wait for sync (DANGEROUS!)") 658 659 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template", 660 help="Custom disk setup (diskless, file," 661 " plain or drbd)", 662 default=None, metavar="TEMPL", 663 choices=list(constants.DISK_TEMPLATES)) 664 665 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true", 666 help="Do not create any network cards for" 667 " the instance") 668 669 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir", 670 help="Relative path under default cluster-wide" 671 " file storage dir to store file-based disks", 672 default=None, metavar="<DIR>") 673 674 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver", 675 help="Driver to use for image files", 676 default="loop", metavar="<DRIVER>", 677 choices=list(constants.FILE_DRIVER)) 678 679 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>", 680 help="Select nodes for the instance automatically" 681 " using the <NAME> iallocator plugin", 682 default=None, type="string", 683 completion_suggest=OPT_COMPL_ONE_IALLOCATOR) 684 685 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator", 686 metavar="<NAME>", 687 help="Set the default instance allocator plugin", 688 default=None, type="string", 689 completion_suggest=OPT_COMPL_ONE_IALLOCATOR) 690 691 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run", 692 
metavar="<os>", 693 completion_suggest=OPT_COMPL_ONE_OS) 694 695 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams", 696 type="keyval", default={}, 697 help="OS parameters") 698 699 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant", 700 action="store_true", default=False, 701 help="Force an unknown variant") 702 703 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install", 704 action="store_true", default=False, 705 help="Do not install the OS (will" 706 " enable no-start)") 707 708 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams", 709 type="keyval", default={}, 710 help="Backend parameters") 711 712 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval", 713 default={}, dest="hvparams", 714 help="Hypervisor parameters") 715 716 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor", 717 help="Hypervisor and hypervisor options, in the" 718 " format hypervisor:option=value,option=value,...", 719 default=None, type="identkeyval") 720 721 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams", 722 help="Hypervisor and hypervisor options, in the" 723 " format hypervisor:option=value,option=value,...", 724 default=[], action="append", type="identkeyval") 725 726 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True, 727 action="store_false", 728 help="Don't check that the instance's IP" 729 " is alive") 730 731 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check", 732 default=True, action="store_false", 733 help="Don't check that the instance's name" 734 " is resolvable") 735 736 NET_OPT = cli_option("--net", 737 help="NIC parameters", default=[], 738 dest="nics", action="append", type="identkeyval") 739 740 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[], 741 dest="disks", action="append", type="identkeyval") 742 743 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None, 744 
help="Comma-separated list of disks" 745 " indices to act on (e.g. 0,2) (optional," 746 " defaults to all disks)") 747 748 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size", 749 help="Enforces a single-disk configuration using the" 750 " given disk size, in MiB unless a suffix is used", 751 default=None, type="unit", metavar="<size>") 752 753 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency", 754 dest="ignore_consistency", 755 action="store_true", default=False, 756 help="Ignore the consistency of the disks on" 757 " the secondary") 758 759 NONLIVE_OPT = cli_option("--non-live", dest="live", 760 default=True, action="store_false", 761 help="Do a non-live migration (this usually means" 762 " freeze the instance, save the state, transfer and" 763 " only then resume running on the secondary node)") 764 765 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode", 766 default=None, 767 choices=list(constants.HT_MIGRATION_MODES), 768 help="Override default migration mode (choose" 769 " either live or non-live") 770 771 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node", 772 help="Target node and optional secondary node", 773 metavar="<pnode>[:<snode>]", 774 completion_suggest=OPT_COMPL_INST_ADD_NODES) 775 776 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[], 777 action="append", metavar="<node>", 778 help="Use only this node (can be used multiple" 779 " times, if not given defaults to all nodes)", 780 completion_suggest=OPT_COMPL_ONE_NODE) 781 782 NODEGROUP_OPT = cli_option("-g", "--node-group", 783 dest="nodegroup", 784 help="Node group (name or uuid)", 785 metavar="<nodegroup>", 786 default=None, type="string", 787 completion_suggest=OPT_COMPL_ONE_NODEGROUP) 788 789 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node", 790 metavar="<node>", 791 completion_suggest=OPT_COMPL_ONE_NODE) 792 793 NOSTART_OPT = cli_option("--no-start", dest="start", default=True, 794 action="store_false", 795 
help="Don't start the instance after creation") 796 797 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command", 798 action="store_true", default=False, 799 help="Show command instead of executing it") 800 801 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup", 802 default=False, action="store_true", 803 help="Instead of performing the migration, try to" 804 " recover from a failed cleanup. This is safe" 805 " to run even if the instance is healthy, but it" 806 " will create extra replication traffic and " 807 " disrupt briefly the replication (like during the" 808 " migration") 809 810 STATIC_OPT = cli_option("-s", "--static", dest="static", 811 action="store_true", default=False, 812 help="Only show configuration data, not runtime data") 813 814 ALL_OPT = cli_option("--all", dest="show_all", 815 default=False, action="store_true", 816 help="Show info on all instances on the cluster." 817 " This can take a long time to run, use wisely") 818 819 SELECT_OS_OPT = cli_option("--select-os", dest="select_os", 820 action="store_true", default=False, 821 help="Interactive OS reinstall, lists available" 822 " OS templates for selection") 823 824 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures", 825 action="store_true", default=False, 826 help="Remove the instance from the cluster" 827 " configuration even if there are failures" 828 " during the removal process") 829 830 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures", 831 dest="ignore_remove_failures", 832 action="store_true", default=False, 833 help="Remove the instance from the" 834 " cluster configuration even if there" 835 " are failures during the removal" 836 " process") 837 838 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance", 839 action="store_true", default=False, 840 help="Remove the instance from the cluster") 841 842 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node", 843 help="Specifies the new secondary node", 
844 metavar="NODE", default=None, 845 completion_suggest=OPT_COMPL_ONE_NODE) 846 847 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary", 848 default=False, action="store_true", 849 help="Replace the disk(s) on the primary" 850 " node (only for the drbd template)") 851 852 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary", 853 default=False, action="store_true", 854 help="Replace the disk(s) on the secondary" 855 " node (only for the drbd template)") 856 857 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote", 858 default=False, action="store_true", 859 help="Lock all nodes and auto-promote as needed" 860 " to MC status") 861 862 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto", 863 default=False, action="store_true", 864 help="Automatically replace faulty disks" 865 " (only for the drbd template)") 866 867 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size", 868 default=False, action="store_true", 869 help="Ignore current recorded size" 870 " (useful for forcing activation when" 871 " the recorded size is wrong)") 872 873 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node", 874 metavar="<node>", 875 completion_suggest=OPT_COMPL_ONE_NODE) 876 877 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory", 878 metavar="<dir>") 879 880 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip", 881 help="Specify the secondary ip for the node", 882 metavar="ADDRESS", default=None) 883 884 READD_OPT = cli_option("--readd", dest="readd", 885 default=False, action="store_true", 886 help="Readd old node after replacing it") 887 888 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check", 889 default=True, action="store_false", 890 help="Disable SSH key fingerprint checking") 891 892 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join", 893 default=False, action="store_true", 894 help="Force the joining of 
a node") 895 896 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate", 897 type="bool", default=None, metavar=_YORNO, 898 help="Set the master_candidate flag on the node") 899 900 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO, 901 type="bool", default=None, 902 help=("Set the offline flag on the node" 903 " (cluster does not communicate with offline" 904 " nodes)")) 905 906 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO, 907 type="bool", default=None, 908 help=("Set the drained flag on the node" 909 " (excluded from allocation operations)")) 910 911 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable", 912 type="bool", default=None, metavar=_YORNO, 913 help="Set the master_capable flag on the node") 914 915 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable", 916 type="bool", default=None, metavar=_YORNO, 917 help="Set the vm_capable flag on the node") 918 919 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable", 920 type="bool", default=None, metavar=_YORNO, 921 help="Set the allocatable flag on a volume") 922 923 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage", 924 help="Disable support for lvm based instances" 925 " (cluster-wide)", 926 action="store_false", default=True) 927 928 ENABLED_HV_OPT = cli_option("--enabled-hypervisors", 929 dest="enabled_hypervisors", 930 help="Comma-separated list of hypervisors", 931 type="string", default=None) 932 933 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams", 934 type="keyval", default={}, 935 help="NIC parameters") 936 937 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None, 938 dest="candidate_pool_size", type="int", 939 help="Set the candidate pool size") 940 941 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name", 942 help=("Enables LVM and specifies the volume group" 943 " name (cluster-wide) for disk allocation" 944 " [%s]" % 
constants.DEFAULT_VG), 945 metavar="VG", default=None) 946 947 YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it", 948 help="Destroy cluster", action="store_true") 949 950 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting", 951 help="Skip node agreement check (dangerous)", 952 action="store_true", default=False) 953 954 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix", 955 help="Specify the mac prefix for the instance IP" 956 " addresses, in the format XX:XX:XX", 957 metavar="PREFIX", 958 default=None) 959 960 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev", 961 help="Specify the node interface (cluster-wide)" 962 " on which the master IP address will be added" 963 " (cluster init default: %s)" % 964 constants.DEFAULT_BRIDGE, 965 metavar="NETDEV", 966 default=None) 967 968 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir", 969 help="Specify the default directory (cluster-" 970 "wide) for storing the file-based disks [%s]" % 971 constants.DEFAULT_FILE_STORAGE_DIR, 972 metavar="DIR", 973 default=constants.DEFAULT_FILE_STORAGE_DIR) 974 975 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts", 976 help="Don't modify /etc/hosts", 977 action="store_false", default=True) 978 979 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup", 980 help="Don't initialize SSH keys", 981 action="store_false", default=True) 982 983 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes", 984 help="Enable parseable error messages", 985 action="store_true", default=False) 986 987 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem", 988 help="Skip N+1 memory redundancy tests", 989 action="store_true", default=False) 990 991 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type", 992 help="Type of reboot: soft/hard/full", 993 default=constants.INSTANCE_REBOOT_HARD, 994 metavar="<REBOOT>", 995 
choices=list(constants.REBOOT_TYPES)) 996 997 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries", 998 dest="ignore_secondaries", 999 default=False, action="store_true", 1000 help="Ignore errors from secondaries") 1001 1002 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown", 1003 action="store_false", default=True, 1004 help="Don't shutdown the instance (unsafe)") 1005 1006 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int", 1007 default=constants.DEFAULT_SHUTDOWN_TIMEOUT, 1008 help="Maximum time to wait") 1009 1010 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout", 1011 dest="shutdown_timeout", type="int", 1012 default=constants.DEFAULT_SHUTDOWN_TIMEOUT, 1013 help="Maximum time to wait for instance shutdown") 1014 1015 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int", 1016 default=None, 1017 help=("Number of seconds between repetions of the" 1018 " command")) 1019 1020 EARLY_RELEASE_OPT = cli_option("--early-release", 1021 dest="early_release", default=False, 1022 action="store_true", 1023 help="Release the locks on the secondary" 1024 " node(s) early") 1025 1026 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate", 1027 dest="new_cluster_cert", 1028 default=False, action="store_true", 1029 help="Generate a new cluster certificate") 1030 1031 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert", 1032 default=None, 1033 help="File containing new RAPI certificate") 1034 1035 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert", 1036 default=None, action="store_true", 1037 help=("Generate a new self-signed RAPI" 1038 " certificate")) 1039 1040 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key", 1041 dest="new_confd_hmac_key", 1042 default=False, action="store_true", 1043 help=("Create a new HMAC key for %s" % 1044 constants.CONFD)) 1045 1046 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret", 1047 dest="cluster_domain_secret", 1048 default=None, 
1049 help=("Load new new cluster domain" 1050 " secret from file")) 1051 1052 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret", 1053 dest="new_cluster_domain_secret", 1054 default=False, action="store_true", 1055 help=("Create a new cluster domain" 1056 " secret")) 1057 1058 USE_REPL_NET_OPT = cli_option("--use-replication-network", 1059 dest="use_replication_network", 1060 help="Whether to use the replication network" 1061 " for talking to the nodes", 1062 action="store_true", default=False) 1063 1064 MAINTAIN_NODE_HEALTH_OPT = \ 1065 cli_option("--maintain-node-health", dest="maintain_node_health", 1066 metavar=_YORNO, default=None, type="bool", 1067 help="Configure the cluster to automatically maintain node" 1068 " health, by shutting down unknown instances, shutting down" 1069 " unknown DRBD devices, etc.") 1070 1071 IDENTIFY_DEFAULTS_OPT = \ 1072 cli_option("--identify-defaults", dest="identify_defaults", 1073 default=False, action="store_true", 1074 help="Identify which saved instance parameters are equal to" 1075 " the current cluster defaults and set them as such, instead" 1076 " of marking them as overridden") 1077 1078 UIDPOOL_OPT = cli_option("--uid-pool", default=None, 1079 action="store", dest="uid_pool", 1080 help=("A list of user-ids or user-id" 1081 " ranges separated by commas")) 1082 1083 ADD_UIDS_OPT = cli_option("--add-uids", default=None, 1084 action="store", dest="add_uids", 1085 help=("A list of user-ids or user-id" 1086 " ranges separated by commas, to be" 1087 " added to the user-id pool")) 1088 1089 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None, 1090 action="store", dest="remove_uids", 1091 help=("A list of user-ids or user-id" 1092 " ranges separated by commas, to be" 1093 " removed from the user-id pool")) 1094 1095 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None, 1096 action="store", dest="reserved_lvs", 1097 help=("A comma-separated list of reserved" 1098 " logical volumes names, that 
will be" 1099 " ignored by cluster verify")) 1100 1101 ROMAN_OPT = cli_option("--roman", 1102 dest="roman_integers", default=False, 1103 action="store_true", 1104 help="Use roman numbers for positive integers") 1105 1106 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper", 1107 action="store", default=None, 1108 help="Specifies usermode helper for DRBD") 1109 1110 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage", 1111 action="store_false", default=True, 1112 help="Disable support for DRBD") 1113 1114 PRIMARY_IP_VERSION_OPT = \ 1115 cli_option("--primary-ip-version", default=constants.IP4_VERSION, 1116 action="store", dest="primary_ip_version", 1117 metavar="%d|%d" % (constants.IP4_VERSION, 1118 constants.IP6_VERSION), 1119 help="Cluster-wide IP version for primary IP") 1120 1121 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority", 1122 metavar="|".join(name for name, _ in _PRIORITY_NAMES), 1123 choices=_PRIONAME_TO_VALUE.keys(), 1124 help="Priority for opcode processing") 1125 1126 HID_OS_OPT = cli_option("--hidden", dest="hidden", 1127 type="bool", default=None, metavar=_YORNO, 1128 help="Sets the hidden flag on the OS") 1129 1130 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted", 1131 type="bool", default=None, metavar=_YORNO, 1132 help="Sets the blacklisted flag on the OS") 1133 1134 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None, 1135 type="bool", metavar=_YORNO, 1136 dest="prealloc_wipe_disks", 1137 help=("Wipe disks prior to instance" 1138 " creation")) 1139 1140 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams", 1141 type="keyval", default=None, 1142 help="Node parameters") 1143 1144 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy", 1145 action="store", metavar="POLICY", default=None, 1146 help="Allocation policy for the node group") 1147 1148 NODE_POWERED_OPT = cli_option("--node-powered", default=None, 1149 type="bool", 
metavar=_YORNO, 1150 dest="node_powered", 1151 help="Specify if the SoR for node is powered") 1152 1153 NO_REMEMBER_OPT = cli_option("--no-remember", 1154 dest="no_remember", 1155 action="store_true", default=False, 1156 help="Perform but do not record the change" 1157 " in the configuration") 1158 1159 1160 #: Options provided by all commands 1161 COMMON_OPTS = [DEBUG_OPT] 1162 1163 # common options for creating instances. add and import then add their own 1164 # specific ones. 1165 COMMON_CREATE_OPTS = [ 1166 BACKEND_OPT, 1167 DISK_OPT, 1168 DISK_TEMPLATE_OPT, 1169 FILESTORE_DIR_OPT, 1170 FILESTORE_DRIVER_OPT, 1171 HYPERVISOR_OPT, 1172 IALLOCATOR_OPT, 1173 NET_OPT, 1174 NODE_PLACEMENT_OPT, 1175 NOIPCHECK_OPT, 1176 NONAMECHECK_OPT, 1177 NONICS_OPT, 1178 NWSYNC_OPT, 1179 OSPARAMS_OPT, 1180 OS_SIZE_OPT, 1181 SUBMIT_OPT, 1182 DRY_RUN_OPT, 1183 PRIORITY_OPT, 1184 ] 1185 1186
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line; note that this list is mutated in
      place (the command name is popped off), which is what makes the
      later no-argument C{parser.parse_args()} call work when the
      caller passes C{sys.argv}
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target', ...}
  @return: a tuple of (function, options, arguments), or
      (None, None, None) on usage/argument errors

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing: print the usage plus a formatted list of all
    # known commands and their short help texts
    sortedcmds = commands.keys()
    sortedcmds.sort()

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max([len(" %s" % cmd) for cmd in commands])
    mlen = min(60, mlen)  # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      # the fifth element of a command definition is the short help text
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands; popping from argv
  # also removes the command from sys.argv when that is what was passed in
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  # no explicit argument list: optparse falls back to sys.argv[1:], which
  # (after the pop above) no longer contains the command name
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
1264 1265
1266 -def _CheckArguments(cmd, args_def, args):
1267 """Verifies the arguments using the argument definition. 1268 1269 Algorithm: 1270 1271 1. Abort with error if values specified by user but none expected. 1272 1273 1. For each argument in definition 1274 1275 1. Keep running count of minimum number of values (min_count) 1276 1. Keep running count of maximum number of values (max_count) 1277 1. If it has an unlimited number of values 1278 1279 1. Abort with error if it's not the last argument in the definition 1280 1281 1. If last argument has limited number of values 1282 1283 1. Abort with error if number of values doesn't match or is too large 1284 1285 1. Abort with error if user didn't pass enough values (min_count) 1286 1287 """ 1288 if args and not args_def: 1289 ToStderr("Error: Command %s expects no arguments", cmd) 1290 return False 1291 1292 min_count = None 1293 max_count = None 1294 check_max = None 1295 1296 last_idx = len(args_def) - 1 1297 1298 for idx, arg in enumerate(args_def): 1299 if min_count is None: 1300 min_count = arg.min 1301 elif arg.min is not None: 1302 min_count += arg.min 1303 1304 if max_count is None: 1305 max_count = arg.max 1306 elif arg.max is not None: 1307 max_count += arg.max 1308 1309 if idx == last_idx: 1310 check_max = (arg.max is not None) 1311 1312 elif arg.max is None: 1313 raise errors.ProgrammerError("Only the last argument can have max=None") 1314 1315 if check_max: 1316 # Command with exact number of arguments 1317 if (min_count is not None and max_count is not None and 1318 min_count == max_count and len(args) != min_count): 1319 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count) 1320 return False 1321 1322 # Command with limited number of arguments 1323 if max_count is not None and len(args) > max_count: 1324 ToStderr("Error: Command %s expects only %d argument(s)", 1325 cmd, max_count) 1326 return False 1327 1328 # Command with some required arguments 1329 if min_count is not None and len(args) < min_count: 1330 ToStderr("Error: Command %s 
expects at least %d argument(s)", 1331 cmd, min_count) 1332 return False 1333 1334 return True
1335 1336
def SplitNodeOption(value):
  """Splits the value of a --node option.

  A value of the form "pnode:snode" is split at the first colon into a
  [primary, secondary] pair; anything else (including None or an empty
  string) yields (value, None).

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
1345 1346
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    # no variants (None or empty): the plain OS name is the only valid one
    return [os_name]
  return ['%s+%s' % (os_name, variant) for variant in os_variants]
1362 1363
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @return: the default list when nothing was selected; the default list
      extended by the given fields when the selection starts with "+";
      otherwise just the selected fields

  """
  if selected is None:
    return default

  if selected.startswith("+"):
    # "+a,b" means "default fields plus a and b"
    extra = selected[1:].split(",")
    return default + extra

  return selected.split(",")
# Decorator alias: wraps a function so that it runs with the RPC layer
# initialized (see rpc.RunWithRPC); kept under this name for the client
# scripts that use it.
UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # the last choice doubles as the default/fallback answer
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    # talk to the controlling terminal directly, so redirected
    # stdin/stdout do not interfere with the prompt
    f = file("/dev/tty", "a+")
  except IOError:
    # no tty available: silently fall back to the default answer
    return answer
  try:
    chars = [entry[0] for entry in choices]
    # mark the default choice with brackets, e.g. "y/[n]/?"
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      # read at most two characters (answer char plus newline)
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        # print the per-choice descriptions and re-prompt
        for entry in choices:
          f.write("  %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
1440 1441
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients:
  raised e.g. by L{SubmitOrSend} when only submission (and not waiting
  for the result) was requested.

  """
1451 1452
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @return: the ID of the submitted job

  """
  if cl is None:
    cl = GetClient()
  return cl.SubmitJob(ops)
1469 1470
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  Waits until the job reaches a final status, forwarding all log
  messages to the reporting callbacks, then evaluates the job result
  and either returns the opcode results or raises an appropriate error.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @return: the list of opcode results on success
  @raise errors.JobLost: if the job disappears while polling
  @raise errors.OpExecError: on job failure or cancellation

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        # NOTE: relies on Python 2 semantics where max(None, int) is the
        # int (None compares smaller than any number)
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    # NOTE: this elif is deliberately tied to "if log_entries" above:
    # pending log entries are drained first, and the final status is only
    # acted upon on a later iteration that carries no log messages
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # find the first failed opcode and report it, noting whether any
  # earlier opcode had succeeded (partial failure)
  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
1547 1548
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  Subclasses supply the data-access side of job polling: waiting for
  job changes and querying job fields.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    Abstract; must be provided by subclasses.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    Abstract; must be provided by subclasses.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
1575 1576
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses decide what to do with job log messages and with
  "no change" notifications while polling.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    Abstract; must be provided by subclasses.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called for if a job hasn't changed in a while.

    Abstract; must be provided by subclasses.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
1602 1603
class _LuxiJobPollCb(JobPollCbBase):
  """Job-polling callbacks that delegate everything to a LUXI client.

  """
  def __init__(self, cl):
    """Remembers the LUXI client to delegate to.

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job, via the LUXI client.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Queries job fields, via the LUXI client.

    """
    return self.cl.QueryJobs(job_ids, fields)
1625 1626
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that forward log messages to a single callable.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable receiving (timestamp, log_type, log_msg)
        tuples

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Forwards one log message as a (timestamp, type, message) tuple.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Ignore
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that print job progress to stdout/stderr.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # remember which one-time notifications were already printed
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Prints one formatted, timestamped log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Prints a one-time notice for queued/lock-waiting jobs.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED:
      if not self.notified_queued:
        ToStderr("Job %s is waiting in queue", job_id)
        self.notified_queued = True
    elif status == constants.JOB_STATUS_WAITLOCK:
      if not self.notified_waitlock:
        ToStderr("Job %s is trying to acquire all necessary locks", job_id)
        self.notified_waitlock = True
1682 1683
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Non-message entries are stringified first; the result is always
  passed through L{utils.SafeEncode}.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)

  return utils.SafeEncode(text)
1692 1693
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @param feedback_fn: if given, log messages are fed to this callable
      (mutually exclusive with C{reporter})
  @param reporter: if given, a L{JobPollReportCbBase} instance handling
      all reporting

  """
  if cl is None:
    cl = GetClient()

  if reporter is not None:
    if feedback_fn:
      raise errors.ProgrammerError("Can't specify reporter and feedback function")
  elif feedback_fn:
    reporter = FeedbackFnJobPollReportCb(feedback_fn)
  else:
    reporter = StdioJobPollReportCb()

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1716 1717
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @param op: the single opcode to submit
  @param cl: optional luxi client; created on demand when None
  @param opts: command line options used to tune the opcode
  @return: the result of the single submitted opcode

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  results = PollJob(SendJob([op], cl=cl), cl=cl,
                    feedback_fn=feedback_fn, reporter=reporter)

  # one opcode was submitted, so one result is returned
  return results[0]
1737 1738
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  @raise JobSubmittedException: when only submission was requested; the
      exception carries the job ID

  """
  if not (opts and opts.submit_only):
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

  job = [op]
  SetGenericOpcodeOpts(job, opts)
  raise JobSubmittedException(SendJob(job, cl=cl))
1758 1759
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return

  # options do not change while iterating, so evaluate these once
  have_dry_run = hasattr(options, "dry_run")
  priority = getattr(options, "priority", None)

  for op in opcode_list:
    op.debug_level = options.debug
    if have_dry_run:
      op.dry_run = options.dry_run
    if priority is not None:
      op.priority = _PRIONAME_TO_VALUE[priority]
1779 1780
def GetClient():
  """Connects to the local master daemon and returns a LUXI client.

  If no master daemon is reachable, a diagnosis is attempted via the
  ssconf files: either the cluster is not initialized, or this is not
  the master node; otherwise the original error is re-raised.

  @rtype: luxi.Client
  @raise errors.OpPrereqError: if the machine is not part of a cluster
      or is not the master node

  """
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    # this is the master node but the daemon is unreachable: re-raise the
    # original NoMasterError
    raise
  return client
1802 1803
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @param err: the exception instance to format
  @rtype: tuple
  @return: (exit code, message); exit code is 2 for configuration
      errors, 0 for L{JobSubmittedException} (not an error) and 1
      otherwise

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  # dispatch on the exception type, most specific types first
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      # two-argument form carries a structured error type as second element
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not an error: the client just needs to print the job ID and exit
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')
1887 1888
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target', ...}

  @return: the exit code of the command, suitable for sys.exit()

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    # _ParseArgs pops the command name from sys.argv as a side effect
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
                     stderr_logging=True)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result
1958 1959
1960 -def ParseNicOption(optvalue):
1961 """Parses the value of the --net option(s). 1962 1963 """ 1964 try: 1965 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue) 1966 except (TypeError, ValueError), err: 1967 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err)) 1968 1969 nics = [{}] * nic_max 1970 for nidx, ndict in optvalue: 1971 nidx = int(nidx) 1972 1973 if not isinstance(ndict, dict): 1974 raise errors.OpPrereqError("Invalid nic/%d value: expected dict," 1975 " got %s" % (nidx, ndict)) 1976 1977 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES) 1978 1979 nics[nidx] = ndict 1980 1981 return nics
1982 1983
1984 -def GenericInstanceCreate(mode, opts, args):
1985 """Add an instance to the cluster via either creation or import. 1986 1987 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT 1988 @param opts: the command line options selected by the user 1989 @type args: list 1990 @param args: should contain only one element, the new instance name 1991 @rtype: int 1992 @return: the desired exit code 1993 1994 """ 1995 instance = args[0] 1996 1997 (pnode, snode) = SplitNodeOption(opts.node) 1998 1999 hypervisor = None 2000 hvparams = {} 2001 if opts.hypervisor: 2002 hypervisor, hvparams = opts.hypervisor 2003 2004 if opts.nics: 2005 nics = ParseNicOption(opts.nics) 2006 elif opts.no_nics: 2007 # no nics 2008 nics = [] 2009 elif mode == constants.INSTANCE_CREATE: 2010 # default of one nic, all auto 2011 nics = [{}] 2012 else: 2013 # mode == import 2014 nics = [] 2015 2016 if opts.disk_template == constants.DT_DISKLESS: 2017 if opts.disks or opts.sd_size is not None: 2018 raise errors.OpPrereqError("Diskless instance but disk" 2019 " information passed") 2020 disks = [] 2021 else: 2022 if (not opts.disks and not opts.sd_size 2023 and mode == constants.INSTANCE_CREATE): 2024 raise errors.OpPrereqError("No disk information specified") 2025 if opts.disks and opts.sd_size is not None: 2026 raise errors.OpPrereqError("Please use either the '--disk' or" 2027 " '-s' option") 2028 if opts.sd_size is not None: 2029 opts.disks = [(0, {"size": opts.sd_size})] 2030 2031 if opts.disks: 2032 try: 2033 disk_max = max(int(didx[0]) + 1 for didx in opts.disks) 2034 except ValueError, err: 2035 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err)) 2036 disks = [{}] * disk_max 2037 else: 2038 disks = [] 2039 for didx, ddict in opts.disks: 2040 didx = int(didx) 2041 if not isinstance(ddict, dict): 2042 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict) 2043 raise errors.OpPrereqError(msg) 2044 elif "size" in ddict: 2045 if "adopt" in ddict: 2046 raise errors.OpPrereqError("Only one of 'size' and 
'adopt' allowed" 2047 " (disk %d)" % didx) 2048 try: 2049 ddict["size"] = utils.ParseUnit(ddict["size"]) 2050 except ValueError, err: 2051 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" % 2052 (didx, err)) 2053 elif "adopt" in ddict: 2054 if mode == constants.INSTANCE_IMPORT: 2055 raise errors.OpPrereqError("Disk adoption not allowed for instance" 2056 " import") 2057 ddict["size"] = 0 2058 else: 2059 raise errors.OpPrereqError("Missing size or adoption source for" 2060 " disk %d" % didx) 2061 disks[didx] = ddict 2062 2063 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES) 2064 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES) 2065 2066 if mode == constants.INSTANCE_CREATE: 2067 start = opts.start 2068 os_type = opts.os 2069 force_variant = opts.force_variant 2070 src_node = None 2071 src_path = None 2072 no_install = opts.no_install 2073 identify_defaults = False 2074 elif mode == constants.INSTANCE_IMPORT: 2075 start = False 2076 os_type = None 2077 force_variant = False 2078 src_node = opts.src_node 2079 src_path = opts.src_dir 2080 no_install = None 2081 identify_defaults = opts.identify_defaults 2082 else: 2083 raise errors.ProgrammerError("Invalid creation mode %s" % mode) 2084 2085 op = opcodes.OpInstanceCreate(instance_name=instance, 2086 disks=disks, 2087 disk_template=opts.disk_template, 2088 nics=nics, 2089 pnode=pnode, snode=snode, 2090 ip_check=opts.ip_check, 2091 name_check=opts.name_check, 2092 wait_for_sync=opts.wait_for_sync, 2093 file_storage_dir=opts.file_storage_dir, 2094 file_driver=opts.file_driver, 2095 iallocator=opts.iallocator, 2096 hypervisor=hypervisor, 2097 hvparams=hvparams, 2098 beparams=opts.beparams, 2099 osparams=opts.osparams, 2100 mode=mode, 2101 start=start, 2102 os_type=os_type, 2103 force_variant=force_variant, 2104 src_node=src_node, 2105 src_path=src_path, 2106 no_install=no_install, 2107 identify_defaults=identify_defaults) 2108 2109 SubmitOrSend(op, opts) 2110 return 0
2111 2112
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    # SSH runner used to reach the non-master nodes
    self.ssh = ssh.SshRunner(self.cluster_name)

    # All online nodes except the master node
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name; if C{None} or equal to the master node,
        the command is run locally instead of over SSH
    @type cmd: list
    @param cmd: Command
    @raise errors.OpExecError: if the command exits with an error

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called; receives this helper instance as its
        first argument, followed by C{args}
    @return: whatever C{fn} returns

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()
2206 2207
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped
  @return: whatever C{fn} returns

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  client = GetClient()

  (cluster_name, master_node) = \
    client.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=client)

  # Don't keep a reference to the client. The master daemon will go away.
  del client

  assert master_node in online_nodes

  helper = _RunWhileClusterStoppedHelper(feedback_fn, cluster_name,
                                         master_node, online_nodes)
  return helper.Call(fn, *args)
2234 2235
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Generates a table with headers and different fields.

  Note that the rows in C{data} are modified in place: values in unit
  fields are replaced by their formatted representation and every value
  is converted to a string.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list of strings
  @return: the formatted table lines (header line first, if requested)

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)  # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields)  # pylint: disable-msg=W0142

  # Build one %-directive per column: plain "%s" with a separator,
  # width-parameterized "%*s"/"%-*s" for the smart-width layout
  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    # mlens tracks the maximum length seen per column
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
  else:
    format_str = separator.replace("%", "%%").join(format_fields)

  # Stringify all values (in place) and, for the smart layout, record
  # column widths
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        # Headers can widen a column too
        mlens[idx] = max(mlens[idx], len(hdr))
      args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      # Zero width for the last column avoids trailing whitespace
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
2342 2343
2344 -def _FormatBool(value):
2345 """Formats a boolean value as a string. 2346 2347 """ 2348 if value: 2349 return "Y" 2350 return "N"


#: Default formatting for query results, indexed by field type; each value
#: is a tuple of (formatting callback, align right). L{constants.QFT_UNIT}
#: is handled separately in L{_GetColumnFormatter} because its formatting
#: depends on the requested unit.
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }


2364 -def _GetColumnFormatter(fdef, override, unit):
2365 """Returns formatting function for a field. 2366 2367 @type fdef: L{objects.QueryFieldDefinition} 2368 @type override: dict 2369 @param override: Dictionary for overriding field formatting functions, 2370 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY} 2371 @type unit: string 2372 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} 2373 @rtype: tuple; (callable, bool) 2374 @return: Returns the function to format a value (takes one parameter) and a 2375 boolean for aligning the value on the right-hand side 2376 2377 """ 2378 fmt = override.get(fdef.name, None) 2379 if fmt is not None: 2380 return fmt 2381 2382 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY 2383 2384 if fdef.kind == constants.QFT_UNIT: 2385 # Can't keep this information in the static dictionary 2386 return (lambda value: utils.FormatUnit(value, unit), True) 2387 2388 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None) 2389 if fmt is not None: 2390 return fmt 2391 2392 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
2393 2394
2395 -class _QueryColumnFormatter:
2396 """Callable class for formatting fields of a query. 2397 2398 """
2399 - def __init__(self, fn, status_fn, verbose):
2400 """Initializes this class. 2401 2402 @type fn: callable 2403 @param fn: Formatting function 2404 @type status_fn: callable 2405 @param status_fn: Function to report fields' status 2406 @type verbose: boolean 2407 @param verbose: whether to use verbose field descriptions or not 2408 2409 """ 2410 self._fn = fn 2411 self._status_fn = status_fn 2412 if verbose: 2413 self._desc_index = 0 2414 else: 2415 self._desc_index = 1
2416
2417 - def __call__(self, data):
2418 """Returns a field's string representation. 2419 2420 """ 2421 (status, value) = data 2422 2423 # Report status 2424 self._status_fn(status) 2425 2426 if status == constants.RS_NORMAL: 2427 return self._fn(value) 2428 2429 assert value is None, \ 2430 "Found value %r for abnormal status %s" % (value, status) 2431 2432 if status in constants.RSS_DESCRIPTION: 2433 return constants.RSS_DESCRIPTION[status][self._desc_index] 2434 2435 raise NotImplementedError("Unknown status %s" % status)
2436 2437
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
    see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: tuple
  @return: (status, list of formatted lines); status is one of
    L{QR_NORMAL}, L{QR_INCOMPLETE} or L{QR_UNKNOWN}

  """
  if unit is None:
    # Default unit: megabytes for machine-parseable (separator) output,
    # human-readable otherwise
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  # Per-status counters, updated by the formatters via _RecordStatus
  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  # Formatting the table fills in the "stats" counters as a side effect
  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
2502 -def _GetUnknownFields(fdefs):
2503 """Returns list of unknown fields included in C{fdefs}. 2504 2505 @type fdefs: list of L{objects.QueryFieldDefinition} 2506 2507 """ 2508 return [fdef for fdef in fdefs 2509 if fdef.kind == constants.QFT_UNKNOWN]
2510 2511
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown fields were encountered

  """
  unknown = _GetUnknownFields(fdefs)
  if not unknown:
    return False

  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
  return True
2525 2526
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_OP_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @rtype: int
  @return: the desired exit code

  """
  if cl is None:
    cl = GetClient()

  if not names:
    names = None

  qfilter = qlang.MakeSimpleFilter("name", names)
  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, output) = FormatQueryResult(response, unit=unit,
                                       separator=separator, header=header,
                                       format_override=format_override,
                                       verbose=verbose)

  for line in output:
    ToStdout(line)

  # The warning above and the formatted status must agree on unknown fields
  assert found_unknown == (status == QR_UNKNOWN)

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
2577 2578
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_OP_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @rtype: int
  @return: the desired exit code

  """
  if cl is None:
    cl = GetClient()

  # An empty field list means "all fields"
  response = cl.QueryFields(resource, fields or None)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Title", str, False),
    # TODO: Add field description to master daemon
    ]

  rows = [[fdef.name, fdef.title] for fdef in response.fields]

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
2616 2617
class TableColumn:
  """Describes a single column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Function used to format the values in this column
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.align_right = align_right
    self.format = fn
    self.title = title
2636 2637
2638 -def _GetColFormatString(width, align_right):
2639 """Returns the format string for a field. 2640 2641 """ 2642 if align_right: 2643 sign = "" 2644 else: 2645 sign = "-" 2646 2647 return "%%%s%ss" % (sign, width)
2648 2649
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns
  @rtype: list of strings
  @return: the formatted lines of the table

  """
  if header:
    lines = [[col.title for col in columns]]
    widths = [len(col.title) for col in columns]
  else:
    lines = []
    widths = [0] * len(columns)

  # Format every cell; unless a separator is used, also track the maximum
  # width per column
  for row in rows:
    assert len(row) == len(columns)

    formatted = [col.format(value) for value, col in zip(row, columns)]

    if separator is None:
      widths = [max(width, len(value))
                for width, value in zip(widths, formatted)]

    lines.append(formatted)

  if separator is not None:
    # No padding is needed when an explicit separator is used
    return [separator.join(line) for line in lines]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    widths[-1] = 0

  # Build format string
  fmt = " ".join(_GetColFormatString(width, col.align_right)
                 for col, width in zip(columns, widths))

  return [fmt % tuple(line) for line in lines]
2697 2698
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp (in the local timezone),
      or C{"?"} if the input is not a two-element tuple or list

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return "?"
  (sec, usec) = ts
  # Note: "%F %T" would be shorter, but those directives are GNU/POSIX.1-2008
  # extensions and not available everywhere (e.g. Windows); spell out the
  # equivalent portable directives instead
  return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(sec)) + \
    ".%06d" % usec
2713 2714
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes are recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  @rtype: int
  @return: the number of seconds represented by the specification
  @raise errors.OpPrereqError: if the specification is empty or invalid

  """
  spec = str(value)
  if not spec:
    raise errors.OpPrereqError("Empty time specification passed")

  suffix_multipliers = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }

  if spec[-1] in suffix_multipliers:
    multiplier = suffix_multipliers[spec[-1]]
    spec = spec[:-1]
    if not spec:
      # nothing left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
    try:
      return int(spec) * multiplier
    except (TypeError, ValueError):
      raise errors.OpPrereqError("Invalid time specification '%s'" % spec)

  try:
    return int(spec)
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % spec)
2755 2756
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
    offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
    offline nodes that are skipped; if this parameter is True the
    note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
    names, useful for doing network traffic over the replication interface
    (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
    (useful in coordination with secondary_ips where we cannot check our
    node name against the list)

  """
  if cl is None:
    cl = GetClient()

  # Return the secondary IP column instead of the name column if requested
  name_idx = 2 if secondary_ips else 0

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]
    keep_fn = lambda name: name != master_node
  else:
    keep_fn = lambda _: True

  node_data = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                            use_locking=False)

  offline = [name for (name, is_offline, _) in node_data if is_offline]
  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))

  return [row[name_idx]
          for row in node_data
          if not row[1] and keep_fn(row[0])]
2801 2802
2803 -def _ToStream(stream, txt, *args):
2804 """Write a message to a stream, bypassing the logging system 2805 2806 @type stream: file object 2807 @param stream: the file to which we should write 2808 @type txt: str 2809 @param txt: the message 2810 2811 """ 2812 try: 2813 if args: 2814 args = tuple(args) 2815 stream.write(txt % args) 2816 else: 2817 stream.write(txt) 2818 stream.write('\n') 2819 stream.flush() 2820 except IOError, err: 2821 if err.errno == errno.EPIPE: 2822 # our terminal went away, we'll exit 2823 sys.exit(constants.EXIT_FAILURE) 2824 else: 2825 raise
2826 2827
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message; treated as a %-format string if positional
      arguments are given
  @param args: optional arguments for interpolation into C{txt}

  """
  _ToStream(sys.stdout, txt, *args)
2838 2839
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message; treated as a %-format string if positional
      arguments are given
  @param args: optional arguments for interpolation into C{txt}

  """
  _ToStream(sys.stderr, txt, *args)
2850 2851
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  Internally, queued jobs are stored as C{(name, ops)} tuples in
  C{self.queue}, and submitted jobs as C{(idx, status, job_id, name)}
  tuples in C{self.jobs}, where C{idx} is the submission order used to
  sort the final results.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    """Initializes this class.

    @param cl: luxi client to use; a new one is created if None
    @type verbose: boolean
    @param verbose: whether to report submitted job IDs on stdout
    @param opts: command line options, passed to L{SetGenericOpcodeOpts}
    @param feedback_fn: feedback function passed to L{PollJob}

    """
    # Jobs not yet submitted, as (name, ops) tuples
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    # Submitted jobs, as (idx, status, job_id, name) tuples
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet
    @param ops: the opcodes making up the job

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: boolean
    @param each: if True, submit the queued jobs one by one instead of
        via a single SubmitManyJobs call

    """
    if each:
      results = []
      for row in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(row[1])])
    else:
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    # Pair each submission result with its queue entry, remembering the
    # submission order in "idx"
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
                                                            self.queue)):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    The chosen job is removed from C{self.jobs}.

    @return: a (idx, status, job_id, name) tuple

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not
    @return: job results as for L{GetResults} if waiting, otherwise a list
        of (status, job_id) tuples for the submitted jobs

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
2983 2984
def FormatParameterDict(buf, param_dict, actual, level=1):
  """Formats a parameter dictionary.

  Writes one line per key in C{actual}; keys not explicitly set in
  C{param_dict} are shown with their inherited default value.

  @type buf: L{StringIO}
  @param buf: the buffer into which to write
  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @param level: Level of indent

  """
  prefix = " " * level
  for name in sorted(actual):
    shown = param_dict.get(name, "default (%s)" % actual[name])
    buf.write("%s- %s: %s\n" % (prefix, name, shown))
3001