Package ganeti :: Module cli
[hide private]
[frames] | [no frames]

Source Code for Module ganeti.cli

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc. 
   5  # 
   6  # This program is free software; you can redistribute it and/or modify 
   7  # it under the terms of the GNU General Public License as published by 
   8  # the Free Software Foundation; either version 2 of the License, or 
   9  # (at your option) any later version. 
  10  # 
  11  # This program is distributed in the hope that it will be useful, but 
  12  # WITHOUT ANY WARRANTY; without even the implied warranty of 
  13  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
  14  # General Public License for more details. 
  15  # 
  16  # You should have received a copy of the GNU General Public License 
  17  # along with this program; if not, write to the Free Software 
  18  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
  19  # 02110-1301, USA. 
  20   
  21   
  22  """Module dealing with command line parsing""" 
  23   
  24   
  25  import sys 
  26  import textwrap 
  27  import os.path 
  28  import time 
  29  import logging 
  30  from cStringIO import StringIO 
  31   
  32  from ganeti import utils 
  33  from ganeti import errors 
  34  from ganeti import constants 
  35  from ganeti import opcodes 
  36  from ganeti import luxi 
  37  from ganeti import ssconf 
  38  from ganeti import rpc 
  39  from ganeti import ssh 
  40  from ganeti import compat 
  41  from ganeti import netutils 
  42   
  43  from optparse import (OptionParser, TitledHelpFormatter, 
  44                        Option, OptionValueError) 
  45   
  46   
  47  __all__ = [ 
  48    # Command line options 
  49    "ADD_UIDS_OPT", 
  50    "ALLOCATABLE_OPT", 
  51    "ALL_OPT", 
  52    "AUTO_PROMOTE_OPT", 
  53    "AUTO_REPLACE_OPT", 
  54    "BACKEND_OPT", 
  55    "BLK_OS_OPT", 
  56    "CAPAB_MASTER_OPT", 
  57    "CAPAB_VM_OPT", 
  58    "CLEANUP_OPT", 
  59    "CLUSTER_DOMAIN_SECRET_OPT", 
  60    "CONFIRM_OPT", 
  61    "CP_SIZE_OPT", 
  62    "DEBUG_OPT", 
  63    "DEBUG_SIMERR_OPT", 
  64    "DISKIDX_OPT", 
  65    "DISK_OPT", 
  66    "DISK_TEMPLATE_OPT", 
  67    "DRAINED_OPT", 
  68    "DRY_RUN_OPT", 
  69    "DRBD_HELPER_OPT", 
  70    "EARLY_RELEASE_OPT", 
  71    "ENABLED_HV_OPT", 
  72    "ERROR_CODES_OPT", 
  73    "FIELDS_OPT", 
  74    "FILESTORE_DIR_OPT", 
  75    "FILESTORE_DRIVER_OPT", 
  76    "FORCE_OPT", 
  77    "FORCE_VARIANT_OPT", 
  78    "GLOBAL_FILEDIR_OPT", 
  79    "HID_OS_OPT", 
  80    "HVLIST_OPT", 
  81    "HVOPTS_OPT", 
  82    "HYPERVISOR_OPT", 
  83    "IALLOCATOR_OPT", 
  84    "DEFAULT_IALLOCATOR_OPT", 
  85    "IDENTIFY_DEFAULTS_OPT", 
  86    "IGNORE_CONSIST_OPT", 
  87    "IGNORE_FAILURES_OPT", 
  88    "IGNORE_OFFLINE_OPT", 
  89    "IGNORE_REMOVE_FAILURES_OPT", 
  90    "IGNORE_SECONDARIES_OPT", 
  91    "IGNORE_SIZE_OPT", 
  92    "INTERVAL_OPT", 
  93    "MAC_PREFIX_OPT", 
  94    "MAINTAIN_NODE_HEALTH_OPT", 
  95    "MASTER_NETDEV_OPT", 
  96    "MC_OPT", 
  97    "MIGRATION_MODE_OPT", 
  98    "NET_OPT", 
  99    "NEW_CLUSTER_CERT_OPT", 
 100    "NEW_CLUSTER_DOMAIN_SECRET_OPT", 
 101    "NEW_CONFD_HMAC_KEY_OPT", 
 102    "NEW_RAPI_CERT_OPT", 
 103    "NEW_SECONDARY_OPT", 
 104    "NIC_PARAMS_OPT", 
 105    "NODE_LIST_OPT", 
 106    "NODE_PLACEMENT_OPT", 
 107    "NODEGROUP_OPT", 
 108    "NODRBD_STORAGE_OPT", 
 109    "NOHDR_OPT", 
 110    "NOIPCHECK_OPT", 
 111    "NO_INSTALL_OPT", 
 112    "NONAMECHECK_OPT", 
 113    "NOLVM_STORAGE_OPT", 
 114    "NOMODIFY_ETCHOSTS_OPT", 
 115    "NOMODIFY_SSH_SETUP_OPT", 
 116    "NONICS_OPT", 
 117    "NONLIVE_OPT", 
 118    "NONPLUS1_OPT", 
 119    "NOSHUTDOWN_OPT", 
 120    "NOSTART_OPT", 
 121    "NOSSH_KEYCHECK_OPT", 
 122    "NOVOTING_OPT", 
 123    "NWSYNC_OPT", 
 124    "ON_PRIMARY_OPT", 
 125    "ON_SECONDARY_OPT", 
 126    "OFFLINE_OPT", 
 127    "OSPARAMS_OPT", 
 128    "OS_OPT", 
 129    "OS_SIZE_OPT", 
 130    "PREALLOC_WIPE_DISKS_OPT", 
 131    "PRIMARY_IP_VERSION_OPT", 
 132    "PRIORITY_OPT", 
 133    "RAPI_CERT_OPT", 
 134    "READD_OPT", 
 135    "REBOOT_TYPE_OPT", 
 136    "REMOVE_INSTANCE_OPT", 
 137    "REMOVE_UIDS_OPT", 
 138    "RESERVED_LVS_OPT", 
 139    "ROMAN_OPT", 
 140    "SECONDARY_IP_OPT", 
 141    "SELECT_OS_OPT", 
 142    "SEP_OPT", 
 143    "SHOWCMD_OPT", 
 144    "SHUTDOWN_TIMEOUT_OPT", 
 145    "SINGLE_NODE_OPT", 
 146    "SRC_DIR_OPT", 
 147    "SRC_NODE_OPT", 
 148    "SUBMIT_OPT", 
 149    "STATIC_OPT", 
 150    "SYNC_OPT", 
 151    "TAG_SRC_OPT", 
 152    "TIMEOUT_OPT", 
 153    "UIDPOOL_OPT", 
 154    "USEUNITS_OPT", 
 155    "USE_REPL_NET_OPT", 
 156    "VERBOSE_OPT", 
 157    "VG_NAME_OPT", 
 158    "YES_DOIT_OPT", 
 159    # Generic functions for CLI programs 
 160    "GenericMain", 
 161    "GenericInstanceCreate", 
 162    "GetClient", 
 163    "GetOnlineNodes", 
 164    "JobExecutor", 
 165    "JobSubmittedException", 
 166    "ParseTimespec", 
 167    "RunWhileClusterStopped", 
 168    "SubmitOpCode", 
 169    "SubmitOrSend", 
 170    "UsesRPC", 
 171    # Formatting functions 
 172    "ToStderr", "ToStdout", 
 173    "FormatError", 
 174    "GenerateTable", 
 175    "AskUser", 
 176    "FormatTimestamp", 
 177    "FormatLogMessage", 
 178    # Tags functions 
 179    "ListTags", 
 180    "AddTags", 
 181    "RemoveTags", 
 182    # command line options support infrastructure 
 183    "ARGS_MANY_INSTANCES", 
 184    "ARGS_MANY_NODES", 
 185    "ARGS_NONE", 
 186    "ARGS_ONE_INSTANCE", 
 187    "ARGS_ONE_NODE", 
 188    "ARGS_ONE_OS", 
 189    "ArgChoice", 
 190    "ArgCommand", 
 191    "ArgFile", 
 192    "ArgHost", 
 193    "ArgInstance", 
 194    "ArgJobId", 
 195    "ArgNode", 
 196    "ArgOs", 
 197    "ArgSuggest", 
 198    "ArgUnknown", 
 199    "OPT_COMPL_INST_ADD_NODES", 
 200    "OPT_COMPL_MANY_NODES", 
 201    "OPT_COMPL_ONE_IALLOCATOR", 
 202    "OPT_COMPL_ONE_INSTANCE", 
 203    "OPT_COMPL_ONE_NODE", 
 204    "OPT_COMPL_ONE_NODEGROUP", 
 205    "OPT_COMPL_ONE_OS", 
 206    "cli_option", 
 207    "SplitNodeOption", 
 208    "CalculateOSNames", 
 209    "ParseFields", 
 210    "COMMON_CREATE_OPTS", 
 211    ] 
 212   
#: Prefix on a parameter key meaning "disable": value becomes False
NO_PREFIX = "no_"

#: Prefix on a parameter key meaning "reset to default": value becomes None
UN_PREFIX = "-"

#: Priorities (sorted)
_PRIORITY_NAMES = [
  ("low", constants.OP_PRIO_LOW),
  ("normal", constants.OP_PRIO_NORMAL),
  ("high", constants.OP_PRIO_HIGH),
  ]

#: Priority dictionary for easier lookup
# TODO: Replace this and _PRIORITY_NAMES with a single sorted dictionary once
# we migrate to Python 2.6
_PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
 228   
class _Argument:
  """Base class for positional command line argument descriptions.

  C{min} and C{max} bound how many values the argument accepts;
  C{max=None} means no upper bound.

  """
  def __init__(self, min=0, max=None): # pylint: disable-msg=W0622
    self.min = min
    self.max = max

  def __repr__(self):
    return "<%s min=%s max=%s>" % (self.__class__.__name__,
                                   self.min, self.max)
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable-msg=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return "<%s min=%s max=%s choices=%r>" % (self.__class__.__name__,
                                              self.min, self.max,
                                              self.choices)
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """


class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """


class ArgInstance(_Argument):
  """Instances argument.

  """


class ArgNode(_Argument):
  """Node argument.

  """


class ArgJobId(_Argument):
  """Job ID argument.

  """


class ArgFile(_Argument):
  """File path argument.

  """


class ArgCommand(_Argument):
  """Command argument.

  """


class ArgHost(_Argument):
  """Host argument.

  """


class ArgOs(_Argument):
  """OS argument.

  """


#: Pre-built argument specifications for the common command signatures
ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @return: a C{(kind, name)} tuple; for cluster tags the name is the
      kind itself, for node/instance tags it is popped from C{args}

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    return kind, kind
  if kind in (constants.TAG_NODE, constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command")
    return kind, args.pop(0)
  raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
def _ExtendTags(opts, args):
  """Extend the args if a source file has been given.

  This function will extend the tags with the contents of the file
  passed in the 'tags_source' attribute of the opts parameter. A file
  named '-' will be replaced by stdin.

  @param opts: options object, read for its C{tags_source} attribute
  @param args: list of tags, extended in place (one entry per stripped
      input line)

  """
  fname = opts.tags_source
  if fname is None:
    return
  if fname == "-":
    new_fh = sys.stdin
  else:
    new_fh = open(fname, "r")
  new_data = []
  try:
    # we don't use the nice 'new_data = [line.strip() for line in fh]'
    # because of python bug 1633941
    while True:
      line = new_fh.readline()
      if not line:
        break
      new_data.append(line.strip())
  finally:
    # only close handles we opened ourselves; previously sys.stdin was
    # closed here as well, even though this function does not own it
    if new_fh is not sys.stdin:
      new_fh.close()
  args.extend(new_data)
def ListTags(opts, args):
  """List the tags on a given object.

  Generic implementation covering all three tag object kinds (cluster,
  node, instance); the C{tag_type} attribute of C{opts} selects which
  one is queried.

  """
  kind, name = _ExtractTagsObject(opts, args)
  cl = GetClient()
  for tag in sorted(cl.QueryTags(kind, name)):
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  Generic implementation covering all three tag object kinds (cluster,
  node, instance); the C{tag_type} attribute of C{opts} selects which
  one is modified.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added")
  SubmitOpCode(opcodes.OpAddTags(kind=kind, name=name, tags=args), opts=opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  Generic implementation covering all three tag object kinds (cluster,
  node, instance); the C{tag_type} attribute of C{opts} selects which
  one is modified.

  """
  kind, name = _ExtractTagsObject(opts, args)
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed")
  SubmitOpCode(opcodes.OpDelTags(kind=kind, name=name, tags=args), opts=opts)
def check_unit(option, opt, value): # pylint: disable-msg=W0613
  """OptParsers custom converter for units.

  @param option: the option instance being processed (unused)
  @param opt: option name, used only for error messages
  @param value: the string to convert via L{utils.ParseUnit}
  @raise OptionValueError: if the value cannot be parsed as a unit

  """
  try:
    return utils.ParseUnit(value)
  except errors.UnitParseError, err:
    raise OptionValueError("option %s: %s" % (opt, err))
def _SplitKeyVal(opt, data):
  """Convert a KeyVal string into a dict.

  Splits a C{key=val[,...]} string into a dictionary. Elements without
  an explicit value are handled specially: keys carrying the C{no_}
  prefix get the prefix stripped and the value C{False}, keys carrying
  the C{-} prefix get the prefix stripped and the value C{None}, all
  other bare keys get the value C{True}.

  @type opt: string
  @param opt: option name for which we process the data, used only in
      building error messages
  @type data: string
  @param data: a string of the format key=val,key=val,...
  @rtype: dict
  @return: mapping of keys to the parsed values
  @raises errors.ParameterError: if there are duplicate keys

  """
  kv_dict = {}
  if not data:
    return kv_dict
  for elem in utils.UnescapeAndSplit(data, sep=","):
    if "=" in elem:
      key, val = elem.split("=", 1)
    elif elem.startswith(NO_PREFIX):
      key, val = elem[len(NO_PREFIX):], False
    elif elem.startswith(UN_PREFIX):
      key, val = elem[len(UN_PREFIX):], None
    else:
      key, val = elem, True
    if key in kv_dict:
      raise errors.ParameterError("Duplicate key '%s' in option %s" %
                                  (key, opt))
    kv_dict[key] = val
  return kv_dict
def check_ident_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for ident:key=val,key=val options.

  This will store the parsed values as a tuple (ident, {key: val}). As such,
  multiple uses of this option via action=append is possible.

  """
  if ":" in value:
    ident, rest = value.split(":", 1)
  else:
    ident, rest = value, ''

  if ident.startswith(NO_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    return (ident[len(NO_PREFIX):], False)
  if ident.startswith(UN_PREFIX):
    if rest:
      msg = "Cannot pass options when removing parameter groups: %s" % value
      raise errors.ParameterError(msg)
    return (ident[len(UN_PREFIX):], None)
  return (ident, _SplitKeyVal(opt, rest))
def check_key_val(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser class for key=val,key=val options.

  This will store the parsed values as a dict {key: val}.

  @param opt: option name, passed through to L{_SplitKeyVal} for error
      messages
  @param value: the key=val,key=val string to parse
  @rtype: dict

  """
  return _SplitKeyVal(opt, value)
def check_bool(option, opt, value): # pylint: disable-msg=W0613
  """Custom parser for yes/no options.

  This will store the parsed value as either True or False.

  @raise errors.ParameterError: if the (lowercased) value is neither an
      accepted true form nor an accepted false form

  """
  value = value.lower()
  if value == constants.VALUE_FALSE or value == "no":
    return False
  elif value == constants.VALUE_TRUE or value == "yes":
    return True
  else:
    raise errors.ParameterError("Invalid boolean value '%s'" % value)


# completion_suggestion is normally a list. Using numeric values not evaluating
# to False for dynamic completion.
(OPT_COMPL_MANY_NODES,
 OPT_COMPL_ONE_NODE,
 OPT_COMPL_ONE_INSTANCE,
 OPT_COMPL_ONE_OS,
 OPT_COMPL_ONE_IALLOCATOR,
 OPT_COMPL_INST_ADD_NODES,
 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)

#: All known dynamic-completion markers
OPT_COMPL_ALL = frozenset([
  OPT_COMPL_MANY_NODES,
  OPT_COMPL_ONE_NODE,
  OPT_COMPL_ONE_INSTANCE,
  OPT_COMPL_ONE_OS,
  OPT_COMPL_ONE_IALLOCATOR,
  OPT_COMPL_INST_ADD_NODES,
  OPT_COMPL_ONE_NODEGROUP,
  ])
class CliOption(Option):
  """Custom option class for optparse.

  Extends the standard optparse C{Option} with the custom value types
  declared in this module ("identkeyval", "keyval", "unit", "bool") and
  with an extra C{completion_suggest} attribute (see the OPT_COMPL_*
  constants above).

  """
  ATTRS = Option.ATTRS + [
    "completion_suggest",
    ]
  TYPES = Option.TYPES + (
    "identkeyval",
    "keyval",
    "unit",
    "bool",
    )
  # map each custom type name to its converter function
  TYPE_CHECKER = Option.TYPE_CHECKER.copy()
  TYPE_CHECKER["identkeyval"] = check_ident_key_val
  TYPE_CHECKER["keyval"] = check_key_val
  TYPE_CHECKER["unit"] = check_unit
  TYPE_CHECKER["bool"] = check_bool
559 560 561 # optparse.py sets make_option, so we do it for our own option class, too 562 cli_option = CliOption 563 564 565 _YORNO = "yes|no" 566 567 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count", 568 help="Increase debugging level") 569 570 NOHDR_OPT = cli_option("--no-headers", default=False, 571 action="store_true", dest="no_headers", 572 help="Don't display column headers") 573 574 SEP_OPT = cli_option("--separator", default=None, 575 action="store", dest="separator", 576 help=("Separator between output fields" 577 " (defaults to one space)")) 578 579 USEUNITS_OPT = cli_option("--units", default=None, 580 dest="units", choices=('h', 'm', 'g', 't'), 581 help="Specify units for output (one of hmgt)") 582 583 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store", 584 type="string", metavar="FIELDS", 585 help="Comma separated list of output fields") 586 587 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true", 588 default=False, help="Force the operation") 589 590 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true", 591 default=False, help="Do not require confirmation") 592 593 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline", 594 action="store_true", default=False, 595 help=("Ignore offline nodes and do as much" 596 " as possible")) 597 598 TAG_SRC_OPT = cli_option("--from", dest="tags_source", 599 default=None, help="File with tag names") 600 601 SUBMIT_OPT = cli_option("--submit", dest="submit_only", 602 default=False, action="store_true", 603 help=("Submit the job and return the job ID, but" 604 " don't wait for the job to finish")) 605 606 SYNC_OPT = cli_option("--sync", dest="do_locking", 607 default=False, action="store_true", 608 help=("Grab locks while doing the queries" 609 " in order to ensure more consistent results")) 610 611 DRY_RUN_OPT = cli_option("--dry-run", default=False, 612 action="store_true", 613 help=("Do not execute the operation, just run 
the" 614 " check steps and verify it it could be" 615 " executed")) 616 617 VERBOSE_OPT = cli_option("-v", "--verbose", default=False, 618 action="store_true", 619 help="Increase the verbosity of the operation") 620 621 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False, 622 action="store_true", dest="simulate_errors", 623 help="Debugging option that makes the operation" 624 " treat most runtime checks as failed") 625 626 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync", 627 default=True, action="store_false", 628 help="Don't wait for sync (DANGEROUS!)") 629 630 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template", 631 help="Custom disk setup (diskless, file," 632 " plain or drbd)", 633 default=None, metavar="TEMPL", 634 choices=list(constants.DISK_TEMPLATES)) 635 636 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true", 637 help="Do not create any network cards for" 638 " the instance") 639 640 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir", 641 help="Relative path under default cluster-wide" 642 " file storage dir to store file-based disks", 643 default=None, metavar="<DIR>") 644 645 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver", 646 help="Driver to use for image files", 647 default="loop", metavar="<DRIVER>", 648 choices=list(constants.FILE_DRIVER)) 649 650 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>", 651 help="Select nodes for the instance automatically" 652 " using the <NAME> iallocator plugin", 653 default=None, type="string", 654 completion_suggest=OPT_COMPL_ONE_IALLOCATOR) 655 656 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator", 657 metavar="<NAME>", 658 help="Set the default instance allocator plugin", 659 default=None, type="string", 660 completion_suggest=OPT_COMPL_ONE_IALLOCATOR) 661 662 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run", 663 metavar="<os>", 
664 completion_suggest=OPT_COMPL_ONE_OS) 665 666 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams", 667 type="keyval", default={}, 668 help="OS parameters") 669 670 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant", 671 action="store_true", default=False, 672 help="Force an unknown variant") 673 674 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install", 675 action="store_true", default=False, 676 help="Do not install the OS (will" 677 " enable no-start)") 678 679 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams", 680 type="keyval", default={}, 681 help="Backend parameters") 682 683 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval", 684 default={}, dest="hvparams", 685 help="Hypervisor parameters") 686 687 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor", 688 help="Hypervisor and hypervisor options, in the" 689 " format hypervisor:option=value,option=value,...", 690 default=None, type="identkeyval") 691 692 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams", 693 help="Hypervisor and hypervisor options, in the" 694 " format hypervisor:option=value,option=value,...", 695 default=[], action="append", type="identkeyval") 696 697 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True, 698 action="store_false", 699 help="Don't check that the instance's IP" 700 " is alive") 701 702 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check", 703 default=True, action="store_false", 704 help="Don't check that the instance's name" 705 " is resolvable") 706 707 NET_OPT = cli_option("--net", 708 help="NIC parameters", default=[], 709 dest="nics", action="append", type="identkeyval") 710 711 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[], 712 dest="disks", action="append", type="identkeyval") 713 714 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None, 715 help="Comma-separated list of 
disks" 716 " indices to act on (e.g. 0,2) (optional," 717 " defaults to all disks)") 718 719 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size", 720 help="Enforces a single-disk configuration using the" 721 " given disk size, in MiB unless a suffix is used", 722 default=None, type="unit", metavar="<size>") 723 724 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency", 725 dest="ignore_consistency", 726 action="store_true", default=False, 727 help="Ignore the consistency of the disks on" 728 " the secondary") 729 730 NONLIVE_OPT = cli_option("--non-live", dest="live", 731 default=True, action="store_false", 732 help="Do a non-live migration (this usually means" 733 " freeze the instance, save the state, transfer and" 734 " only then resume running on the secondary node)") 735 736 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode", 737 default=None, 738 choices=list(constants.HT_MIGRATION_MODES), 739 help="Override default migration mode (choose" 740 " either live or non-live") 741 742 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node", 743 help="Target node and optional secondary node", 744 metavar="<pnode>[:<snode>]", 745 completion_suggest=OPT_COMPL_INST_ADD_NODES) 746 747 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[], 748 action="append", metavar="<node>", 749 help="Use only this node (can be used multiple" 750 " times, if not given defaults to all nodes)", 751 completion_suggest=OPT_COMPL_ONE_NODE) 752 753 NODEGROUP_OPT = cli_option("-g", "--node-group", 754 dest="nodegroup", 755 help="Node group (name or uuid)", 756 metavar="<nodegroup>", 757 default=None, type="string", 758 completion_suggest=OPT_COMPL_ONE_NODEGROUP) 759 760 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node", 761 metavar="<node>", 762 completion_suggest=OPT_COMPL_ONE_NODE) 763 764 NOSTART_OPT = cli_option("--no-start", dest="start", default=True, 765 action="store_false", 766 help="Don't start the instance 
after creation") 767 768 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command", 769 action="store_true", default=False, 770 help="Show command instead of executing it") 771 772 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup", 773 default=False, action="store_true", 774 help="Instead of performing the migration, try to" 775 " recover from a failed cleanup. This is safe" 776 " to run even if the instance is healthy, but it" 777 " will create extra replication traffic and " 778 " disrupt briefly the replication (like during the" 779 " migration") 780 781 STATIC_OPT = cli_option("-s", "--static", dest="static", 782 action="store_true", default=False, 783 help="Only show configuration data, not runtime data") 784 785 ALL_OPT = cli_option("--all", dest="show_all", 786 default=False, action="store_true", 787 help="Show info on all instances on the cluster." 788 " This can take a long time to run, use wisely") 789 790 SELECT_OS_OPT = cli_option("--select-os", dest="select_os", 791 action="store_true", default=False, 792 help="Interactive OS reinstall, lists available" 793 " OS templates for selection") 794 795 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures", 796 action="store_true", default=False, 797 help="Remove the instance from the cluster" 798 " configuration even if there are failures" 799 " during the removal process") 800 801 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures", 802 dest="ignore_remove_failures", 803 action="store_true", default=False, 804 help="Remove the instance from the" 805 " cluster configuration even if there" 806 " are failures during the removal" 807 " process") 808 809 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance", 810 action="store_true", default=False, 811 help="Remove the instance from the cluster") 812 813 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node", 814 help="Specifies the new secondary node", 815 metavar="NODE", 
default=None, 816 completion_suggest=OPT_COMPL_ONE_NODE) 817 818 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary", 819 default=False, action="store_true", 820 help="Replace the disk(s) on the primary" 821 " node (only for the drbd template)") 822 823 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary", 824 default=False, action="store_true", 825 help="Replace the disk(s) on the secondary" 826 " node (only for the drbd template)") 827 828 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote", 829 default=False, action="store_true", 830 help="Lock all nodes and auto-promote as needed" 831 " to MC status") 832 833 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto", 834 default=False, action="store_true", 835 help="Automatically replace faulty disks" 836 " (only for the drbd template)") 837 838 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size", 839 default=False, action="store_true", 840 help="Ignore current recorded size" 841 " (useful for forcing activation when" 842 " the recorded size is wrong)") 843 844 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node", 845 metavar="<node>", 846 completion_suggest=OPT_COMPL_ONE_NODE) 847 848 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory", 849 metavar="<dir>") 850 851 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip", 852 help="Specify the secondary ip for the node", 853 metavar="ADDRESS", default=None) 854 855 READD_OPT = cli_option("--readd", dest="readd", 856 default=False, action="store_true", 857 help="Readd old node after replacing it") 858 859 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check", 860 default=True, action="store_false", 861 help="Disable SSH key fingerprint checking") 862 863 864 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate", 865 type="bool", default=None, metavar=_YORNO, 866 help="Set the master_candidate 
flag on the node") 867 868 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO, 869 type="bool", default=None, 870 help="Set the offline flag on the node") 871 872 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO, 873 type="bool", default=None, 874 help="Set the drained flag on the node") 875 876 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable", 877 type="bool", default=None, metavar=_YORNO, 878 help="Set the master_capable flag on the node") 879 880 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable", 881 type="bool", default=None, metavar=_YORNO, 882 help="Set the vm_capable flag on the node") 883 884 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable", 885 type="bool", default=None, metavar=_YORNO, 886 help="Set the allocatable flag on a volume") 887 888 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage", 889 help="Disable support for lvm based instances" 890 " (cluster-wide)", 891 action="store_false", default=True) 892 893 ENABLED_HV_OPT = cli_option("--enabled-hypervisors", 894 dest="enabled_hypervisors", 895 help="Comma-separated list of hypervisors", 896 type="string", default=None) 897 898 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams", 899 type="keyval", default={}, 900 help="NIC parameters") 901 902 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None, 903 dest="candidate_pool_size", type="int", 904 help="Set the candidate pool size") 905 906 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name", 907 help="Enables LVM and specifies the volume group" 908 " name (cluster-wide) for disk allocation [xenvg]", 909 metavar="VG", default=None) 910 911 YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it", 912 help="Destroy cluster", action="store_true") 913 914 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting", 915 help="Skip node agreement check (dangerous)", 916 action="store_true", default=False) 
MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
                            help="Specify the mac prefix for the instance IP"
                            " addresses, in the format XX:XX:XX",
                            metavar="PREFIX",
                            default=None)

MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
                               help="Specify the node interface (cluster-wide)"
                               " on which the master IP address will be added "
                               " [%s]" % constants.DEFAULT_BRIDGE,
                               metavar="NETDEV",
                               default=constants.DEFAULT_BRIDGE)

GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
                                help="Specify the default directory (cluster-"
                                "wide) for storing the file-based disks [%s]" %
                                constants.DEFAULT_FILE_STORAGE_DIR,
                                metavar="DIR",
                                default=constants.DEFAULT_FILE_STORAGE_DIR)

NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
                                   help="Don't modify /etc/hosts",
                                   action="store_false", default=True)

NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
                                    help="Don't initialize SSH keys",
                                    action="store_false", default=True)

ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
                             help="Enable parseable error messages",
                             action="store_true", default=False)

NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
                          help="Skip N+1 memory redundancy tests",
                          action="store_true", default=False)

REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
                             help="Type of reboot: soft/hard/full",
                             default=constants.INSTANCE_REBOOT_HARD,
                             metavar="<REBOOT>",
                             choices=list(constants.REBOOT_TYPES))

IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
                                    dest="ignore_secondaries",
                                    default=False, action="store_true",
                                    help="Ignore errors from secondaries")

NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
                            action="store_false", default=True,
                            help="Don't shutdown the instance (unsafe)")

TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
                         default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                         help="Maximum time to wait")

SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
                                  dest="shutdown_timeout", type="int",
                                  default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
                                  help="Maximum time to wait for instance"
                                  " shutdown")

# NB: typo "repetions" fixed in the help text below
INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
                          default=None,
                          help=("Number of seconds between repetitions of the"
                                " command"))

EARLY_RELEASE_OPT = cli_option("--early-release",
                               dest="early_release", default=False,
                               action="store_true",
                               help="Release the locks on the secondary"
                               " node(s) early")

NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
                                  dest="new_cluster_cert",
                                  default=False, action="store_true",
                                  help="Generate a new cluster certificate")

RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
                           default=None,
                           help="File containing new RAPI certificate")

NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
                               default=None, action="store_true",
                               help=("Generate a new self-signed RAPI"
                                     " certificate"))

NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
                                    dest="new_confd_hmac_key",
                                    default=False, action="store_true",
                                    help=("Create a new HMAC key for %s" %
                                          constants.CONFD))

# NB: duplicated word "new new" fixed in the help text below
CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
                                       dest="cluster_domain_secret",
                                       default=None,
                                       help=("Load new cluster domain"
                                             " secret from file"))

NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
                                           dest="new_cluster_domain_secret",
                                           default=False, action="store_true",
                                           help=("Create a new cluster domain"
                                                 " secret"))

USE_REPL_NET_OPT = cli_option("--use-replication-network",
                              dest="use_replication_network",
                              help="Whether to use the replication network"
                              " for talking to the nodes",
                              action="store_true", default=False)

MAINTAIN_NODE_HEALTH_OPT = \
    cli_option("--maintain-node-health", dest="maintain_node_health",
               metavar=_YORNO, default=None, type="bool",
               help="Configure the cluster to automatically maintain node"
               " health, by shutting down unknown instances, shutting down"
               " unknown DRBD devices, etc.")

IDENTIFY_DEFAULTS_OPT = \
    cli_option("--identify-defaults", dest="identify_defaults",
               default=False, action="store_true",
               help="Identify which saved instance parameters are equal to"
               " the current cluster defaults and set them as such, instead"
               " of marking them as overridden")

UIDPOOL_OPT = cli_option("--uid-pool", default=None,
                         action="store", dest="uid_pool",
                         help=("A list of user-ids or user-id"
                               " ranges separated by commas"))

ADD_UIDS_OPT = cli_option("--add-uids", default=None,
                          action="store", dest="add_uids",
                          help=("A list of user-ids or user-id"
                                " ranges separated by commas, to be"
                                " added to the user-id pool"))

REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
                             action="store", dest="remove_uids",
                             help=("A list of user-ids or user-id"
                                   " ranges separated by commas, to be"
                                   " removed from the user-id pool"))

RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
                              action="store", dest="reserved_lvs",
                              help=("A comma-separated list of reserved"
                                    " logical volumes names, that will be"
                                    " ignored by cluster verify"))

ROMAN_OPT = cli_option("--roman",
                       dest="roman_integers", default=False,
                       action="store_true",
                       help="Use roman numbers for positive integers")

DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
                             action="store", default=None,
                             help="Specifies usermode helper for DRBD")

NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
                                action="store_false", default=True,
                                help="Disable support for DRBD")

PRIMARY_IP_VERSION_OPT = \
    cli_option("--primary-ip-version", default=constants.IP4_VERSION,
               action="store", dest="primary_ip_version",
               metavar="%d|%d" % (constants.IP4_VERSION,
                                  constants.IP6_VERSION),
               help="Cluster-wide IP version for primary IP")

PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
                          metavar="|".join(name for name, _ in _PRIORITY_NAMES),
                          choices=_PRIONAME_TO_VALUE.keys(),
                          help="Priority for opcode processing")

HID_OS_OPT = cli_option("--hidden", dest="hidden",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the hidden flag on the OS")

BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
                        type="bool", default=None, metavar=_YORNO,
                        help="Sets the blacklisted flag on the OS")

PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
                                     type="bool", metavar=_YORNO,
                                     dest="prealloc_wipe_disks",
                                     help=("Wipe disks prior to instance"
                                           " creation"))


#: Options provided by all commands
COMMON_OPTS = [DEBUG_OPT]

# common options for creating instances. add and import then add their own
# specific ones.
COMMON_CREATE_OPTS = [
  BACKEND_OPT,
  DISK_OPT,
  DISK_TEMPLATE_OPT,
  FILESTORE_DIR_OPT,
  FILESTORE_DRIVER_OPT,
  HYPERVISOR_OPT,
  IALLOCATOR_OPT,
  NET_OPT,
  NODE_PLACEMENT_OPT,
  NOIPCHECK_OPT,
  NONAMECHECK_OPT,
  NONICS_OPT,
  NWSYNC_OPT,
  OSPARAMS_OPT,
  OS_SIZE_OPT,
  SUBMIT_OPT,
  DRY_RUN_OPT,
  PRIORITY_OPT,
  ]
def _ParseArgs(argv, commands, aliases):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param argv: the command line
  @param commands: dictionary with special contents, see the design
      doc for cmdline handling
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @return: (function, options, args) on success; (None, None, None) when
      the command is missing/unknown or its arguments fail validation

  """
  if len(argv) == 0:
    binary = "<command>"
  else:
    binary = argv[0].split("/")[-1]

  if len(argv) > 1 and argv[1] == "--version":
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    # Quit right away. That way we don't have to care about this special
    # argument. optparse.py does it the same.
    sys.exit(0)

  if len(argv) < 2 or not (argv[1] in commands or
                           argv[1] in aliases):
    # let's do a nice thing: print the usage and the list of commands
    sortedcmds = sorted(commands)

    ToStdout("Usage: %s {command} [options...] [argument...]", binary)
    ToStdout("%s <command> --help to see details, or man %s", binary, binary)
    ToStdout("")

    # compute the max line length for cmd + usage
    mlen = max(len(" %s" % cmd) for cmd in commands)
    mlen = min(60, mlen) # should not get here...

    # and format a nice command list
    ToStdout("Commands:")
    for cmd in sortedcmds:
      cmdstr = " %s" % (cmd,)
      help_text = commands[cmd][4]
      help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
      ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
      for line in help_lines:
        ToStdout("%-*s   %s", mlen, "", line)

    ToStdout("")

    return None, None, None

  # get command, unalias it, and look it up in commands
  cmd = argv.pop(1)
  if cmd in aliases:
    if cmd in commands:
      raise errors.ProgrammerError("Alias '%s' overrides an existing"
                                   " command" % cmd)

    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  # NOTE: parse_args() reads sys.argv directly; callers pass sys.argv as
  # "argv", so the pop(1) above already removed the command name from it.
  options, args = parser.parse_args()

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
1208 1209
1210 -def _CheckArguments(cmd, args_def, args):
1211 """Verifies the arguments using the argument definition. 1212 1213 Algorithm: 1214 1215 1. Abort with error if values specified by user but none expected. 1216 1217 1. For each argument in definition 1218 1219 1. Keep running count of minimum number of values (min_count) 1220 1. Keep running count of maximum number of values (max_count) 1221 1. If it has an unlimited number of values 1222 1223 1. Abort with error if it's not the last argument in the definition 1224 1225 1. If last argument has limited number of values 1226 1227 1. Abort with error if number of values doesn't match or is too large 1228 1229 1. Abort with error if user didn't pass enough values (min_count) 1230 1231 """ 1232 if args and not args_def: 1233 ToStderr("Error: Command %s expects no arguments", cmd) 1234 return False 1235 1236 min_count = None 1237 max_count = None 1238 check_max = None 1239 1240 last_idx = len(args_def) - 1 1241 1242 for idx, arg in enumerate(args_def): 1243 if min_count is None: 1244 min_count = arg.min 1245 elif arg.min is not None: 1246 min_count += arg.min 1247 1248 if max_count is None: 1249 max_count = arg.max 1250 elif arg.max is not None: 1251 max_count += arg.max 1252 1253 if idx == last_idx: 1254 check_max = (arg.max is not None) 1255 1256 elif arg.max is None: 1257 raise errors.ProgrammerError("Only the last argument can have max=None") 1258 1259 if check_max: 1260 # Command with exact number of arguments 1261 if (min_count is not None and max_count is not None and 1262 min_count == max_count and len(args) != min_count): 1263 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count) 1264 return False 1265 1266 # Command with limited number of arguments 1267 if max_count is not None and len(args) > max_count: 1268 ToStderr("Error: Command %s expects only %d argument(s)", 1269 cmd, max_count) 1270 return False 1271 1272 # Command with some required arguments 1273 if min_count is not None and len(args) < min_count: 1274 ToStderr("Error: Command %s 
expects at least %d argument(s)", 1275 cmd, min_count) 1276 return False 1277 1278 return True
1279 1280
def SplitNodeOption(value):
  """Splits the value of a --node option into (primary, secondary).

  A value of the form "pnode:snode" is split on the first colon; any
  other value (including None or the empty string) yields the value
  itself as the primary node and None as the secondary.

  """
  if not value or ':' not in value:
    return (value, None)
  return value.split(':', 1)
1289 1290
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    # no variants: the plain name is the only valid one
    return [os_name]
  return ['%s+%s' % (os_name, variant) for variant in os_variants]
1306 1307
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @return: the default list when nothing was selected; the default list
      extended by the comma-separated names when the selection starts
      with "+"; otherwise just the comma-separated names

  """
  if selected is None:
    return default

  extend = selected.startswith("+")
  chosen = (selected[1:] if extend else selected).split(",")

  if extend:
    return default + chosen
  return chosen
#: Convenience alias for L{rpc.RunWithRPC}
UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [('y', True, 'Perform the operation'),
               ('n', False, 'Do not perform the operation')]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    # '?' is reserved below as the help character
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # default answer: the last choice (used when no tty is available)
  answer = choices[-1][1]
  new_text = []
  # re-wrap the question text to 70 columns, preserving line breaks
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    # talk to the controlling terminal directly, bypassing stdin/stdout
    f = file("/dev/tty", "a+")
  except IOError:
    # no tty available, fall back to the default answer
    return answer
  try:
    chars = [entry[0] for entry in choices]
    # mark the default (last) choice with brackets, e.g. "y/[n]/?"
    chars[-1] = "[%s]" % chars[-1]
    chars.append('?')
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write('\n')
      f.write("/".join(chars))
      f.write(": ")
      # read at most 2 bytes: one answer character plus the newline
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == '?':
        # print the description of every choice, then re-ask
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
1384 1385
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.
  It is raised e.g. by L{SubmitOrSend} when only submission (and not
  waiting for the result) was requested.

  """
1395 1396
def SendJob(ops, cl=None):
  """Submit a list of opcodes as one job, without waiting for the result.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @return: the job ID assigned by the master

  """
  if cl is None:
    cl = GetClient()

  return cl.SubmitJob(ops)
1413 1414
def GenericPollJob(job_id, cbs, report_cbs):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @return: the list of opcode results if the job succeeded
  @raise errors.JobLost: if the job can no longer be found
  @raise errors.OpExecError: if the job was canceled or an opcode failed

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None

  while True:
    result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                      prev_logmsg_serial)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)

      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        # NOTE: relies on Python 2 semantics where None compares smaller
        # than any integer, so max(None, serial) == serial
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    # Only leave the loop once all pending log entries have been consumed
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  # the job has finished; fetch the final status and per-opcode results
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.OpExecError("Job was canceled")

  # the job failed: find the first failed opcode and raise with its message
  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      errors.MaybeRaise(msg)

      if has_ok:
        # some earlier opcodes succeeded, so report a partial failure
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
1491 1492
class JobPollCbBase:
  """Base class for L{GenericPollJob} callbacks.

  Defines the data-retrieval interface used while polling a job; both
  methods are abstract and must be overridden by subclasses (see
  L{_LuxiJobPollCb} for an implementation backed by a luxi client).

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    @param job_id: Job ID
    @param fields: List of job fields to watch for changes
    @param prev_job_info: Previously received field values, used to
        detect changes
    @param prev_log_serial: Serial number of the highest log entry
        already seen

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()
1519 1520
class JobPollReportCbBase:
  """Base class for L{GenericPollJob} reporting callbacks.

  Both methods are abstract; concrete subclasses decide where progress
  is reported (e.g. stdio or a feedback function).

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    @param job_id: Job ID
    @param serial: Serial number of the log entry
    @param timestamp: Timestamp of the log entry
    @param log_type: Type of the log entry
    @param log_msg: The actual log message

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
1546 1547
class _LuxiJobPollCb(JobPollCbBase):
  """L{JobPollCbBase} implementation delegating all queries to a luxi client.

  """
  def __init__(self, cl):
    """Initializes this class.

    @param cl: luxi client used for the actual queries

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)
1569 1570
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that forward log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable invoked with one (timestamp, log_type,
        log_msg) tuple per job log message

    """
    JobPollReportCbBase.__init__(self)

    assert callable(feedback_fn)

    self.feedback_fn = feedback_fn

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # Deliberately ignored: only log messages are forwarded
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callbacks that print job progress to stdout/stderr.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # Flags ensuring each "waiting" notice is printed at most once
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    Prints the formatted message, prefixed with its wall-clock time.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
      ToStderr("Job %s is waiting in queue", job_id)
      self.notified_queued = True

    elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
      ToStderr("Job %s is trying to acquire all necessary locks", job_id)
      self.notified_waitlock = True
1626 1627
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  Messages that are not of type C{constants.ELOG_MESSAGE} are converted
  to their string representation first; the result is always passed
  through L{utils.SafeEncode}.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    text = str(log_msg)

  return utils.SafeEncode(text)
1636 1637
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @param feedback_fn: if given, log messages are passed to this callable
      (mutually exclusive with C{reporter})
  @param reporter: if given, a L{JobPollReportCbBase} instance handling
      all progress reporting
  @raise errors.ProgrammerError: if both feedback_fn and reporter are given

  """
  if cl is None:
    cl = GetClient()

  if reporter is not None and feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  if reporter is None:
    if feedback_fn:
      reporter = FeedbackFnJobPollReportCb(feedback_fn)
    else:
      reporter = StdioJobPollReportCb()

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1660 1661
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @param op: the opcode to submit
  @param cl: luxi client to use; a new one is created when None
  @param feedback_fn: optional callable receiving job log messages
  @param opts: command line options applied via L{SetGenericOpcodeOpts}
  @param reporter: optional reporting callback object
  @return: the result of the single opcode

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  op_results = PollJob(SendJob([op], cl=cl), cl=cl,
                       feedback_fn=feedback_fn, reporter=reporter)

  return op_results[0]
1681 1682
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  @raise JobSubmittedException: when only submission was requested; the
      exception carries the job ID

  """
  if not (opts and opts.submit_only):
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

  job = [op]
  SetGenericOpcodeOpts(job, opts)
  raise JobSubmittedException(SendJob(job, cl=cl))
1702 1703
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return

  # Hoisted out of the loop: the same options apply to every opcode
  priority = getattr(options, "priority", None)

  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    if priority is not None:
      op.priority = _PRIONAME_TO_VALUE[priority]
1723 1724
def GetClient():
  """Creates and returns a L{luxi.Client}.

  When the master daemon cannot be reached, the ssconf data is consulted
  to produce a friendlier L{errors.OpPrereqError} (uninitialized cluster
  or command run on a non-master node); otherwise the original
  L{luxi.NoMasterError} is re-raised.

  """
  # TODO: Cache object?
  try:
    client = luxi.Client()
  except luxi.NoMasterError:
    ss = ssconf.SimpleStore()

    # Try to read ssconf file
    try:
      ss.GetMasterNode()
    except errors.ConfigurationError:
      raise errors.OpPrereqError("Cluster not initialized or this machine is"
                                 " not part of a cluster")

    master, myself = ssconf.GetMasterAndMyself(ss=ss)
    if master != myself:
      raise errors.OpPrereqError("This is not the master node, please connect"
                                 " to node '%s' and rerun the command" %
                                 master)
    # we are on the master but the daemon is unreachable: propagate
    raise
  return client
1746 1747
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @type err: Exception
  @param err: the exception to format
  @rtype: tuple
  @return: (exit code, message); exit code is 2 for configuration
      errors, 0 for L{JobSubmittedException} and 1 otherwise

  """
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    # err.args[0] is a list of (node, script, output) tuples
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    # distinguish "can't resolve myself" from a generic resolution failure
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, luxi.NoMasterError):
    obuf.write("Cannot communicate with the master daemon.\nIs it running"
               " and listening for connections?")
  elif isinstance(err, luxi.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Error:\n"
               "%s" % msg)
  elif isinstance(err, luxi.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, luxi.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # not an error: the client just wanted the job ID printed
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip('\n')
1828 1829
def GenericMain(commands, override=None, aliases=None):
  """Generic main function for all the gnt-* commands.

  Arguments:
    - commands: a dictionary with a special structure, see the design doc
                for command line handling.
    - override: if not None, we expect a dictionary with keys that will
                override command line options; this can be used to pass
                options from the scripts to generic functions
    - aliases: dictionary with command aliases {'alias': 'target, ...}

  @return: the exit code of the command, suitable for sys.exit()

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0]) or sys.argv[0]
    if len(sys.argv) >= 2:
      # include the sub-command in the logged program name
      binary += " " + sys.argv[1]
      old_cmdline = " ".join(sys.argv[2:])
    else:
      old_cmdline = ""
  else:
    binary = "<unknown program>"
    old_cmdline = ""

  if aliases is None:
    aliases = {}

  try:
    func, options, args = _ParseArgs(sys.argv, commands, aliases)
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  if override is not None:
    # forced option values take precedence over the parsed command line
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(constants.LOG_COMMANDS, debug=options.debug,
                     stderr_logging=True, program=binary)

  if old_cmdline:
    logging.info("run with arguments '%s'", old_cmdline)
  else:
    logging.info("run with no arguments")

  try:
    result = func(options, args)
  except (errors.GenericError, luxi.ProtocolError,
          JobSubmittedException), err:
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)

  return result
1888 1889
1890 -def ParseNicOption(optvalue):
1891 """Parses the value of the --net option(s). 1892 1893 """ 1894 try: 1895 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue) 1896 except (TypeError, ValueError), err: 1897 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err)) 1898 1899 nics = [{}] * nic_max 1900 for nidx, ndict in optvalue: 1901 nidx = int(nidx) 1902 1903 if not isinstance(ndict, dict): 1904 raise errors.OpPrereqError("Invalid nic/%d value: expected dict," 1905 " got %s" % (nidx, ndict)) 1906 1907 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES) 1908 1909 nics[nidx] = ndict 1910 1911 return nics
1912 1913
def GenericInstanceCreate(mode, opts, args):
  """Add an instance to the cluster via either creation or import.

  @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the new instance name
  @rtype: int
  @return: the desired exit code
  @raise errors.OpPrereqError: on inconsistent disk/NIC specifications

  """
  instance = args[0]

  (pnode, snode) = SplitNodeOption(opts.node)

  hypervisor = None
  hvparams = {}
  if opts.hypervisor:
    hypervisor, hvparams = opts.hypervisor

  if opts.nics:
    nics = ParseNicOption(opts.nics)
  elif opts.no_nics:
    # no nics
    nics = []
  elif mode == constants.INSTANCE_CREATE:
    # default of one nic, all auto
    nics = [{}]
  else:
    # mode == import
    nics = []

  if opts.disk_template == constants.DT_DISKLESS:
    if opts.disks or opts.sd_size is not None:
      raise errors.OpPrereqError("Diskless instance but disk"
                                 " information passed")
    disks = []
  else:
    if (not opts.disks and not opts.sd_size
        and mode == constants.INSTANCE_CREATE):
      raise errors.OpPrereqError("No disk information specified")
    if opts.disks and opts.sd_size is not None:
      raise errors.OpPrereqError("Please use either the '--disk' or"
                                 " '-s' option")
    if opts.sd_size is not None:
      # translate the legacy single-size option into a one-disk spec
      opts.disks = [(0, {"size": opts.sd_size})]

    if opts.disks:
      try:
        disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
      except ValueError, err:
        raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
      # NOTE(review): "[{}] * disk_max" fills unset indices with the SAME
      # dict object; safe only if no one mutates those placeholders later
      disks = [{}] * disk_max
    else:
      disks = []
    for didx, ddict in opts.disks:
      didx = int(didx)
      if not isinstance(ddict, dict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg)
      elif "size" in ddict:
        # "size" and "adopt" are mutually exclusive per disk
        if "adopt" in ddict:
          raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
                                     " (disk %d)" % didx)
        try:
          ddict["size"] = utils.ParseUnit(ddict["size"])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err))
      elif "adopt" in ddict:
        if mode == constants.INSTANCE_IMPORT:
          raise errors.OpPrereqError("Disk adoption not allowed for instance"
                                     " import")
        ddict["size"] = 0
      else:
        raise errors.OpPrereqError("Missing size or adoption source for"
                                   " disk %d" % didx)
      disks[didx] = ddict

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  # parameters that differ between creation and import
  if mode == constants.INSTANCE_CREATE:
    start = opts.start
    os_type = opts.os
    force_variant = opts.force_variant
    src_node = None
    src_path = None
    no_install = opts.no_install
    identify_defaults = False
  elif mode == constants.INSTANCE_IMPORT:
    start = False
    os_type = None
    force_variant = False
    src_node = opts.src_node
    src_path = opts.src_dir
    no_install = None
    identify_defaults = opts.identify_defaults
  else:
    raise errors.ProgrammerError("Invalid creation mode %s" % mode)

  op = opcodes.OpCreateInstance(instance_name=instance,
                                disks=disks,
                                disk_template=opts.disk_template,
                                nics=nics,
                                pnode=pnode, snode=snode,
                                ip_check=opts.ip_check,
                                name_check=opts.name_check,
                                wait_for_sync=opts.wait_for_sync,
                                file_storage_dir=opts.file_storage_dir,
                                file_driver=opts.file_driver,
                                iallocator=opts.iallocator,
                                hypervisor=hypervisor,
                                hvparams=hvparams,
                                beparams=opts.beparams,
                                osparams=opts.osparams,
                                mode=mode,
                                start=start,
                                os_type=os_type,
                                force_variant=force_variant,
                                src_node=src_node,
                                src_path=src_path,
                                no_install=no_install,
                                identify_defaults=identify_defaults)

  SubmitOrSend(op, opts)
  return 0
2041 2042
class _RunWhileClusterStoppedHelper:
  """Helper class for L{RunWhileClusterStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes

    self.ssh = ssh.SshRunner(self.cluster_name)

    # All online nodes except the master; commands for these always go
    # through SSH, while the master is handled locally (see _RunCmd)
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name; C{None} or the master node's name
        means the command is run locally
    @type cmd: list
    @param cmd: Command (list of program and arguments)
    @raise errors.OpExecError: if the command exits with a non-zero status

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    The watcher is blocked and all daemons on all online nodes are
    stopped before C{fn} runs; afterwards the daemons are restarted
    (master node last) even if C{fn} raised.

    @type fn: callable
    @param fn: Function to be called; receives this helper instance as
        its first argument, followed by C{args}

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes
        for node_name in self.online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          # Report the error to the user, but re-raise so callers see it too
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
    finally:
      # Resume watcher
      watcher_block.Close()
2136 2137
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # Talking to the master daemon also ensures it's currently running
  client = GetClient()

  (cluster_name, master_node) = \
    client.QueryConfigValues(["cluster_name", "master_node"])

  online_nodes = GetOnlineNodes([], cl=client)

  # The master daemon will be stopped below, so no reference to the
  # client may be kept
  del client

  assert master_node in online_nodes

  helper = _RunWhileClusterStoppedHelper(feedback_fn, cluster_name,
                                         master_node, online_nodes)
  return helper.Call(fn, *args)
2164 2165
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  Note: the rows of C{data} are modified in place (values are converted
  to strings and unit fields are reformatted), and unknown fields are
  added to C{headers}.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @type separator: string or None
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list of strings
  @return: the formatted table, one string per output row

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields) # pylint: disable-msg=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable-msg=W0142

  # Build one %-format specifier per column: plain "%s" for separator
  # mode, width-parameterized right-/left-aligned otherwise
  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    # mlens tracks the maximum width seen per column
    mlens = [0 for name in fields]
    format_str = ' '.join(format_fields)
  else:
    # Escape any '%' in the separator so it survives %-formatting
    format_str = separator.replace("%", "%%").join(format_fields)

  # First pass: stringify all values (in place), applying unit
  # formatting, and record column widths for smart alignment
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          # non-numeric value in a unit field is passed through as-is
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        # headers can also widen a column
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    # Avoid trailing whitespace: give the last column zero width unless
    # it's right-aligned (numeric)
    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  # Second pass: render each data row; None rows become all dashes
  for line in data:
    args = []
    if line is None:
      line = ['-' for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
2272 2273
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp ("?" if the input is
      not a two-element tuple or list)

  """
  if not isinstance(ts, (tuple, list)) or len(ts) != 2:
    return '?'
  sec, usec = ts
  # "%F %T" are C99 shorthands and are not guaranteed to be supported by
  # the platform's strftime (e.g. on Windows); use the equivalent
  # explicit directives instead
  return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(sec)) + \
    ".%06d" % usec
2288 2289
def ParseTimespec(value):
  """Parse a time specification into a number of seconds.

  Recognized suffixes are:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  A value without any suffix is interpreted as seconds.

  @raise errors.OpPrereqError: if the specification is empty or invalid

  """
  text = str(value)
  if not text:
    raise errors.OpPrereqError("Empty time specification passed")

  multipliers = {
    's': 1,
    'm': 60,
    'h': 3600,
    'd': 86400,
    'w': 604800,
    }

  suffix = text[-1]
  if suffix in multipliers:
    factor = multipliers[suffix]
    text = text[:-1]
    if not text: # no data left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)")
  else:
    factor = 1

  try:
    return int(text) * factor
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % text)
2330 2331
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False):
  """Returns the names of online nodes.

  This function will also log a warning on stderr with the names of
  the online nodes.

  @param nodes: if not empty, use only this subset of nodes (minus the
    offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
    offline nodes that are skipped; if this parameter is True the
    note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
    names, useful for doing network traffic over the replication interface
    (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
    (useful in coordination with secondary_ips where we cannot check our
    node name against the list)

  """
  if cl is None:
    cl = GetClient()

  if filter_master:
    master_node = cl.QueryConfigValues(["master_node"])[0]
  else:
    master_node = None

  rows = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
                       use_locking=False)

  offline = []
  online = []
  for (name, is_offline, sip) in rows:
    if is_offline:
      offline.append(name)
    elif master_node is None or name != master_node:
      if secondary_ips:
        online.append(sip)
      else:
        online.append(name)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))

  return online
2376 2377
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  A newline is always appended and the stream is flushed afterwards.

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message; treated as a %-format string only when
      C{args} are given, so a literal '%' in a plain message is safe

  """
  if args:
    # args collected via *args is already a tuple, so it can be used
    # directly as the right-hand side of the % operator
    stream.write(txt % args)
  else:
    stream.write(txt)
  stream.write('\n')
  stream.flush()
2394 2395
def ToStdout(txt, *args):
  """Write a message to stdout only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional %-interpolation arguments for txt

  """
  _ToStream(sys.stdout, txt, *args)
2406 2407
def ToStderr(txt, *args):
  """Write a message to stderr only, bypassing the logging system

  This is just a wrapper over _ToStream.

  @type txt: str
  @param txt: the message
  @param args: optional %-interpolation arguments for txt

  """
  _ToStream(sys.stderr, txt, *args)
2418 2419
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    """Initializes this class.

    @param cl: luxi client to use; a new one is created via L{GetClient}
        if None
    @type verbose: boolean
    @param verbose: whether to print the IDs of successfully submitted
        jobs (see L{GetResults})
    @param opts: parsed command line options, passed to
        L{SetGenericOpcodeOpts} on job submission
    @param feedback_fn: feedback function passed to L{PollJob}

    """
    # queue of (name, ops) tuples not yet submitted
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    # submitted jobs as (index, submit status, job id or error, name)
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet
    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((name, ops))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: boolean
    @param each: if True, submit each queued job individually via
        SubmitJob instead of one SubmitManyJobs call

    """
    if each:
      results = []
      for row in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(row[1])])
    else:
      results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
    # pair each submission result with its queue entry, remembering the
    # original index so final results can be re-ordered later
    for (idx, ((status, data), (name, _))) in enumerate(zip(results,
                                                            self.queue)):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    @return: the chosen (index, status, job id, name) entry, removed
        from L{jobs}

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITLOCK,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found; fall back to the oldest entry so the caller can
    # block on it
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
        as the submitted jobs; if a job has failed, instead of the result
        there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      # for failed submissions the third field holds the error, not a
      # job id
      ToStderr("Failed to submit job for %s: %s", name, jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s for %s...", jid, name)
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s for %s has been archived, cannot check its result",
                 jid, name)
        success = False
      except (errors.GenericError, luxi.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s for %s has failed: %s", jid, name, job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not
    @return: the job results (as for L{GetResults}) when waiting,
        otherwise the (status, job id) pairs of the submitted jobs

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]