Package ganeti :: Module cli
[hide private]
[frames] | [no frames]

Source Code for Module ganeti.cli

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30   
  31  """Module dealing with command line parsing""" 
  32   
  33   
  34  import sys 
  35  import textwrap 
  36  import os.path 
  37  import time 
  38  import logging 
  39  import errno 
  40  import itertools 
  41  import shlex 
  42  from cStringIO import StringIO 
  43   
  44  from ganeti import utils 
  45  from ganeti import errors 
  46  from ganeti import constants 
  47  from ganeti import opcodes 
  48  import ganeti.rpc.errors as rpcerr 
  49  import ganeti.rpc.node as rpc 
  50  from ganeti import ssh 
  51  from ganeti import compat 
  52  from ganeti import netutils 
  53  from ganeti import qlang 
  54  from ganeti import objects 
  55  from ganeti import pathutils 
  56  from ganeti import serializer 
  57  import ganeti.cli_opts 
  58  # Import constants 
  59  from ganeti.cli_opts import *  # pylint: disable=W0401 
  60   
  61  from ganeti.runtime import (GetClient) 
  62   
  63  from optparse import (OptionParser, TitledHelpFormatter) 
  64   
  65   
# Public API of this module; command line option names from ganeti.cli_opts
# are re-exported as well (see the trailing concatenation).
__all__ = [
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "GetNodesSshPorts",
  "GetNodeUUIDs",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "RunWhileDaemonsStopped",
  "SubmitOpCode",
  "SubmitOpCodeToDrainedQueue",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "ToStdoutAndLoginfo",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # command line options support infrastructure
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_MANY_FILTERS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ARGS_ONE_FILTER",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgFilter",
  "ArgSuggest",
  "ArgUnknown",
  "FixHvParams",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  ] + ganeti.cli_opts.__all__ # Command line options
# Query result status for clients: normal, field unknown, data incomplete
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)

#: Maximum batch size for ChooseJob
_CHOOSE_BATCH = 25


# constants used to create InstancePolicy dictionary
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

# cluster-level policies additionally carry the "std" (default) spec
TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }

#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_NUMBER_FLOAT: "Floating-point number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }
169 170 171 -class _Argument(object):
172 - def __init__(self, min=0, max=None): # pylint: disable=W0622
173 self.min = min 174 self.max = max
175
176 - def __repr__(self):
177 return ("<%s min=%s max=%s>" % 178 (self.__class__.__name__, self.min, self.max))
179
class ArgSuggest(_Argument):
  """Suggesting argument.

  Value can be any of the ones passed to the constructor.

  """
  # pylint: disable=W0622
  def __init__(self, min=0, max=None, choices=None):
    _Argument.__init__(self, min=min, max=max)
    self.choices = choices

  def __repr__(self):
    return "<{0} min={1} max={2} choices={3!r}>".format(
      self.__class__.__name__, self.min, self.max, self.choices)
class ArgChoice(ArgSuggest):
  """Choice argument.

  Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
  but value must be one of the choices.

  """
class ArgUnknown(_Argument):
  """Unknown argument to program (e.g. determined at runtime).

  """
class ArgInstance(_Argument):
  """Instances argument (instance name, completion-aware).

  """
class ArgNode(_Argument):
  """Node argument (node name, completion-aware).

  """
class ArgNetwork(_Argument):
  """Network argument (network name, completion-aware).

  """
class ArgGroup(_Argument):
  """Node group argument (group name, completion-aware).

  """
class ArgJobId(_Argument):
  """Job ID argument.

  """
class ArgFile(_Argument):
  """File path argument.

  """
class ArgCommand(_Argument):
  """Command argument (executable name).

  """
class ArgHost(_Argument):
  """Host argument (hostname).

  """
class ArgOs(_Argument):
  """OS argument (operating system name).

  """
class ArgExtStorage(_Argument):
  """ExtStorage argument (external storage provider name).

  """
class ArgFilter(_Argument):
  """Filter UUID argument.

  """
# Pre-built argument definitions used by the per-binary command tables.
# "MANY" variants accept any number of values (including zero),
# "ONE" variants require exactly one value.
ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_MANY_FILTERS = [ArgFilter()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
ARGS_ONE_FILTER = [ArgFilter(min=1, max=1)]
def _ExtractTagsObject(opts, args):
  """Extract the tag type object.

  Note that this function will modify its args parameter.

  @param opts: parsed command line options; must carry a C{tag_type} field
  @param args: positional arguments; for non-cluster tag types the first
      element is popped off and used as the object name
  @return: tuple of (tag kind, object name)
  @raise errors.ProgrammerError: if C{tag_type} is missing or unhandled
  @raise errors.OpPrereqError: if a name is required but args is empty

  """
  if not hasattr(opts, "tag_type"):
    raise errors.ProgrammerError("tag_type not passed to _ExtractTagsObject")
  kind = opts.tag_type
  if kind == constants.TAG_CLUSTER:
    # the cluster is a singleton, no name needed
    retval = kind, ""
  elif kind in (constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK,
                constants.TAG_INSTANCE):
    if not args:
      raise errors.OpPrereqError("no arguments passed to the command",
                                 errors.ECODE_INVAL)
    # consume the object name from the argument list
    name = args.pop(0)
    retval = kind, name
  else:
    raise errors.ProgrammerError("Unhandled tag type '%s'" % kind)
  return retval
316 317 -def _ExtendTags(opts, args):
318 """Extend the args if a source file has been given. 319 320 This function will extend the tags with the contents of the file 321 passed in the 'tags_source' attribute of the opts parameter. A file 322 named '-' will be replaced by stdin. 323 324 """ 325 fname = opts.tags_source 326 if fname is None: 327 return 328 if fname == "-": 329 new_fh = sys.stdin 330 else: 331 new_fh = open(fname, "r") 332 new_data = [] 333 try: 334 # we don't use the nice 'new_data = [line.strip() for line in fh]' 335 # because of python bug 1633941 336 while True: 337 line = new_fh.readline() 338 if not line: 339 break 340 new_data.append(line.strip()) 341 finally: 342 new_fh.close() 343 args.extend(new_data)
344
def ListTags(opts, args):
  """List the tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  client = GetClient()
  # print the tags sorted, one per line
  for tag in sorted(client.QueryTags(kind, name)):
    ToStdout(tag)
def AddTags(opts, args):
  """Add tags on a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # optionally pull additional tags from a tags-source file
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be added", errors.ECODE_INVAL)
  SubmitOrSend(opcodes.OpTagsSet(kind=kind, name=name, tags=args), opts)
def RemoveTags(opts, args):
  """Remove tags from a given object.

  This is a generic implementation that knows how to deal with all
  three cases of tag objects (cluster, node, instance). The opts
  argument is expected to contain a tag_type field denoting what
  object type we work on.

  """
  kind, name = _ExtractTagsObject(opts, args)
  # optionally pull additional tags from a tags-source file
  _ExtendTags(opts, args)
  if not args:
    raise errors.OpPrereqError("No tags to be removed", errors.ECODE_INVAL)
  SubmitOrSend(opcodes.OpTagsDel(kind=kind, name=name, tags=args), opts)
397 398 -class _ShowUsage(Exception):
399 """Exception class for L{_ParseArgs}. 400 401 """
402 - def __init__(self, exit_error):
403 """Initializes instances of this class. 404 405 @type exit_error: bool 406 @param exit_error: Whether to report failure on exit 407 408 """ 409 Exception.__init__(self) 410 self.exit_error = exit_error
411
class _ShowVersion(Exception):
  """Exception raised by L{_ParseArgs} when the version must be shown.

  """
def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @return: tuple of (command function, parsed options, remaining args),
      or (None, None, None) if argument checking failed
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  # env_override entries must be real commands, and aliases must not
  # shadow command names
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # get command, unalias it, and look it up in commands
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    # default arguments may come from e.g. $GNT_INSTANCE_LIST-style
    # environment variables; they are spliced in after the command word
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
477 478 -def _FormatUsage(binary, commands):
479 """Generates a nice description of all commands. 480 481 @param binary: Script name 482 @param commands: Dictionary containing command definitions 483 484 """ 485 # compute the max line length for cmd + usage 486 mlen = min(60, max(map(len, commands))) 487 488 yield "Usage: %s {command} [options...] [argument...]" % binary 489 yield "%s <command> --help to see details, or man %s" % (binary, binary) 490 yield "" 491 yield "Commands:" 492 493 # and format a nice command list 494 for (cmd, (_, _, _, _, help_text)) in sorted(commands.items()): 495 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen) 496 yield " %-*s - %s" % (mlen, cmd, help_lines.pop(0)) 497 for line in help_lines: 498 yield " %-*s %s" % (mlen, "", line) 499 500 yield ""
501
def _CheckArguments(cmd, args_def, args):
  """Verifies the arguments using the argument definition.

  Algorithm:

  1. Abort with error if values specified by user but none expected.

  1. For each argument in definition

    1. Keep running count of minimum number of values (min_count)
    1. Keep running count of maximum number of values (max_count)
    1. If it has an unlimited number of values

      1. Abort with error if it's not the last argument in the definition

  1. If last argument has limited number of values

    1. Abort with error if number of values doesn't match or is too large

  1. Abort with error if user didn't pass enough values (min_count)

  @return: True if the arguments are valid, False otherwise (an error
      message has already been printed to stderr in that case)

  """
  if args and not args_def:
    ToStderr("Error: Command %s expects no arguments", cmd)
    return False

  min_count = None
  max_count = None
  check_max = None

  last_idx = len(args_def) - 1

  for idx, arg in enumerate(args_def):
    # accumulate minimum/maximum value counts over all argument definitions
    if min_count is None:
      min_count = arg.min
    elif arg.min is not None:
      min_count += arg.min

    if max_count is None:
      max_count = arg.max
    elif arg.max is not None:
      max_count += arg.max

    if idx == last_idx:
      # only check the upper bound when the last argument is bounded
      check_max = (arg.max is not None)

    elif arg.max is None:
      raise errors.ProgrammerError("Only the last argument can have max=None")

  if check_max:
    # Command with exact number of arguments
    if (min_count is not None and max_count is not None and
        min_count == max_count and len(args) != min_count):
      ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
      return False

    # Command with limited number of arguments
    if max_count is not None and len(args) > max_count:
      ToStderr("Error: Command %s expects only %d argument(s)",
               cmd, max_count)
      return False

  # Command with some required arguments
  if min_count is not None and len(args) < min_count:
    ToStderr("Error: Command %s expects at least %d argument(s)",
             cmd, min_count)
    return False

  return True
def SplitNodeOption(value):
  """Splits the value of a --node option.

  A value of the form "primary:secondary" is split at the first colon;
  any other value is returned with None as the second element.

  """
  if not value or ":" not in value:
    return (value, None)
  return value.split(":", 1)
def CalculateOSNames(os_name, os_variants):
  """Calculates all the names an OS can be called, according to its variants.

  @type os_name: string
  @param os_name: base name of the os
  @type os_variants: list or None
  @param os_variants: list of supported variants
  @rtype: list
  @return: list of valid names

  """
  if not os_variants:
    # no variants: the base name is the only valid name
    return [os_name]
  return ["%s+%s" % (os_name, variant) for variant in os_variants]
def ParseFields(selected, default):
  """Parses the values of "--field"-like options.

  @type selected: string or None
  @param selected: User-selected options
  @type default: list
  @param default: Default fields
  @return: list of field names

  """
  if selected is None:
    # no user selection: use the defaults as-is
    return default

  # a leading "+" extends the default fields instead of replacing them
  extend = selected.startswith("+")
  fields = (selected[1:] if extend else selected).split(",")
  if extend:
    return default + fields
  return fields
# Decorator that sets up and tears down the RPC infrastructure around a CLI
# function; exported alias for rpc.RunWithRPC (see __all__).
UsesRPC = rpc.RunWithRPC
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  @raise errors.ProgrammerError: if choices is not a non-empty list of
      valid (char, value, description) tuples

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # default answer (also returned when no tty is available)
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    # use open() instead of the deprecated file() builtin (removed in
    # Python 3); behavior is identical for this use case
    f = open("/dev/tty", "a+")
  except IOError:
    return answer
  try:
    chars = [entry[0] for entry in choices]
    # mark the default (last) choice with brackets, e.g. "y/[n]/?"
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      # read at most 2 chars: the answer character plus the newline
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        # print per-choice help and ask again
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
  finally:
    f.close()
  return answer
class JobSubmittedException(Exception):
  """Job was submitted, client should exit.

  This exception has one argument, the ID of the job that was
  submitted. The handler should print this ID.

  This is not an error, just a structured way to exit from clients.

  """
def SendJob(ops, cl=None):
  """Function to submit an opcode without waiting for the results.

  @type ops: list
  @param ops: list of opcodes
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @return: the job ID of the submitted job

  """
  client = cl if cl is not None else GetClient()
  return client.SubmitJob(ops)
def GenericPollJob(job_id, cbs, report_cbs, cancel_fn=None,
                   update_freq=constants.DEFAULT_WFJC_TIMEOUT):
  """Generic job-polling function.

  @type job_id: number
  @param job_id: Job ID
  @type cbs: Instance of L{JobPollCbBase}
  @param cbs: Data callbacks
  @type report_cbs: Instance of L{JobPollReportCbBase}
  @param report_cbs: Reporting callbacks
  @type cancel_fn: Function returning a boolean
  @param cancel_fn: Function to check if we should cancel the running job
  @type update_freq: int/long
  @param update_freq: number of seconds between each WFJC reports
  @return: the opresult of the job
  @raise errors.JobLost: If job can't be found
  @raise errors.JobCanceled: If job is canceled
  @raise errors.OpExecError: If job didn't succeed

  """
  prev_job_info = None
  prev_logmsg_serial = None

  status = None
  should_cancel = False

  if update_freq <= 0:
    raise errors.ParameterError("Update frequency must be a positive number")

  while True:
    if cancel_fn:
      # poll in short slices so cancel_fn is consulted at least every
      # CLI_WFJC_FREQUENCY seconds while waiting up to update_freq total
      timer = 0
      while timer < update_freq:
        result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                          prev_logmsg_serial,
                                          timeout=constants.CLI_WFJC_FREQUENCY)
        should_cancel = cancel_fn()
        if should_cancel or not result or result != constants.JOB_NOTCHANGED:
          break
        timer += constants.CLI_WFJC_FREQUENCY
    else:
      result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
                                        prev_logmsg_serial, timeout=update_freq)
    if not result:
      # job not found, go away!
      raise errors.JobLost("Job with id %s lost" % job_id)

    if should_cancel:
      logging.info("Job %s canceled because the client timed out.", job_id)
      cbs.CancelJob(job_id)
      raise errors.JobCanceled("Job was canceled")

    if result == constants.JOB_NOTCHANGED:
      report_cbs.ReportNotChanged(job_id, status)
      # Wait again
      continue

    # Split result, a tuple of (field values, log entries)
    (job_info, log_entries) = result
    (status, ) = job_info

    if log_entries:
      for log_entry in log_entries:
        (serial, timestamp, log_type, message) = log_entry
        report_cbs.ReportLogMessage(job_id, serial, timestamp,
                                    log_type, message)
        # max(None, serial) works here: the first serial replaces None
        prev_logmsg_serial = max(prev_logmsg_serial, serial)

    # TODO: Handle canceled and archived jobs
    # NOTE(review): the terminal-status check below only runs when there
    # were no new log entries (elif on "if log_entries"); with fresh log
    # messages the loop iterates once more before checking the status
    elif status in (constants.JOB_STATUS_SUCCESS,
                    constants.JOB_STATUS_ERROR,
                    constants.JOB_STATUS_CANCELING,
                    constants.JOB_STATUS_CANCELED):
      break

    prev_job_info = job_info

  # job reached a terminal state; fetch the final result
  jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
  if not jobs:
    raise errors.JobLost("Job with id %s lost" % job_id)

  status, opstatus, result = jobs[0]

  if status == constants.JOB_STATUS_SUCCESS:
    return result

  if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
    raise errors.JobCanceled("Job was canceled")

  has_ok = False
  for idx, (status, msg) in enumerate(zip(opstatus, result)):
    if status == constants.OP_STATUS_SUCCESS:
      has_ok = True
    elif status == constants.OP_STATUS_ERROR:
      # re-raise an encapsulated exception if there is one
      errors.MaybeRaise(msg)

      if has_ok:
        raise errors.OpExecError("partial failure (opcode %d): %s" %
                                 (idx, msg))

      raise errors.OpExecError(str(msg))

  # default failure mode
  raise errors.OpExecError(result)
class JobPollCbBase(object):
  """Base class for L{GenericPollJob} callbacks.

  Subclasses provide the data-access side of job polling: waiting for
  changes, querying job fields and canceling jobs.

  """
  def __init__(self):
    """Initializes this class.

    """

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial,
                           timeout=constants.DEFAULT_WFJC_TIMEOUT):
    """Waits for changes on a job.

    """
    raise NotImplementedError()

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    @type job_ids: list of numbers
    @param job_ids: Job IDs
    @type fields: list of strings
    @param fields: Fields

    """
    raise NotImplementedError()

  def CancelJob(self, job_id):
    """Cancels a currently running job.

    @type job_id: number
    @param job_id: The ID of the Job we want to cancel

    """
    raise NotImplementedError()
class JobPollReportCbBase(object):
  """Base class for L{GenericPollJob} reporting callbacks.

  Subclasses decide how job progress (log messages, "still waiting"
  notifications) is presented to the user.

  """
  def __init__(self):
    """Initializes this class.

    """

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    raise NotImplementedError()

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    @type job_id: number
    @param job_id: Job ID
    @type status: string or None
    @param status: Job status if available

    """
    raise NotImplementedError()
class _LuxiJobPollCb(JobPollCbBase):
  """L{JobPollCbBase} implementation delegating to a luxi client.

  """
  def __init__(self, cl):
    """Initializes this class.

    @param cl: the luxi client used for all job queries

    """
    JobPollCbBase.__init__(self)
    self.cl = cl

  def WaitForJobChangeOnce(self, job_id, fields,
                           prev_job_info, prev_log_serial,
                           timeout=constants.DEFAULT_WFJC_TIMEOUT):
    """Waits for changes on a job.

    """
    return self.cl.WaitForJobChangeOnce(job_id, fields,
                                        prev_job_info, prev_log_serial,
                                        timeout=timeout)

  def QueryJobs(self, job_ids, fields):
    """Returns the selected fields for the selected job IDs.

    """
    return self.cl.QueryJobs(job_ids, fields)

  def CancelJob(self, job_id):
    """Cancels a currently running job.

    """
    return self.cl.CancelJob(job_id)
class FeedbackFnJobPollReportCb(JobPollReportCbBase):
  """Reporting callback forwarding log messages to a feedback function.

  """
  def __init__(self, feedback_fn):
    """Initializes this class.

    @param feedback_fn: callable receiving (timestamp, log_type, log_msg)
        tuples for every job log message

    """
    JobPollReportCbBase.__init__(self)

    self.feedback_fn = feedback_fn

    assert callable(feedback_fn)

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    self.feedback_fn((timestamp, log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    # deliberately ignored: the feedback function only sees log messages
class StdioJobPollReportCb(JobPollReportCbBase):
  """Reporting callback printing job progress to stdout/stderr.

  """
  def __init__(self):
    """Initializes this class.

    """
    JobPollReportCbBase.__init__(self)

    # remember which one-time notifications were already printed
    self.notified_queued = False
    self.notified_waitlock = False

  def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
    """Handles a log message.

    """
    ToStdout("%s %s", time.ctime(utils.MergeTime(timestamp)),
             FormatLogMessage(log_type, log_msg))

  def ReportNotChanged(self, job_id, status):
    """Called if a job hasn't changed in a while.

    """
    if status is None:
      return

    if status == constants.JOB_STATUS_QUEUED:
      if not self.notified_queued:
        ToStderr("Job %s is waiting in queue", job_id)
        self.notified_queued = True
    elif status == constants.JOB_STATUS_WAITING:
      if not self.notified_waitlock:
        ToStderr("Job %s is trying to acquire all necessary locks", job_id)
        self.notified_waitlock = True
def FormatLogMessage(log_type, log_msg):
  """Formats a job message according to its type.

  """
  if log_type == constants.ELOG_MESSAGE:
    text = log_msg
  else:
    # non-message payloads are stringified before encoding
    text = str(log_msg)
  return utils.SafeEncode(text)
def PollJob(job_id, cl=None, feedback_fn=None, reporter=None, cancel_fn=None,
            update_freq=constants.DEFAULT_WFJC_TIMEOUT):
  """Function to poll for the result of a job.

  @type job_id: job identified
  @param job_id: the job to poll for results
  @type cl: luxi.Client
  @param cl: the luxi client to use for communicating with the master;
      if None, a new client will be created
  @type cancel_fn: Function returning a boolean
  @param cancel_fn: Function to check if we should cancel the running job
  @type update_freq: int/long
  @param update_freq: number of seconds between each WFJC report

  """
  if cl is None:
    cl = GetClient()

  if reporter is None:
    # build a reporter from the feedback function, falling back to stdio
    reporter = (FeedbackFnJobPollReportCb(feedback_fn) if feedback_fn
                else StdioJobPollReportCb())
  elif feedback_fn:
    raise errors.ProgrammerError("Can't specify reporter and feedback function")

  return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter,
                        cancel_fn=cancel_fn, update_freq=update_freq)
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)
  if getattr(opts, "print_jobid", False):
    ToStdout("%d" % job_id)

  # wait for the job and return the result of its only opcode
  return PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                 reporter=reporter)[0]
def SubmitOpCodeToDrainedQueue(op):
  """Forcefully insert a job in the queue, even if it is drained.

  """
  client = GetClient()
  job_id = client.SubmitJobToDrainedQueue([op])
  # wait for the job and return the result of its only opcode
  return PollJob(job_id, cl=client)[0]
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending the via SendJob
  (otherwise SubmitOpCode does it).

  """
  if not (opts and opts.submit_only):
    # default path: submit and wait for the result
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)

  # --submit path: send the job, print its ID and exit via exception
  job = [op]
  SetGenericOpcodeOpts(job, opts)
  job_id = SendJob(job, cl=cl)
  if opts.print_jobid:
    ToStdout("%d" % job_id)
  raise JobSubmittedException(job_id)
def _InitReasonTrail(op, opts):
  """Builds the first part of the reason trail

  Builds the initial part of the reason trail, adding the user provided reason
  (if it exists) and the name of the command starting the operation.

  @param op: the opcode the reason trail will be added to
  @param opts: the command line options selected by the user

  """
  assert len(sys.argv) >= 2

  trail = []
  if opts.reason:
    # user-supplied reason comes first in the trail
    trail.append((constants.OPCODE_REASON_SRC_USER,
                  opts.reason,
                  utils.EpochNano()))

  # followed by the invoking client binary and sub-command
  binary = os.path.basename(sys.argv[0])
  source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
  trail.append((source, sys.argv[1], utils.EpochNano()))
  op.reason = trail
def SetGenericOpcodeOpts(opcode_list, options):
  """Processor for generic options.

  This function updates the given opcodes based on generic command
  line options (like debug, dry-run, etc.).

  @param opcode_list: list of opcodes
  @param options: command line options or None
  @return: None (in-place modification)

  """
  if not options:
    return
  for op in opcode_list:
    op.debug_level = options.debug
    if hasattr(options, "dry_run"):
      op.dry_run = options.dry_run
    priority = getattr(options, "priority", None)
    if priority is not None:
      op.priority = priority
    _InitReasonTrail(op, options)
def FormatError(err):
  """Return a formatted error message for a given error.

  This function takes an exception instance and returns a tuple
  consisting of two values: first, the recommended exit code, and
  second, a string describing the error message (not
  newline-terminated).

  @param err: exception instance to describe
  @rtype: tuple; (int, string)
  @return: recommended exit code and a description of the error

  """
  # Exit code 1 ("generic failure") unless a branch below overrides it
  retcode = 1
  obuf = StringIO()
  msg = str(err)
  # Branches go from most specific to most generic; the errors.GenericError
  # catch-all must stay near the end so its subclasses are matched first
  if isinstance(err, errors.ConfigurationError):
    txt = "Corrupt configuration file: %s" % msg
    logging.error(txt)
    obuf.write(txt + "\n")
    obuf.write("Aborting.")
    retcode = 2
  elif isinstance(err, errors.HooksAbort):
    obuf.write("Failure: hooks execution failed:\n")
    for node, script, out in err.args[0]:
      if out:
        obuf.write("  node: %s, script: %s, output: %s\n" %
                   (node, script, out))
      else:
        obuf.write("  node: %s, script: %s (no output)\n" %
                   (node, script))
  elif isinstance(err, errors.HooksFailure):
    obuf.write("Failure: hooks general failure: %s" % msg)
  elif isinstance(err, errors.ResolverError):
    this_host = netutils.Hostname.GetSysName()
    if err.args[0] == this_host:
      msg = "Failure: can't resolve my own hostname ('%s')"
    else:
      msg = "Failure: can't resolve hostname '%s'"
    obuf.write(msg % err.args[0])
  elif isinstance(err, errors.OpPrereqError):
    # The two-argument form carries (message, error code)
    if len(err.args) == 2:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\nerror type: %s, error details:\n%s" %
                 (err.args[1], err.args[0]))
    else:
      obuf.write("Failure: prerequisites not met for this"
                 " operation:\n%s" % msg)
  elif isinstance(err, errors.OpExecError):
    obuf.write("Failure: command execution error:\n%s" % msg)
  elif isinstance(err, errors.TagError):
    obuf.write("Failure: invalid tag(s) given:\n%s" % msg)
  elif isinstance(err, errors.JobQueueDrainError):
    obuf.write("Failure: the job queue is marked for drain and doesn't"
               " accept new requests\n")
  elif isinstance(err, errors.JobQueueFull):
    obuf.write("Failure: the job queue is full and doesn't accept new"
               " job submissions until old jobs are archived\n")
  elif isinstance(err, errors.TypeEnforcementError):
    obuf.write("Parameter Error: %s" % msg)
  elif isinstance(err, errors.ParameterError):
    obuf.write("Failure: unknown/wrong parameter name '%s'" % msg)
  elif isinstance(err, rpcerr.NoMasterError):
    # Identify which daemon's socket could not be reached
    if err.args[0] == pathutils.MASTER_SOCKET:
      daemon = "the master daemon"
    elif err.args[0] == pathutils.QUERY_SOCKET:
      daemon = "the config daemon"
    else:
      daemon = "socket '%s'" % str(err.args[0])
    obuf.write("Cannot communicate with %s.\nIs the process running"
               " and listening for connections?" % daemon)
  elif isinstance(err, rpcerr.TimeoutError):
    obuf.write("Timeout while talking to the master daemon. Jobs might have"
               " been submitted and will continue to run even if the call"
               " timed out. Useful commands in this situation are \"gnt-job"
               " list\", \"gnt-job cancel\" and \"gnt-job watch\". Error:\n")
    obuf.write(msg)
  elif isinstance(err, rpcerr.PermissionError):
    obuf.write("It seems you don't have permissions to connect to the"
               " master daemon.\nPlease retry as a different user.")
  elif isinstance(err, rpcerr.ProtocolError):
    obuf.write("Unhandled protocol error while talking to the master daemon:\n"
               "%s" % msg)
  elif isinstance(err, errors.JobLost):
    obuf.write("Error checking job status: %s" % msg)
  elif isinstance(err, errors.QueryFilterParseError):
    obuf.write("Error while parsing query filter: %s\n" % err.args[0])
    obuf.write("\n".join(err.GetDetails()))
  elif isinstance(err, errors.GenericError):
    obuf.write("Unhandled Ganeti error: %s" % msg)
  elif isinstance(err, JobSubmittedException):
    # Not an error: the job was submitted via --submit, report its ID
    obuf.write("JobID: %s\n" % err.args[0])
    retcode = 0
  else:
    obuf.write("Unhandled exception: %s" % msg)
  return retcode, obuf.getvalue().rstrip("\n")
1202
def GenericMain(commands, override=None, aliases=None,
                env_override=frozenset()):
  """Generic main function for all the gnt-* commands.

  @param commands: a dictionary with a special structure, see the design doc
                   for command line handling.
  @param override: if not None, we expect a dictionary with keys that will
                   override command line options; this can be used to pass
                   options from the scripts to generic functions
  @param aliases: dictionary with command aliases {'alias': 'target, ...}
  @param env_override: list of environment names which are allowed to submit
                       default args for commands
  @return: the exit code of the executed command, or an error exit code
           derived from any exception raised while running it

  """
  # save the program name and the entire command line for later logging
  if sys.argv:
    binary = os.path.basename(sys.argv[0])
    if not binary:
      # os.path.basename() yields an empty string for paths ending in a
      # separator; fall back to the raw argv[0] in that case
      binary = sys.argv[0]

    if len(sys.argv) >= 2:
      logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
    else:
      logname = binary

    cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
  else:
    # Can happen when the interpreter is started without arguments
    binary = "<unknown program>"
    cmdline = "<unknown>"

  if aliases is None:
    aliases = {}

  try:
    (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
                                       env_override)
  except _ShowVersion:
    # --version was requested
    ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
             constants.RELEASE_VERSION)
    return constants.EXIT_SUCCESS
  except _ShowUsage, err:
    # --help or a usage error; exit code depends on which one
    for line in _FormatUsage(binary, commands):
      ToStdout(line)

    if err.exit_error:
      return constants.EXIT_FAILURE
    else:
      return constants.EXIT_SUCCESS
  except errors.ParameterError, err:
    result, err_msg = FormatError(err)
    ToStderr(err_msg)
    return 1

  if func is None: # parse error
    return 1

  # Apply caller-supplied option overrides on top of the parsed options
  if override is not None:
    for key, val in override.iteritems():
      setattr(options, key, val)

  utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
                     stderr_logging=True)

  logging.debug("Command line: %s", cmdline)

  try:
    result = func(options, args)
  except (errors.GenericError, rpcerr.ProtocolError,
          JobSubmittedException), err:
    # Known error types are converted to a message plus an exit code
    result, err_msg = FormatError(err)
    logging.exception("Error during command processing")
    ToStderr(err_msg)
  except KeyboardInterrupt:
    result = constants.EXIT_FAILURE
    ToStderr("Aborted. Note that if the operation created any jobs, they"
             " might have been submitted and"
             " will continue to run in the background.")
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise

  return result
1289
1290 1291 -def ParseNicOption(optvalue):
1292 """Parses the value of the --net option(s). 1293 1294 """ 1295 try: 1296 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue) 1297 except (TypeError, ValueError), err: 1298 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err), 1299 errors.ECODE_INVAL) 1300 1301 nics = [{}] * nic_max 1302 for nidx, ndict in optvalue: 1303 nidx = int(nidx) 1304 1305 if not isinstance(ndict, dict): 1306 raise errors.OpPrereqError("Invalid nic/%d value: expected dict," 1307 " got %s" % (nidx, ndict), errors.ECODE_INVAL) 1308 1309 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES) 1310 1311 nics[nidx] = ndict 1312 1313 return nics
1314
def FixHvParams(hvparams):
  """Normalizes the C{usb_devices} hypervisor parameter in place.

  In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
  comma to space because commas cannot be accepted on the command line (they
  already act as the separator between different hvparams). Still, RAPI
  should be able to accept commas for backwards compatibility. Therefore, we
  convert spaces into commas here, and we keep the old parsing logic
  everywhere else.

  @type hvparams: dict
  @param hvparams: hypervisor parameters, modified in place

  """
  # When the parameter is absent there is nothing to normalize
  if constants.HV_USB_DEVICES in hvparams:
    hvparams[constants.HV_USB_DEVICES] = \
      hvparams[constants.HV_USB_DEVICES].replace(" ", ",")
1329
1330 1331 -def GenericInstanceCreate(mode, opts, args):
1332 """Add an instance to the cluster via either creation or import. 1333 1334 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT 1335 @param opts: the command line options selected by the user 1336 @type args: list 1337 @param args: should contain only one element, the new instance name 1338 @rtype: int 1339 @return: the desired exit code 1340 1341 """ 1342 instance = args[0] 1343 forthcoming = opts.ensure_value("forthcoming", False) 1344 commit = opts.ensure_value("commit", False) 1345 1346 if forthcoming and commit: 1347 raise errors.OpPrereqError("Creating an instance only forthcoming and" 1348 " commiting it are mutally exclusive", 1349 errors.ECODE_INVAL) 1350 1351 (pnode, snode) = SplitNodeOption(opts.node) 1352 1353 hypervisor = None 1354 hvparams = {} 1355 if opts.hypervisor: 1356 hypervisor, hvparams = opts.hypervisor 1357 1358 if opts.nics: 1359 nics = ParseNicOption(opts.nics) 1360 elif opts.no_nics: 1361 # no nics 1362 nics = [] 1363 elif mode == constants.INSTANCE_CREATE: 1364 # default of one nic, all auto 1365 nics = [{}] 1366 else: 1367 # mode == import 1368 nics = [] 1369 1370 if opts.disk_template == constants.DT_DISKLESS: 1371 if opts.disks or opts.sd_size is not None: 1372 raise errors.OpPrereqError("Diskless instance but disk" 1373 " information passed", errors.ECODE_INVAL) 1374 disks = [] 1375 else: 1376 if (not opts.disks and not opts.sd_size 1377 and mode == constants.INSTANCE_CREATE): 1378 raise errors.OpPrereqError("No disk information specified", 1379 errors.ECODE_INVAL) 1380 if opts.disks and opts.sd_size is not None: 1381 raise errors.OpPrereqError("Please use either the '--disk' or" 1382 " '-s' option", errors.ECODE_INVAL) 1383 if opts.sd_size is not None: 1384 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})] 1385 1386 if opts.disks: 1387 try: 1388 disk_max = max(int(didx[0]) + 1 for didx in opts.disks) 1389 except ValueError, err: 1390 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err), 
1391 errors.ECODE_INVAL) 1392 disks = [{}] * disk_max 1393 else: 1394 disks = [] 1395 for didx, ddict in opts.disks: 1396 didx = int(didx) 1397 if not isinstance(ddict, dict): 1398 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict) 1399 raise errors.OpPrereqError(msg, errors.ECODE_INVAL) 1400 elif constants.IDISK_SIZE in ddict: 1401 if constants.IDISK_ADOPT in ddict: 1402 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed" 1403 " (disk %d)" % didx, errors.ECODE_INVAL) 1404 try: 1405 ddict[constants.IDISK_SIZE] = \ 1406 utils.ParseUnit(ddict[constants.IDISK_SIZE]) 1407 except ValueError, err: 1408 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" % 1409 (didx, err), errors.ECODE_INVAL) 1410 elif constants.IDISK_ADOPT in ddict: 1411 if constants.IDISK_SPINDLES in ddict: 1412 raise errors.OpPrereqError("spindles is not a valid option when" 1413 " adopting a disk", errors.ECODE_INVAL) 1414 if mode == constants.INSTANCE_IMPORT: 1415 raise errors.OpPrereqError("Disk adoption not allowed for instance" 1416 " import", errors.ECODE_INVAL) 1417 ddict[constants.IDISK_SIZE] = 0 1418 else: 1419 raise errors.OpPrereqError("Missing size or adoption source for" 1420 " disk %d" % didx, errors.ECODE_INVAL) 1421 if constants.IDISK_SPINDLES in ddict: 1422 ddict[constants.IDISK_SPINDLES] = int(ddict[constants.IDISK_SPINDLES]) 1423 1424 disks[didx] = ddict 1425 1426 if opts.tags is not None: 1427 tags = opts.tags.split(",") 1428 else: 1429 tags = [] 1430 1431 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT) 1432 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES) 1433 FixHvParams(hvparams) 1434 1435 osparams_private = opts.osparams_private or serializer.PrivateDict() 1436 osparams_secret = opts.osparams_secret or serializer.PrivateDict() 1437 1438 helper_startup_timeout = opts.helper_startup_timeout 1439 helper_shutdown_timeout = opts.helper_shutdown_timeout 1440 1441 if mode == constants.INSTANCE_CREATE: 1442 
start = opts.start 1443 os_type = opts.os 1444 force_variant = opts.force_variant 1445 src_node = None 1446 src_path = None 1447 no_install = opts.no_install 1448 identify_defaults = False 1449 compress = constants.IEC_NONE 1450 if opts.instance_communication is None: 1451 instance_communication = False 1452 else: 1453 instance_communication = opts.instance_communication 1454 elif mode == constants.INSTANCE_IMPORT: 1455 if forthcoming: 1456 raise errors.OpPrereqError("forthcoming instances can only be created," 1457 " not imported") 1458 start = False 1459 os_type = None 1460 force_variant = False 1461 src_node = opts.src_node 1462 src_path = opts.src_dir 1463 no_install = None 1464 identify_defaults = opts.identify_defaults 1465 compress = opts.compress 1466 instance_communication = False 1467 else: 1468 raise errors.ProgrammerError("Invalid creation mode %s" % mode) 1469 1470 op = opcodes.OpInstanceCreate( 1471 forthcoming=forthcoming, 1472 commit=commit, 1473 instance_name=instance, 1474 disks=disks, 1475 disk_template=opts.disk_template, 1476 group_name=opts.nodegroup, 1477 nics=nics, 1478 conflicts_check=opts.conflicts_check, 1479 pnode=pnode, snode=snode, 1480 ip_check=opts.ip_check, 1481 name_check=opts.name_check, 1482 wait_for_sync=opts.wait_for_sync, 1483 file_storage_dir=opts.file_storage_dir, 1484 file_driver=opts.file_driver, 1485 iallocator=opts.iallocator, 1486 hypervisor=hypervisor, 1487 hvparams=hvparams, 1488 beparams=opts.beparams, 1489 osparams=opts.osparams, 1490 osparams_private=osparams_private, 1491 osparams_secret=osparams_secret, 1492 mode=mode, 1493 opportunistic_locking=opts.opportunistic_locking, 1494 start=start, 1495 os_type=os_type, 1496 force_variant=force_variant, 1497 src_node=src_node, 1498 src_path=src_path, 1499 compress=compress, 1500 tags=tags, 1501 no_install=no_install, 1502 identify_defaults=identify_defaults, 1503 ignore_ipolicy=opts.ignore_ipolicy, 1504 instance_communication=instance_communication, 1505 
helper_startup_timeout=helper_startup_timeout, 1506 helper_shutdown_timeout=helper_shutdown_timeout) 1507 1508 SubmitOrSend(op, opts) 1509 return 0
1510
class _RunWhileDaemonsStoppedHelper(object):
  """Helper class for L{RunWhileDaemonsStopped} to simplify state management

  """
  def __init__(self, feedback_fn, cluster_name, master_node,
               online_nodes, ssh_ports, exclude_daemons, debug,
               verbose):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes
    @type ssh_ports: list
    @param ssh_ports: List of SSH ports of online nodes
    @type exclude_daemons: list of string
    @param exclude_daemons: list of daemons that will be restarted on master
                            after all others are shutdown
    @type debug: boolean
    @param debug: show debug output
    @type verbose: boolean
    @param verbose: show verbose output

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes
    # Map each node name to its SSH port (lists are expected to be parallel)
    self.ssh_ports = dict(zip(online_nodes, ssh_ports))

    self.ssh = ssh.SshRunner(self.cluster_name)

    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    self.exclude_daemons = exclude_daemons
    self.debug = debug
    self.verbose = verbose

    assert self.master_node not in self.nonmaster_nodes

  def _RunCmd(self, node_name, cmd):
    """Runs a command on the local or a remote machine.

    @type node_name: string
    @param node_name: Machine name
    @type cmd: list
    @param cmd: Command
    @raise errors.OpExecError: if the command failed

    """
    if node_name is None or node_name == self.master_node:
      # No need to use SSH
      result = utils.RunCmd(cmd)
    else:
      result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
                            utils.ShellQuoteArgs(cmd),
                            port=self.ssh_ports[node_name])

    if result.failed:
      errmsg = ["Failed to run command %s" % result.cmd]
      if node_name:
        errmsg.append("on node %s" % node_name)
      errmsg.append(": exitcode %s and error %s" %
                    (result.exit_code, result.output))
      raise errors.OpExecError(" ".join(errmsg))

  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    @type fn: callable
    @param fn: Function to be called
    @return: the return value of C{fn}

    """
    # Pause watcher by acquiring an exclusive lock on watcher state file
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # TODO: Currently, this just blocks. There's no timeout.
      # TODO: Should it be a shared lock?
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons, so that no new jobs can come in and all running
      # ones are finished
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons on all nodes; the master node is handled first
        online_nodes = [self.master_node] + [n for n in self.online_nodes
                                             if n != self.master_node]
        for node_name in online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
          # Starting any daemons listed as exception
          if node_name == self.master_node:
            for daemon in self.exclude_daemons:
              self.feedback_fn("Starting daemon '%s' on %s" % (daemon,
                                                               node_name))
              self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start", daemon])

        # All daemons are shut down now
        try:
          return fn(self, *args)
        except Exception, err:
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster again, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          # Stopping any daemons listed as exception.
          # This might look unnecessary, but it makes sure that daemon-util
          # starts all daemons in the right order.
          if node_name == self.master_node:
            self.exclude_daemons.reverse()
          for daemon in self.exclude_daemons:
            self.feedback_fn("Stopping daemon '%s' on %s" % (daemon,
                                                             node_name))
            self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop", daemon])
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])

    finally:
      # Resume watcher
      watcher_block.Close()
1641
def RunWhileDaemonsStopped(feedback_fn, exclude_daemons, fn, *args, **kwargs):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type exclude_daemons: list of string
  @param exclude_daemons: list of daemons that stopped, but immediately
                          restarted on the master to be available when calling
                          'fn'. If None, all daemons will be stopped and none
                          will be started before calling 'fn'.
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  feedback_fn("Gathering cluster information")

  # This ensures we're running on the master daemon
  client = GetClient()

  (cluster_name, master_node) = \
    client.QueryConfigValues(["cluster_name", "master_node"])

  nodes = GetOnlineNodes([], cl=client)
  ports = GetNodesSshPorts(nodes, client)

  # Don't keep a reference to the client. The master daemon will go away.
  del client

  assert master_node in nodes

  helper = _RunWhileDaemonsStoppedHelper(
    feedback_fn, cluster_name, master_node, nodes, ports,
    [] if exclude_daemons is None else exclude_daemons,
    kwargs.get("debug", False), kwargs.get("verbose", False))
  return helper.Call(fn, *args)
1681
def RunWhileClusterStopped(feedback_fn, fn, *args):
  """Calls a function while all cluster daemons are stopped.

  @type feedback_fn: callable
  @param feedback_fn: Feedback function
  @type fn: callable
  @param fn: Function to be called when daemons are stopped

  """
  # Delegate with no daemons excluded from the shutdown
  RunWhileDaemonsStopped(feedback_fn, None, fn, *args)
1693
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list of strings
  @return: the formatted table lines

  """
  if units is None:
    # Pick default unit: fixed megabytes for parseable (separator) output,
    # human-readable otherwise
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields) # pylint: disable=W0142
  unitfields = utils.FieldSet(*unitfields) # pylint: disable=W0142

  # Build one %-format specifier per column
  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle better unknown fields (either revert to old
      # style of raising exception, or deal more intelligently with
      # variable fields)
      headers[field] = field
    if separator is not None:
      # Separator mode: plain fields, no width computation
      format_fields.append("%s")
    elif numfields.Matches(field):
      # Right-align numeric columns; width is supplied dynamically via '*'
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    # mlens collects the maximum width seen per column
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    # Escape '%' in the separator so the later %-formatting is safe
    format_str = separator.replace("%", "%%").join(format_fields)

  # NOTE: rows in 'data' are modified in place below (unit formatting and
  # string conversion) while column widths are being collected
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          # Not numeric; leave the value as-is
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        # Headers also contribute to the column width
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    if fields and not numfields.Matches(fields[-1]):
      # Zero width for a left-aligned last column avoids trailing whitespace
      mlens[-1] = 0

  for line in data:
    args = []
    if line is None:
      # Missing rows are rendered as dashes
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
1801
1802 1803 -def _FormatBool(value):
1804 """Formats a boolean value as a string. 1805 1806 """ 1807 if value: 1808 return "Y" 1809 return "N"


#: Default formatting for query results, indexed by field type; each value is
#: a (formatting callback, align right) tuple. L{constants.QFT_UNIT} is
#: deliberately not listed here, since its formatting depends on the
#: runtime-selected unit (see L{_GetColumnFormatter})
_DEFAULT_FORMAT_QUERY = {
  constants.QFT_TEXT: (str, False),
  constants.QFT_BOOL: (_FormatBool, False),
  constants.QFT_NUMBER: (str, True),
  constants.QFT_NUMBER_FLOAT: (str, True),
  constants.QFT_TIMESTAMP: (utils.FormatTime, False),
  constants.QFT_OTHER: (str, False),
  constants.QFT_UNKNOWN: (str, False),
  }
1822 1823 1824 -def _GetColumnFormatter(fdef, override, unit):
1825 """Returns formatting function for a field. 1826 1827 @type fdef: L{objects.QueryFieldDefinition} 1828 @type override: dict 1829 @param override: Dictionary for overriding field formatting functions, 1830 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY} 1831 @type unit: string 1832 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} 1833 @rtype: tuple; (callable, bool) 1834 @return: Returns the function to format a value (takes one parameter) and a 1835 boolean for aligning the value on the right-hand side 1836 1837 """ 1838 fmt = override.get(fdef.name, None) 1839 if fmt is not None: 1840 return fmt 1841 1842 assert constants.QFT_UNIT not in _DEFAULT_FORMAT_QUERY 1843 1844 if fdef.kind == constants.QFT_UNIT: 1845 # Can't keep this information in the static dictionary 1846 return (lambda value: utils.FormatUnit(value, unit), True) 1847 1848 fmt = _DEFAULT_FORMAT_QUERY.get(fdef.kind, None) 1849 if fmt is not None: 1850 return fmt 1851 1852 raise NotImplementedError("Can't format column type '%s'" % fdef.kind)
1853
1854 1855 -class _QueryColumnFormatter(object):
1856 """Callable class for formatting fields of a query. 1857 1858 """
1859 - def __init__(self, fn, status_fn, verbose):
1860 """Initializes this class. 1861 1862 @type fn: callable 1863 @param fn: Formatting function 1864 @type status_fn: callable 1865 @param status_fn: Function to report fields' status 1866 @type verbose: boolean 1867 @param verbose: whether to use verbose field descriptions or not 1868 1869 """ 1870 self._fn = fn 1871 self._status_fn = status_fn 1872 self._verbose = verbose
1873
1874 - def __call__(self, data):
1875 """Returns a field's string representation. 1876 1877 """ 1878 (status, value) = data 1879 1880 # Report status 1881 self._status_fn(status) 1882 1883 if status == constants.RS_NORMAL: 1884 return self._fn(value) 1885 1886 assert value is None, \ 1887 "Found value %r for abnormal status %s" % (value, status) 1888 1889 return FormatResultError(status, self._verbose)
1890
def FormatResultError(status, verbose):
  """Formats result status other than L{constants.RS_NORMAL}.

  @param status: The result status
  @type verbose: boolean
  @param verbose: Whether to return the verbose text
  @return: Text of result status

  """
  assert status != constants.RS_NORMAL, \
    "FormatResultError called with status equal to constants.RS_NORMAL"
  try:
    # Each entry is a (verbose text, terse text) pair
    descriptions = constants.RSS_DESCRIPTION[status]
  except KeyError:
    raise NotImplementedError("Unknown status %s" % status)
  return descriptions[0] if verbose else descriptions[1]
1911
def FormatQueryResult(result, unit=None, format_override=None, separator=None,
                      header=False, verbose=False):
  """Formats data in L{objects.QueryResponse}.

  @type result: L{objects.QueryResponse}
  @param result: result of query operation
  @type unit: string
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT},
      see L{utils.text.FormatUnit}
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to output header row
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @return: tuple of an overall status (one of L{QR_NORMAL}, L{QR_UNKNOWN} or
      L{QR_INCOMPLETE}) and the formatted table lines

  """
  if unit is None:
    # Default unit: fixed megabytes for parseable (separator) output,
    # human-readable otherwise
    if separator:
      unit = "m"
    else:
      unit = "h"

  if format_override is None:
    format_override = {}

  # Per-status counters, filled in by _RecordStatus while the table is
  # being rendered
  stats = dict.fromkeys(constants.RS_ALL, 0)

  def _RecordStatus(status):
    if status in stats:
      stats[status] += 1

  columns = []
  for fdef in result.fields:
    assert fdef.title and fdef.name
    (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
    # _RecordStatus is invoked by the column formatter for every cell, so
    # the statistics are complete once FormatTable has run
    columns.append(TableColumn(fdef.title,
                               _QueryColumnFormatter(fn, _RecordStatus,
                                                     verbose),
                               align_right))

  table = FormatTable(result.data, columns, header, separator)

  # Collect statistics
  assert len(stats) == len(constants.RS_ALL)
  assert compat.all(count >= 0 for count in stats.values())

  # Determine overall status. If there was no data, unknown fields must be
  # detected via the field definitions.
  if (stats[constants.RS_UNKNOWN] or
      (not result.data and _GetUnknownFields(result.fields))):
    status = QR_UNKNOWN
  elif compat.any(count > 0 for key, count in stats.items()
                  if key != constants.RS_NORMAL):
    status = QR_INCOMPLETE
  else:
    status = QR_NORMAL

  return (status, table)
1976 1977 -def _GetUnknownFields(fdefs):
1978 """Returns list of unknown fields included in C{fdefs}. 1979 1980 @type fdefs: list of L{objects.QueryFieldDefinition} 1981 1982 """ 1983 return [fdef for fdef in fdefs 1984 if fdef.kind == constants.QFT_UNKNOWN]
1985
def _WarnUnknownFields(fdefs):
  """Prints a warning to stderr if a query included unknown fields.

  @type fdefs: list of L{objects.QueryFieldDefinition}
  @rtype: bool
  @return: whether any unknown field was found

  """
  unknown = _GetUnknownFields(fdefs)
  if not unknown:
    return False

  ToStderr("Warning: Queried for unknown fields %s",
           utils.CommaJoin(fdef.name for fdef in unknown))
  return True
2000
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
      None for automatic choice (human-readable for non-separator usage,
      otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
      indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
      L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @type isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
      any simple filters built by namefield should use integer values to
      reflect that
  @rtype: int
  @return: L{constants.EXIT_SUCCESS} or L{constants.EXIT_UNKNOWN_FIELD}

  """
  if not names:
    names = None

  namefilter = qlang.MakeFilter(names, force_filter, namefield=namefield,
                                isnumeric=isnumeric)

  # Combine the name-based filter with any caller-supplied filter
  if qfilter is None:
    qfilter = namefilter
  elif namefilter is not None:
    qfilter = [qlang.OP_AND, namefilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for line in data:
    ToStdout(line)

  # The warning above and the formatted status must agree on whether
  # unknown fields were requested
  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
2072
def _FieldDescValues(fdef):
  """Helper function for L{GenericListFields} to get query field description.

  @type fdef: L{objects.QueryFieldDefinition}
  @rtype: list
  @return: name, human-readable kind, title and documentation of the field

  """
  kind_name = _QFT_NAMES.get(fdef.kind, fdef.kind)
  return [fdef.name, kind_name, fdef.title, fdef.doc]
def GenericListFields(resource, fields, separator, header, cl=None):
  """Generic implementation for listing fields for a resource.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @param cl: if not None, luxi client to use; a new client is created otherwise
  @return: L{constants.EXIT_UNKNOWN_FIELD} if unknown fields were requested,
    L{constants.EXIT_SUCCESS} otherwise

  """
  if cl is None:
    cl = GetClient()

  # An empty field list means "describe all fields"
  if not fields:
    fields = None

  response = cl.QueryFields(resource, fields)

  found_unknown = _WarnUnknownFields(response.fields)

  columns = [
    TableColumn("Name", str, False),
    TableColumn("Type", str, False),
    TableColumn("Title", str, False),
    TableColumn("Description", str, False),
    ]

  rows = map(_FieldDescValues, response.fields)

  for line in FormatTable(rows, columns, header, separator):
    ToStdout(line)

  if found_unknown:
    return constants.EXIT_UNKNOWN_FIELD

  return constants.EXIT_SUCCESS
class TableColumn(object):
  """Description of a single column for L{FormatTable}.

  """
  def __init__(self, title, fn, align_right):
    """Initializes this class.

    @type title: string
    @param title: Column title
    @type fn: callable
    @param fn: Function used to format the values in this column
    @type align_right: bool
    @param align_right: Whether to align values on the right-hand side

    """
    self.align_right = align_right
    self.format = fn
    self.title = title
def _GetColFormatString(width, align_right):
  """Builds the printf-style format string for one table column.

  @type width: int
  @param width: Minimum field width
  @type align_right: bool
  @param align_right: Right-align the value if True, left-align otherwise

  """
  # A "-" flag in %-formatting means left alignment
  return "%%%s%ss" % ("" if align_right else "-", width)
def FormatTable(rows, columns, header, separator):
  """Formats data as a table.

  @type rows: list of lists
  @param rows: Row data, one list per row
  @type columns: list of L{TableColumn}
  @param columns: Column descriptions
  @type header: bool
  @param header: Whether to show header row
  @type separator: string or None
  @param separator: String used to separate columns; if None, columns are
    padded with spaces instead

  """
  if header:
    data = [[col.title for col in columns]]
    colwidth = [len(col.title) for col in columns]
  else:
    data = []
    colwidth = [0] * len(columns)

  # Render every cell through its column's formatting function
  for row in rows:
    assert len(row) == len(columns)

    rendered = [col.format(value) for (value, col) in zip(row, columns)]

    if separator is None:
      # Track the widest value seen per column
      colwidth = [max(oldwidth, len(value))
                  for (oldwidth, value) in zip(colwidth, rendered)]

    data.append(rendered)

  if separator is not None:
    # Separator-based output needs no padding
    return [separator.join(row) for row in data]

  if columns and not columns[-1].align_right:
    # Avoid unnecessary spaces at end of line
    colwidth[-1] = 0

  # Build format string
  fmt = " ".join(_GetColFormatString(width, col.align_right)
                 for (col, width) in zip(columns, colwidth))

  return [fmt % tuple(row) for row in data]
def FormatTimestamp(ts):
  """Formats a given timestamp.

  @type ts: timestamp
  @param ts: a timeval-type timestamp, a tuple of seconds and microseconds

  @rtype: string
  @return: a string with the formatted timestamp, or "?" if the input is
    not a two-element tuple/list

  """
  if isinstance(ts, (tuple, list)) and len(ts) == 2:
    (sec, usecs) = ts
    return utils.FormatTime(sec, usecs=usecs)
  # Anything that is not a (seconds, microseconds) pair is unknown
  return "?"
def ParseTimespec(value):
  """Parse a time specification.

  The following suffixes are recognized:

    - s: seconds
    - m: minutes
    - h: hours
    - d: days
    - w: weeks

  Without any suffix, the value will be taken to be in seconds.

  """
  value = str(value)
  if not value:
    raise errors.OpPrereqError("Empty time specification passed",
                               errors.ECODE_INVAL)

  multipliers = {
    "s": 1,
    "m": 60,
    "h": 3600,
    "d": 86400,
    "w": 604800,
    }

  if value[-1] in multipliers:
    factor = multipliers[value[-1]]
    value = value[:-1]
    if not value:
      # nothing left after stripping the suffix
      raise errors.OpPrereqError("Invalid time specification (only"
                                 " suffix passed)", errors.ECODE_INVAL)
  else:
    factor = 1

  try:
    return int(value) * factor
  except (TypeError, ValueError):
    raise errors.OpPrereqError("Invalid time specification '%s'" % value,
                               errors.ECODE_INVAL)
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names of online nodes.

  This function will also log a note on stderr with the names of
  the offline nodes that were skipped, unless C{nowarn} is set.

  @param nodes: if not empty, use only this subset of nodes (minus the
    offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
    offline nodes that are skipped; if this parameter is True the
    note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
    names, useful for doing network traffic over the replication interface
    (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
    (useful in coordination with secondary_ips where we cannot check our
    node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  qfilter = []

  if nodes:
    qfilter.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # Accept the group being given either by name or by UUID
    qfilter.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                    [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    qfilter.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  # Combine all sub-filters with AND; a single filter is used as-is
  if qfilter:
    if len(qfilter) > 1:
      final_filter = [qlang.OP_AND] + qfilter
    else:
      assert len(qfilter) == 1
      final_filter = qfilter[0]
  else:
    final_filter = None

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  # Each result row contains one 2-tuple per requested field; the second
  # element of each tuple is the field's value
  def _IsOffline(row):
    (_, (_, offline), _) = row
    return offline

  def _GetName(row):
    ((_, name), _, _) = row
    return name

  def _GetSip(row):
    (_, _, (_, sip)) = row
    return sip

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  if secondary_ips:
    fn = _GetSip
  else:
    fn = _GetName

  return map(fn, online)
def GetNodesSshPorts(nodes, cl):
  """Retrieves SSH ports of given nodes.

  @param nodes: the names of nodes
  @type nodes: a list of strings
  @param cl: a client to use for the query
  @type cl: L{ganeti.luxi.Client}
  @return: the list of SSH ports corresponding to the nodes
  @rtype: a list of ports (one per node, in the same order)

  """
  # Each query row contains a single field, extract its value
  return [row[0]
          for row in cl.QueryNodes(names=nodes,
                                   fields=["ndp/ssh_port"],
                                   use_locking=False)]
def GetNodeUUIDs(nodes, cl):
  """Retrieves the UUIDs of given nodes.

  @param nodes: the names of nodes
  @type nodes: a list of string
  @param cl: a client to use for the query
  @type cl: L{ganeti.luxi.Client}
  @return: the list of UUIDs corresponding to the nodes
  @rtype: a list of UUIDs (one per node, in the same order)

  """
  # Each query row contains a single field, extract its value
  return [row[0]
          for row in cl.QueryNodes(names=nodes,
                                   fields=["uuid"],
                                   use_locking=False)]
def _ToStream(stream, txt, *args):
  """Write a message to a stream, bypassing the logging system

  @type stream: file object
  @param stream: the file to which we should write
  @type txt: str
  @param txt: the message
  @param args: optional arguments interpolated into C{txt} via %-formatting

  """
  try:
    if args:
      args = tuple(args)
      stream.write(txt % args)
    else:
      stream.write(txt)
    stream.write("\n")
    stream.flush()
  except IOError, err:
    if err.errno == errno.EPIPE:
      # our terminal went away, we'll exit
      sys.exit(constants.EXIT_FAILURE)
    else:
      raise
def ToStdout(txt, *args):
  """Write a message to standard output, bypassing the logging system

  This is just a thin wrapper over L{_ToStream}.

  @type txt: str
  @param txt: the message
  @param args: optional %-interpolation arguments for C{txt}

  """
  _ToStream(sys.stdout, txt, *args)
def ToStdoutAndLoginfo(txt, *args):
  """Write a message to stdout and also log it at INFO level"""
  # Keep the console output first, then record the same message in the log
  ToStdout(txt, *args)
  logging.info(txt, *args)
def ToStderr(txt, *args):
  """Write a message to standard error, bypassing the logging system

  This is just a thin wrapper over L{_ToStream}.

  @type txt: str
  @param txt: the message
  @param args: optional %-interpolation arguments for C{txt}

  """
  _ToStream(sys.stderr, txt, *args)
class JobExecutor(object):
  """Class which manages the submission and execution of multiple jobs.

  Note that instances of this class should not be reused between
  GetResults() calls.

  """
  def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
    """Initializes this class.

    @param cl: if not None, luxi client to use; a new one is created otherwise
    @type verbose: bool
    @param verbose: whether to report the IDs of successfully submitted jobs
    @param opts: generic opcode options applied to queued opcodes (see
      L{SetGenericOpcodeOpts})
    @param feedback_fn: function passed to L{PollJob} while waiting for results

    """
    # self.queue holds (index, name, ops) tuples not yet submitted,
    # self.jobs holds (index, status, job_id, name) of submitted jobs;
    # the index preserves submission order when returning results
    self.queue = []
    if cl is None:
      cl = GetClient()
    self.cl = cl
    self.verbose = verbose
    self.jobs = []
    self.opts = opts
    self.feedback_fn = feedback_fn
    self._counter = itertools.count()

  @staticmethod
  def _IfName(name, fmt):
    """Helper function for formatting name.

    Returns C{fmt % name} if C{name} is set, the empty string otherwise.

    """
    if name:
      return fmt % name

    return ""

  def QueueJob(self, name, *ops):
    """Record a job for later submit.

    @type name: string
    @param name: a description of the job, will be used in WaitJobSet

    """
    SetGenericOpcodeOpts(ops, self.opts)
    self.queue.append((self._counter.next(), name, ops))

  def AddJobId(self, name, status, job_id):
    """Adds a job ID to the internal queue.

    @param name: a description of the job
    @param status: the submission status of the job
    @param job_id: the ID of the already-submitted job

    """
    self.jobs.append((self._counter.next(), status, job_id, name))

  def SubmitPending(self, each=False):
    """Submit all pending jobs.

    @type each: bool
    @param each: if True, submit jobs one by one instead of using
      SubmitManyJobs

    """
    if each:
      results = []
      for (_, _, ops) in self.queue:
        # SubmitJob will remove the success status, but raise an exception if
        # the submission fails, so we'll notice that anyway.
        results.append([True, self.cl.SubmitJob(ops)[0]])
    else:
      results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
    for ((status, data), (idx, name, _)) in zip(results, self.queue):
      self.jobs.append((idx, status, data, name))

  def _ChooseJob(self):
    """Choose a non-waiting/queued job to poll next.

    Removes the chosen job from C{self.jobs} and returns its
    (index, status, job_id, name) tuple.

    """
    assert self.jobs, "_ChooseJob called with empty job list"

    result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
                               ["status"])
    assert result

    for job_data, status in zip(self.jobs, result):
      if (isinstance(status, list) and status and
          status[0] in (constants.JOB_STATUS_QUEUED,
                        constants.JOB_STATUS_WAITING,
                        constants.JOB_STATUS_CANCELING)):
        # job is still present and waiting
        continue
      # good candidate found (either running job or lost job)
      self.jobs.remove(job_data)
      return job_data

    # no job found
    return self.jobs.pop(0)

  def GetResults(self):
    """Wait for and return the results of all jobs.

    @rtype: list
    @return: list of tuples (success, job results), in the same order
      as the submitted jobs; if a job has failed, instead of the result
      there will be the error message

    """
    if not self.jobs:
      self.SubmitPending()
    results = []
    if self.verbose:
      ok_jobs = [row[2] for row in self.jobs if row[1]]
      if ok_jobs:
        ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))

    # first, remove any non-submitted jobs
    self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
    for idx, _, jid, name in failures:
      ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
      results.append((idx, False, jid))

    while self.jobs:
      (idx, _, jid, name) = self._ChooseJob()
      ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
      try:
        job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
        success = True
      except errors.JobLost, err:
        _, job_result = FormatError(err)
        ToStderr("Job %s%s has been archived, cannot check its result",
                 jid, self._IfName(name, " for %s"))
        success = False
      except (errors.GenericError, rpcerr.ProtocolError), err:
        _, job_result = FormatError(err)
        success = False
        # the error message will always be shown, verbose or not
        ToStderr("Job %s%s has failed: %s",
                 jid, self._IfName(name, " for %s"), job_result)

      results.append((idx, success, job_result))

    # sort based on the index, then drop it
    results.sort()
    results = [i[1:] for i in results]

    return results

  def WaitOrShow(self, wait):
    """Wait for job results or only print the job IDs.

    @type wait: boolean
    @param wait: whether to wait or not

    """
    if wait:
      return self.GetResults()
    else:
      if not self.jobs:
        self.SubmitPending()
      for _, status, result, name in self.jobs:
        if status:
          ToStdout("%s: %s", result, name)
        else:
          ToStderr("Failure for %s: %s", name, result)
      return [row[1:3] for row in self.jobs]
def FormatParamsDictInfo(param_dict, actual, roman=False):
  """Formats a parameter dictionary.

  @type param_dict: dict
  @param param_dict: the own parameters
  @type actual: dict
  @param actual: the current parameter set (including defaults)
  @rtype: dict
  @return: dictionary where the value of each parameter is either a fully
    formatted string or a dictionary containing formatted strings

  """
  formatted = {}
  for (key, value) in actual.items():
    if isinstance(value, dict) and value:
      # Recurse into non-empty sub-dictionaries
      formatted[key] = FormatParamsDictInfo(param_dict.get(key, {}), value,
                                            roman)
    else:
      # Mark values not explicitly set as defaults
      fallback = "default (%s)" % compat.TryToRoman(value, roman)
      formatted[key] = str(compat.TryToRoman(param_dict.get(key, fallback),
                                             roman))
  return formatted
def _FormatListInfoDefault(data, def_data):
  """Formats a list of values, falling back to the defaults when unset.

  @param data: the list to format, or None to use the defaults
  @param def_data: the default list, shown with a "default" marker
  @rtype: string

  """
  if data is None:
    return "default (%s)" % utils.CommaJoin(def_data)
  return utils.CommaJoin(data)
def FormatPolicyInfo(custom_ipolicy, eff_ipolicy, iscluster, roman=False):
  """Formats an instance policy.

  @type custom_ipolicy: dict
  @param custom_ipolicy: own policy
  @type eff_ipolicy: dict
  @param eff_ipolicy: effective policy (including defaults); ignored for
    cluster
  @type iscluster: bool
  @param iscluster: the policy is at cluster level
  @type roman: bool
  @param roman: whether to print the values in roman numerals
  @rtype: list of pairs
  @return: formatted data, suitable for L{PrintGenericInfo}

  """
  if iscluster:
    # At cluster level the custom policy is also the effective one
    eff_ipolicy = custom_ipolicy

  minmax_out = []
  custom_minmax = custom_ipolicy.get(constants.ISPECS_MINMAX)
  if custom_minmax:
    # Show the custom bounds, which are complete on their own
    for (k, minmax) in enumerate(custom_minmax):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo(minmax[key], minmax[key], roman))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  else:
    # No custom bounds: show the effective ones without any "own" values
    for (k, minmax) in enumerate(eff_ipolicy[constants.ISPECS_MINMAX]):
      minmax_out.append([
        ("%s/%s" % (key, k),
         FormatParamsDictInfo({}, minmax[key], roman))
        for key in constants.ISPECS_MINMAX_KEYS
        ])
  ret = [("bounds specs", minmax_out)]

  if iscluster:
    # Standard specs only exist at cluster level
    stdspecs = custom_ipolicy[constants.ISPECS_STD]
    ret.append(
      (constants.ISPECS_STD,
       FormatParamsDictInfo(stdspecs, stdspecs, roman))
      )

  ret.append(
    ("allowed disk templates",
     _FormatListInfoDefault(custom_ipolicy.get(constants.IPOLICY_DTS),
                            eff_ipolicy[constants.IPOLICY_DTS]))
    )
  to_roman = compat.TryToRoman
  ret.extend([
    (key, str(to_roman(custom_ipolicy.get(key,
                                          "default (%s)" % eff_ipolicy[key]),
                       roman)))
    for key in constants.IPOLICY_PARAMETERS
    ])
  return ret
def _PrintSpecsParameters(buf, specs):
  """Writes the spec parameters as comma-separated "key=value" pairs.

  @type buf: StringIO
  @param buf: stream to write into
  @type specs: dict
  @param specs: parameter/value mapping, written in sorted key order

  """
  buf.write(",".join("%s=%s" % item for item in sorted(specs.items())))
def PrintIPolicyCommand(buf, ipolicy, isgroup):
  """Print the command option used to generate the given instance policy.

  Currently only the parts dealing with specs are supported.

  @type buf: StringIO
  @param buf: stream to write into
  @type ipolicy: dict
  @param ipolicy: instance policy
  @type isgroup: bool
  @param isgroup: whether the policy is at group level

  """
  if not isgroup:
    # Standard specs are a cluster-level option only
    stdspecs = ipolicy.get("std")
    if stdspecs:
      buf.write(" %s " % IPOLICY_STD_SPECS_STR)
      _PrintSpecsParameters(buf, stdspecs)

  first = True
  for minmax in ipolicy.get("minmax", []):
    minspecs = minmax.get("min")
    maxspecs = minmax.get("max")
    if not (minspecs and maxspecs):
      # Only complete min/max pairs are printed
      continue
    if first:
      buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
      first = False
    else:
      buf.write("//")
    buf.write("min:")
    _PrintSpecsParameters(buf, minspecs)
    buf.write("/max:")
    _PrintSpecsParameters(buf, maxspecs)
def ConfirmOperation(names, list_type, text, extra=""):
  """Ask the user to confirm an operation on a list of list_type.

  This function is used to request confirmation for doing an operation
  on a given list of list_type.

  @type names: list
  @param names: the list of names that we display when
    we ask for confirmation
  @type list_type: str
  @param list_type: Human readable name for elements in the list (e.g. nodes)
  @type text: str
  @param text: the operation that the user should confirm
  @rtype: boolean
  @return: True or False depending on user's confirmation.

  """
  count = len(names)
  prompt = ("The %s will operate on %d %s.\n%s"
            "Do you want to continue?" % (text, count, list_type, extra))
  listing = (("\nAffected %s:\n" % list_type) +
             "\n".join([" %s" % name for name in names]))

  choices = [("y", True, "Yes, execute the %s" % text),
             ("n", False, "No, abort the %s" % text)]

  if count > 20:
    # For long lists, offer viewing the affected items separately
    choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
    question = prompt
  else:
    question = prompt + listing

  choice = AskUser(question, choices)
  if choice == "v":
    # The user asked for the listing; re-ask with the full text and
    # without the "view" option
    choices.pop(1)
    choice = AskUser(prompt + listing, choices)
  return choice
def _MaybeParseUnit(elements):
  """Parses and returns an array of potential values with units.

  Values equal to L{constants.VALUE_DEFAULT} are passed through unchanged;
  everything else is run through L{utils.ParseUnit}.

  @type elements: dict
  @rtype: dict

  """
  return dict((k, v if v == constants.VALUE_DEFAULT else utils.ParseUnit(v))
              for (k, v) in elements.items())
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  """Fills C{ipolicy} from per-parameter instance spec options.

  Each C{ispecs_*} argument is a dict keyed by min/max/std with values for
  that single parameter; the data is transposed into the
  L{constants.ISPECS_MINMAX}/L{constants.ISPECS_STD} layout.

  @type ipolicy: dict
  @param ipolicy: instance policy, filled in place
  @type group_ipolicy: bool
  @param group_ipolicy: whether to validate against group-level (instead of
    cluster-level) spec types
  @type fill_all: bool
  @param fill_all: whether missing values should be filled in from defaults

  """
  try:
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # prepare ipolicy dict
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # first, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

  # then transpose
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items():  # {min: .. ,max: .., std: ..}
      assert key in ispecs
      ispecs[key][name] = val
  minmax_out = {}
  for key in constants.ISPECS_MINMAX_KEYS:
    if fill_all:
      minmax_out[key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    else:
      minmax_out[key] = ispecs[key]
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
  if fill_all:
    ipolicy[constants.ISPECS_STD] = \
      objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                       ispecs[constants.ISPECS_STD])
  else:
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
def _ParseSpecUnit(spec, keyname):
  """Parses the size values (disk and memory) of an instance spec.

  @type spec: dict
  @param spec: the spec to parse; not modified, a copy is returned
  @type keyname: string
  @param keyname: spec name, only used in error messages
  @rtype: dict
  @raise errors.OpPrereqError: if a size value cannot be parsed

  """
  ret = spec.copy()
  for k in [constants.ISPEC_DISK_SIZE, constants.ISPEC_MEM_SIZE]:
    if k in ret:
      try:
        ret[k] = utils.ParseUnit(ret[k])
      except (TypeError, ValueError, errors.UnitParseError), err:
        raise errors.OpPrereqError(("Invalid parameter %s (%s) in %s instance"
                                    " specs: %s" % (k, ret[k], keyname, err)),
                                   errors.ECODE_INVAL)
  return ret
def _ParseISpec(spec, keyname, required):
  """Parses one instance spec, validating parameter names and types.

  @type spec: dict
  @param spec: the spec to parse
  @type keyname: string
  @param keyname: spec name, used in error messages
  @type required: bool
  @param required: whether all spec parameters must be present
  @rtype: dict
  @raise errors.OpPrereqError: if required parameters are missing

  """
  parsed = _ParseSpecUnit(spec, keyname)
  utils.ForceDictType(parsed, constants.ISPECS_PARAMETER_TYPES)
  if required:
    missing = constants.ISPECS_PARAMETERS - frozenset(parsed.keys())
    if missing:
      raise errors.OpPrereqError("Missing parameters in ipolicy spec %s: %s" %
                                 (keyname, utils.CommaJoin(missing)),
                                 errors.ECODE_INVAL)
  return parsed
def _GetISpecsInAllowedValues(minmax_ispecs, allowed_values):
  """Detects an allowed-value keyword given as the only minmax spec.

  @param minmax_ispecs: list of minmax spec dictionaries, or None
  @param allowed_values: iterable of special values accepted verbatim, or None
  @return: the matching allowed value, or None if the specs are regular

  """
  if not (minmax_ispecs and allowed_values):
    return None
  if len(minmax_ispecs) != 1 or len(minmax_ispecs[0]) != 1:
    return None
  # Exactly one entry at this point
  ((key, spec),) = minmax_ispecs[0].items()
  if key in allowed_values and not spec:
    return key
  return None
def _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values):
  """Fills C{ipolicy_out} from full minmax/std instance spec options.

  @type ipolicy_out: dict
  @param ipolicy_out: instance policy, filled in place
  @param minmax_ispecs: list of dicts with "min"/"max" spec keys, or None
  @param std_ispecs: standard spec dict, or None
  @type group_ipolicy: bool
  @param group_ipolicy: whether the policy is at group level
  @param allowed_values: special values accepted verbatim instead of specs
  @raise errors.OpPrereqError: on invalid bound keys

  """
  found_allowed = _GetISpecsInAllowedValues(minmax_ispecs, allowed_values)
  if found_allowed is not None:
    # A special keyword (e.g. "default") replaces the whole bounds list
    ipolicy_out[constants.ISPECS_MINMAX] = found_allowed
  elif minmax_ispecs is not None:
    parsed_pairs = []
    for mmpair in minmax_ispecs:
      pair_out = {}
      for (key, spec) in mmpair.items():
        if key not in constants.ISPECS_MINMAX_KEYS:
          msg = "Invalid key in bounds instance specifications: %s" % key
          raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
        pair_out[key] = _ParseISpec(spec, key, True)
      parsed_pairs.append(pair_out)
    ipolicy_out[constants.ISPECS_MINMAX] = parsed_pairs
  if std_ispecs is not None:
    assert not group_ipolicy  # This is not an option for gnt-group
    ipolicy_out[constants.ISPECS_STD] = _ParseISpec(std_ispecs, "std", False)
def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  The C{ispecs_*} arguments come from the per-parameter C{--specs-...}
  options, while C{minmax_ispecs}/C{std_ispecs} come from the
  C{--ipolicy-...-specs} options; the two styles are mutually exclusive.

  @type group_ipolicy: bool
  @param group_ipolicy: whether the policy is created at group level
  @param allowed_values: special values accepted verbatim instead of
    parsed specs, or None
  @param fill_all: whether for cluster policies we should ensure that
    all values are filled
  @rtype: dict
  @return: the newly created instance policy
  @raise errors.OpPrereqError: if both option styles are used together

  """
  assert not (fill_all and allowed_values)

  split_specs = (ispecs_mem_size or ispecs_cpu_count or ispecs_disk_count or
                 ispecs_disk_size or ispecs_nic_count)
  if (split_specs and (minmax_ispecs is not None or std_ispecs is not None)):
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()
  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif (minmax_ispecs is not None or std_ispecs is not None):
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    if allowed_values and ipolicy_disk_templates in allowed_values:
      # Special values (e.g. "default") are stored as-is
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)
  if ipolicy_vcpu_ratio is not None:
    ipolicy_out[constants.IPOLICY_VCPU_RATIO] = ipolicy_vcpu_ratio
  if ipolicy_spindle_ratio is not None:
    ipolicy_out[constants.IPOLICY_SPINDLE_RATIO] = ipolicy_spindle_ratio

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out
def _NotAContainer(data):
  """ Checks whether the input is not a container data type.

  Containers are lists, dicts and tuples; everything else (strings,
  numbers, None, ...) is considered simple.

  @rtype: bool

  """
  return not isinstance(data, (list, dict, tuple))
def _GetAlignmentMapping(data):
  """ Returns info about alignment if present in an encoded ordered dictionary.

  @type data: list of tuple
  @param data: The encoded ordered dictionary, as defined in
    L{_SerializeGenericInfo}.
  @rtype: dict of any to int
  @return: The dictionary mapping alignment groups to the maximum length of the
    dictionary key found in the group.

  """
  alignment = {}
  for entry in data:
    if len(entry) <= 2:
      # No alignment group given for this entry
      continue
    group = entry[2]
    alignment[group] = max(alignment.get(group, 0), len(entry[0]))
  return alignment
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
    to properly add newlines or indentation)

  """
  baseind = "  "
  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for key in sorted(data):
        if doindent:
          buf.write(baseind * level)
        else:
          # The first entry continues the current line
          doindent = True
        buf.write(key)
        buf.write(": ")
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and len(data) > 0 and isinstance(data[0], tuple):
    # list of tuples (an ordered dictionary)
    # the tuples may have two or three members - key, value, and alignment
    # group; if the alignment group is present, align all values sharing the
    # same group
    if afterkey:
      buf.write("\n")
      doindent = True
    else:
      doindent = False

    alignment_mapping = _GetAlignmentMapping(data)
    for entry in data:
      key, val = entry[0:2]
      if doindent:
        buf.write(baseind * level)
      else:
        doindent = True
      buf.write(key)
      buf.write(": ")
      if len(entry) > 2:
        # Pad with spaces so that values in the same group line up
        max_key_length = alignment_mapping[entry[2]]
        buf.write(" " * (max_key_length - len(key)))
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, tuple) and all(map(_NotAContainer, data)):
    # tuples with simple content are serialized as inline lists
    buf.write("[%s]\n" % utils.CommaJoin(data))
  elif isinstance(data, list) or isinstance(data, tuple):
    # lists and tuples
    if not data:
      buf.write("\n")
    else:
      if afterkey:
        buf.write("\n")
        doindent = True
      else:
        doindent = False
      for item in data:
        if doindent:
          buf.write(baseind * level)
        else:
          doindent = True
        # YAML-style list item marker
        buf.write("-")
        buf.write(baseind[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # This branch should be only taken for strings, but it's practically
    # impossible to guarantee that no other types are produced somewhere
    buf.write(str(data))
    buf.write("\n")
def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
    can be:
      - dictionaries, where keys are strings and values are of any of the
        types listed here
      - lists of tuples (key, value) or (key, value, alignment_group), where
        key is a string, value is of any of the types listed here, and
        alignment_group can be any hashable value; it's a way to encode
        ordered dictionaries; any entries sharing the same alignment group
        are aligned by appending whitespace before the value as needed
      - lists of any of the types listed here
      - strings

  """
  serialized = StringIO()
  _SerializeGenericInfo(serialized, data, 0)
  # Drop the trailing newline; ToStdout adds its own
  ToStdout(serialized.getvalue().rstrip("\n"))