
Source Code for Module ganeti.client.gnt_instance

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc. 
   5  # 
   6  # This program is free software; you can redistribute it and/or modify 
   7  # it under the terms of the GNU General Public License as published by 
   8  # the Free Software Foundation; either version 2 of the License, or 
   9  # (at your option) any later version. 
  10  # 
  11  # This program is distributed in the hope that it will be useful, but 
  12  # WITHOUT ANY WARRANTY; without even the implied warranty of 
  13  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
  14  # General Public License for more details. 
  15  # 
  16  # You should have received a copy of the GNU General Public License 
  17  # along with this program; if not, write to the Free Software 
  18  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
  19  # 02110-1301, USA. 
  20   
  21  """Instance related commands""" 
  22   
  23  # pylint: disable=W0401,W0614,C0103 
  24  # W0401: Wildcard import ganeti.cli 
  25  # W0614: Unused import %s from wildcard import (since we need cli) 
  26  # C0103: Invalid name gnt-instance 
  27   
  28  import copy 
  29  import itertools 
  30  import simplejson 
  31  import logging 
  32  from cStringIO import StringIO 
  33   
  34  from ganeti.cli import * 
  35  from ganeti import opcodes 
  36  from ganeti import constants 
  37  from ganeti import compat 
  38  from ganeti import utils 
  39  from ganeti import errors 
  40  from ganeti import netutils 
  41  from ganeti import ssh 
  42  from ganeti import objects 
  43  from ganeti import ht 
  44   
  45   
  46  _EXPAND_CLUSTER = "cluster" 
  47  _EXPAND_NODES_BOTH = "nodes" 
  48  _EXPAND_NODES_PRI = "nodes-pri" 
  49  _EXPAND_NODES_SEC = "nodes-sec" 
  50  _EXPAND_NODES_BOTH_BY_TAGS = "nodes-by-tags" 
  51  _EXPAND_NODES_PRI_BY_TAGS = "nodes-pri-by-tags" 
  52  _EXPAND_NODES_SEC_BY_TAGS = "nodes-sec-by-tags" 
  53  _EXPAND_INSTANCES = "instances" 
  54  _EXPAND_INSTANCES_BY_TAGS = "instances-by-tags" 
  55   
  56  _EXPAND_NODES_TAGS_MODES = frozenset([ 
  57    _EXPAND_NODES_BOTH_BY_TAGS, 
  58    _EXPAND_NODES_PRI_BY_TAGS, 
  59    _EXPAND_NODES_SEC_BY_TAGS, 
  60    ]) 
  61   
  62   
  63  #: default list of options for L{ListInstances} 
  64  _LIST_DEF_FIELDS = [ 
  65    "name", "hypervisor", "os", "pnode", "status", "oper_ram", 
  66    ] 
  67   
  68   
  69  _MISSING = object() 
  70  _ENV_OVERRIDE = frozenset(["list"]) 
  71   
  72   
73  def _ExpandMultiNames(mode, names, client=None):
74 """Expand the given names using the passed mode. 75 76 For _EXPAND_CLUSTER, all instances will be returned. For 77 _EXPAND_NODES_PRI/SEC, all instances having those nodes as 78 primary/secondary will be returned. For _EXPAND_NODES_BOTH, all 79 instances having those nodes as either primary or secondary will be 80 returned. For _EXPAND_INSTANCES, the given instances will be 81 returned. 82 83 @param mode: one of L{_EXPAND_CLUSTER}, L{_EXPAND_NODES_BOTH}, 84 L{_EXPAND_NODES_PRI}, L{_EXPAND_NODES_SEC} or 85 L{_EXPAND_INSTANCES} 86 @param names: a list of names; for cluster, it must be empty, 87 and for node and instance it must be a list of valid item 88 names (short names are valid as usual, e.g. node1 instead of 89 node1.example.com) 90 @rtype: list 91 @return: the list of names after the expansion 92 @raise errors.ProgrammerError: for unknown selection type 93 @raise errors.OpPrereqError: for invalid input parameters 94 95 """ 96 # pylint: disable=W0142 97 98 if client is None: 99 client = GetClient() 100 if mode == _EXPAND_CLUSTER: 101 if names: 102 raise errors.OpPrereqError("Cluster filter mode takes no arguments", 103 errors.ECODE_INVAL) 104 idata = client.QueryInstances([], ["name"], False) 105 inames = [row[0] for row in idata] 106 107 elif (mode in _EXPAND_NODES_TAGS_MODES or 108 mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_PRI, _EXPAND_NODES_SEC)): 109 if mode in _EXPAND_NODES_TAGS_MODES: 110 if not names: 111 raise errors.OpPrereqError("No node tags passed", errors.ECODE_INVAL) 112 ndata = client.QueryNodes([], ["name", "pinst_list", 113 "sinst_list", "tags"], False) 114 ndata = [row for row in ndata if set(row[3]).intersection(names)] 115 else: 116 if not names: 117 raise errors.OpPrereqError("No node names passed", errors.ECODE_INVAL) 118 ndata = client.QueryNodes(names, ["name", "pinst_list", "sinst_list"], 119 False) 120 121 ipri = [row[1] for row in ndata] 122 pri_names = list(itertools.chain(*ipri)) 123 isec = [row[2] for row in ndata] 124 sec_names = list(itertools.chain(*isec)) 125 if mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_BOTH_BY_TAGS): 126 inames = pri_names + sec_names 127 elif mode in (_EXPAND_NODES_PRI, _EXPAND_NODES_PRI_BY_TAGS): 128 inames = pri_names 129 elif mode in (_EXPAND_NODES_SEC, _EXPAND_NODES_SEC_BY_TAGS): 130 inames = sec_names 131 else: 132 raise errors.ProgrammerError("Unhandled shutdown type") 133 elif mode == _EXPAND_INSTANCES: 134 if not names: 135 raise errors.OpPrereqError("No instance names passed", 136 errors.ECODE_INVAL) 137 idata = client.QueryInstances(names, ["name"], False) 138 inames = [row[0] for row in idata] 139 elif mode == _EXPAND_INSTANCES_BY_TAGS: 140 if not names: 141 raise errors.OpPrereqError("No instance tags passed", 142 errors.ECODE_INVAL) 143 idata = client.QueryInstances([], ["name", "tags"], False) 144 inames = [row[0] for row in idata if set(row[1]).intersection(names)] 145 else: 146 raise errors.OpPrereqError("Unknown mode '%s'" % mode, errors.ECODE_INVAL) 147 148 return inames
149 150
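To illustrate the modes above, a minimal usage sketch (the node and instance names are hypothetical; the client is obtained internally when none is passed):

    # Suppose node1 is primary for inst1 and secondary for inst2.
    _ExpandMultiNames(_EXPAND_NODES_PRI, ["node1"])   # -> ["inst1"]
    _ExpandMultiNames(_EXPAND_NODES_BOTH, ["node1"])  # -> ["inst1", "inst2"]
    _ExpandMultiNames(_EXPAND_CLUSTER, [])            # -> every instance name in the cluster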
151  def _EnsureInstancesExist(client, names):
152 """Check for and ensure the given instance names exist. 153 154 This function will raise an OpPrereqError in case they don't 155 exist. Otherwise it will exit cleanly. 156 157 @type client: L{ganeti.luxi.Client} 158 @param client: the client to use for the query 159 @type names: list 160 @param names: the list of instance names to query 161 @raise errors.OpPrereqError: in case any instance is missing 162 163 """ 164 # TODO: change LUInstanceQuery to that it actually returns None 165 # instead of raising an exception, or devise a better mechanism 166 result = client.QueryInstances(names, ["name"], False) 167 for orig_name, row in zip(names, result): 168 if row[0] is None: 169 raise errors.OpPrereqError("Instance '%s' does not exist" % orig_name, 170 errors.ECODE_NOENT)
171 172
173  def GenericManyOps(operation, fn):
174 """Generic multi-instance operations. 175 176 The will return a wrapper that processes the options and arguments 177 given, and uses the passed function to build the opcode needed for 178 the specific operation. Thus all the generic loop/confirmation code 179 is abstracted into this function. 180 181 """ 182 def realfn(opts, args): 183 if opts.multi_mode is None: 184 opts.multi_mode = _EXPAND_INSTANCES 185 cl = GetClient() 186 inames = _ExpandMultiNames(opts.multi_mode, args, client=cl) 187 if not inames: 188 if opts.multi_mode == _EXPAND_CLUSTER: 189 ToStdout("Cluster is empty, no instances to shutdown") 190 return 0 191 raise errors.OpPrereqError("Selection filter does not match" 192 " any instances", errors.ECODE_INVAL) 193 multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(inames) > 1 194 if not (opts.force_multi or not multi_on 195 or ConfirmOperation(inames, "instances", operation)): 196 return 1 197 jex = JobExecutor(verbose=multi_on, cl=cl, opts=opts) 198 for name in inames: 199 op = fn(name, opts) 200 jex.QueueJob(name, op) 201 results = jex.WaitOrShow(not opts.submit_only) 202 rcode = compat.all(row[0] for row in results) 203 return int(not rcode)
204 return realfn 205 206
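The commands table at the bottom of this module uses this factory to build the multi-instance commands; a sketch of the pattern:

    # Expands the selection, asks for confirmation when more than one instance
    # is affected, then queues one opcode per instance name.
    shutdown_handler = GenericManyOps("shutdown", _ShutdownInstance)
    # shutdown_handler(opts, args) is the callable registered under "shutdown".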
207  def ListInstances(opts, args):
208 """List instances and their properties. 209 210 @param opts: the command line options selected by the user 211 @type args: list 212 @param args: should be an empty list 213 @rtype: int 214 @return: the desired exit code 215 216 """ 217 selected_fields = ParseFields(opts.output, _LIST_DEF_FIELDS) 218 219 fmtoverride = dict.fromkeys(["tags", "disk.sizes", "nic.macs", "nic.ips", 220 "nic.modes", "nic.links", "nic.bridges", 221 "snodes", "snodes.group", "snodes.group.uuid"], 222 (lambda value: ",".join(str(item) 223 for item in value), 224 False)) 225 226 return GenericList(constants.QR_INSTANCE, selected_fields, args, opts.units, 227 opts.separator, not opts.no_headers, 228 format_override=fmtoverride, verbose=opts.verbose, 229 force_filter=opts.force_filter)
230 231
232  def ListInstanceFields(opts, args):
233 """List instance fields. 234 235 @param opts: the command line options selected by the user 236 @type args: list 237 @param args: fields to list, or empty for all 238 @rtype: int 239 @return: the desired exit code 240 241 """ 242 return GenericListFields(constants.QR_INSTANCE, args, opts.separator, 243 not opts.no_headers)
244 245
246  def AddInstance(opts, args):
247 """Add an instance to the cluster. 248 249 This is just a wrapper over GenericInstanceCreate. 250 251 """ 252 return GenericInstanceCreate(constants.INSTANCE_CREATE, opts, args)
253 254
255  def BatchCreate(opts, args):
256 """Create instances using a definition file. 257 258 This function reads a json file with instances defined 259 in the form:: 260 261 {"instance-name":{ 262 "disk_size": [20480], 263 "template": "drbd", 264 "backend": { 265 "memory": 512, 266 "vcpus": 1 }, 267 "os": "debootstrap", 268 "primary_node": "firstnode", 269 "secondary_node": "secondnode", 270 "iallocator": "dumb"} 271 } 272 273 Note that I{primary_node} and I{secondary_node} have precedence over 274 I{iallocator}. 275 276 @param opts: the command line options selected by the user 277 @type args: list 278 @param args: should contain one element, the json filename 279 @rtype: int 280 @return: the desired exit code 281 282 """ 283 _DEFAULT_SPECS = {"disk_size": [20 * 1024], 284 "backend": {}, 285 "iallocator": None, 286 "primary_node": None, 287 "secondary_node": None, 288 "nics": None, 289 "start": True, 290 "ip_check": True, 291 "name_check": True, 292 "hypervisor": None, 293 "hvparams": {}, 294 "file_storage_dir": None, 295 "force_variant": False, 296 "file_driver": "loop"} 297 298 def _PopulateWithDefaults(spec): 299 """Returns a new hash combined with default values.""" 300 mydict = _DEFAULT_SPECS.copy() 301 mydict.update(spec) 302 return mydict
303 304 def _Validate(spec): 305 """Validate the instance specs.""" 306 # Validate fields required under any circumstances 307 for required_field in ("os", "template"): 308 if required_field not in spec: 309 raise errors.OpPrereqError('Required field "%s" is missing.' % 310 required_field, errors.ECODE_INVAL) 311 # Validate special fields 312 if spec["primary_node"] is not None: 313 if (spec["template"] in constants.DTS_INT_MIRROR and 314 spec["secondary_node"] is None): 315 raise errors.OpPrereqError("Template requires secondary node, but" 316 " there was no secondary provided.", 317 errors.ECODE_INVAL) 318 elif spec["iallocator"] is None: 319 raise errors.OpPrereqError("You have to provide at least a primary_node" 320 " or an iallocator.", 321 errors.ECODE_INVAL) 322 323 if (spec["hvparams"] and 324 not isinstance(spec["hvparams"], dict)): 325 raise errors.OpPrereqError("Hypervisor parameters must be a dict.", 326 errors.ECODE_INVAL) 327 328 json_filename = args[0] 329 try: 330 instance_data = simplejson.loads(utils.ReadFile(json_filename)) 331 except Exception, err: # pylint: disable=W0703 332 ToStderr("Can't parse the instance definition file: %s" % str(err)) 333 return 1 334 335 if not isinstance(instance_data, dict): 336 ToStderr("The instance definition file is not in dict format.") 337 return 1 338 339 jex = JobExecutor(opts=opts) 340 341 # Iterate over the instances and do: 342 # * Populate the specs with default value 343 # * Validate the instance specs 344 i_names = utils.NiceSort(instance_data.keys()) # pylint: disable=E1103 345 for name in i_names: 346 specs = instance_data[name] 347 specs = _PopulateWithDefaults(specs) 348 _Validate(specs) 349 350 hypervisor = specs["hypervisor"] 351 hvparams = specs["hvparams"] 352 353 disks = [] 354 for elem in specs["disk_size"]: 355 try: 356 size = utils.ParseUnit(elem) 357 except (TypeError, ValueError), err: 358 raise errors.OpPrereqError("Invalid disk size '%s' for" 359 " instance %s: %s" % 360 (elem, name, err), errors.ECODE_INVAL) 361 disks.append({"size": size}) 362 363 utils.ForceDictType(specs["backend"], constants.BES_PARAMETER_COMPAT) 364 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES) 365 366 tmp_nics = [] 367 for field in constants.INIC_PARAMS: 368 if field in specs: 369 if not tmp_nics: 370 tmp_nics.append({}) 371 tmp_nics[0][field] = specs[field] 372 373 if specs["nics"] is not None and tmp_nics: 374 raise errors.OpPrereqError("'nics' list incompatible with using" 375 " individual nic fields as well", 376 errors.ECODE_INVAL) 377 elif specs["nics"] is not None: 378 tmp_nics = specs["nics"] 379 elif not tmp_nics: 380 tmp_nics = [{}] 381 382 op = opcodes.OpInstanceCreate(instance_name=name, 383 disks=disks, 384 disk_template=specs["template"], 385 mode=constants.INSTANCE_CREATE, 386 os_type=specs["os"], 387 force_variant=specs["force_variant"], 388 pnode=specs["primary_node"], 389 snode=specs["secondary_node"], 390 nics=tmp_nics, 391 start=specs["start"], 392 ip_check=specs["ip_check"], 393 name_check=specs["name_check"], 394 wait_for_sync=True, 395 iallocator=specs["iallocator"], 396 hypervisor=hypervisor, 397 hvparams=hvparams, 398 beparams=specs["backend"], 399 file_storage_dir=specs["file_storage_dir"], 400 file_driver=specs["file_driver"]) 401 402 jex.QueueJob(name, op) 403 # we never want to wait, just show the submitted job IDs 404 jex.WaitOrShow(False) 405 406 return 0 407 408
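Besides the node-based example in the docstring, a definition may rely solely on an allocator; a hypothetical single-entry file (names and values are illustrative only):

    {"web1": {"disk_size": ["10G"],
              "template": "plain",
              "os": "debootstrap",
              "backend": {"memory": 256},
              "iallocator": "hail"}}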
409  def ReinstallInstance(opts, args):
410 """Reinstall an instance. 411 412 @param opts: the command line options selected by the user 413 @type args: list 414 @param args: should contain only one element, the name of the 415 instance to be reinstalled 416 @rtype: int 417 @return: the desired exit code 418 419 """ 420 # first, compute the desired name list 421 if opts.multi_mode is None: 422 opts.multi_mode = _EXPAND_INSTANCES 423 424 inames = _ExpandMultiNames(opts.multi_mode, args) 425 if not inames: 426 raise errors.OpPrereqError("Selection filter does not match any instances", 427 errors.ECODE_INVAL) 428 429 # second, if requested, ask for an OS 430 if opts.select_os is True: 431 op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[]) 432 result = SubmitOpCode(op, opts=opts) 433 434 if not result: 435 ToStdout("Can't get the OS list") 436 return 1 437 438 ToStdout("Available OS templates:") 439 number = 0 440 choices = [] 441 for (name, variants) in result: 442 for entry in CalculateOSNames(name, variants): 443 ToStdout("%3s: %s", number, entry) 444 choices.append(("%s" % number, entry, entry)) 445 number += 1 446 447 choices.append(("x", "exit", "Exit gnt-instance reinstall")) 448 selected = AskUser("Enter OS template number (or x to abort):", 449 choices) 450 451 if selected == "exit": 452 ToStderr("User aborted reinstall, exiting") 453 return 1 454 455 os_name = selected 456 os_msg = "change the OS to '%s'" % selected 457 else: 458 os_name = opts.os 459 if opts.os is not None: 460 os_msg = "change the OS to '%s'" % os_name 461 else: 462 os_msg = "keep the same OS" 463 464 # third, get confirmation: multi-reinstall requires --force-multi, 465 # single-reinstall either --force or --force-multi (--force-multi is 466 # a stronger --force) 467 multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(inames) > 1 468 if multi_on: 469 warn_msg = ("Note: this will remove *all* data for the" 470 " below instances! It will %s.\n" % os_msg) 471 if not (opts.force_multi or 472 ConfirmOperation(inames, "instances", "reinstall", extra=warn_msg)): 473 return 1 474 else: 475 if not (opts.force or opts.force_multi): 476 usertext = ("This will reinstall the instance '%s' (and %s) which" 477 " removes all data. Continue?") % (inames[0], os_msg) 478 if not AskUser(usertext): 479 return 1 480 481 jex = JobExecutor(verbose=multi_on, opts=opts) 482 for instance_name in inames: 483 op = opcodes.OpInstanceReinstall(instance_name=instance_name, 484 os_type=os_name, 485 force_variant=opts.force_variant, 486 osparams=opts.osparams) 487 jex.QueueJob(instance_name, op) 488 489 results = jex.WaitOrShow(not opts.submit_only) 490 491 if compat.all(map(compat.fst, results)): 492 return constants.EXIT_SUCCESS 493 else: 494 return constants.EXIT_FAILURE
495 496
497  def RemoveInstance(opts, args):
498 """Remove an instance. 499 500 @param opts: the command line options selected by the user 501 @type args: list 502 @param args: should contain only one element, the name of 503 the instance to be removed 504 @rtype: int 505 @return: the desired exit code 506 507 """ 508 instance_name = args[0] 509 force = opts.force 510 cl = GetClient() 511 512 if not force: 513 _EnsureInstancesExist(cl, [instance_name]) 514 515 usertext = ("This will remove the volumes of the instance %s" 516 " (including mirrors), thus removing all the data" 517 " of the instance. Continue?") % instance_name 518 if not AskUser(usertext): 519 return 1 520 521 op = opcodes.OpInstanceRemove(instance_name=instance_name, 522 ignore_failures=opts.ignore_failures, 523 shutdown_timeout=opts.shutdown_timeout) 524 SubmitOrSend(op, opts, cl=cl) 525 return 0
526 527
528  def RenameInstance(opts, args):
529 """Rename an instance. 530 531 @param opts: the command line options selected by the user 532 @type args: list 533 @param args: should contain two elements, the old and the 534 new instance names 535 @rtype: int 536 @return: the desired exit code 537 538 """ 539 if not opts.name_check: 540 if not AskUser("As you disabled the check of the DNS entry, please verify" 541 " that '%s' is a FQDN. Continue?" % args[1]): 542 return 1 543 544 op = opcodes.OpInstanceRename(instance_name=args[0], 545 new_name=args[1], 546 ip_check=opts.ip_check, 547 name_check=opts.name_check) 548 result = SubmitOrSend(op, opts) 549 550 if result: 551 ToStdout("Instance '%s' renamed to '%s'", args[0], result) 552 553 return 0
554 555
556  def ActivateDisks(opts, args):
557 """Activate an instance's disks. 558 559 This serves two purposes: 560 - it allows (as long as the instance is not running) 561 mounting the disks and modifying them from the node 562 - it repairs inactive secondary drbds 563 564 @param opts: the command line options selected by the user 565 @type args: list 566 @param args: should contain only one element, the instance name 567 @rtype: int 568 @return: the desired exit code 569 570 """ 571 instance_name = args[0] 572 op = opcodes.OpInstanceActivateDisks(instance_name=instance_name, 573 ignore_size=opts.ignore_size) 574 disks_info = SubmitOrSend(op, opts) 575 for host, iname, nname in disks_info: 576 ToStdout("%s:%s:%s", host, iname, nname) 577 return 0
578 579
580  def DeactivateDisks(opts, args):
581 """Deactivate an instance's disks. 582 583 This function takes the instance name, looks for its primary node 584 and the tries to shutdown its block devices on that node. 585 586 @param opts: the command line options selected by the user 587 @type args: list 588 @param args: should contain only one element, the instance name 589 @rtype: int 590 @return: the desired exit code 591 592 """ 593 instance_name = args[0] 594 op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name, 595 force=opts.force) 596 SubmitOrSend(op, opts) 597 return 0
598 599
600  def RecreateDisks(opts, args):
601 """Recreate an instance's disks. 602 603 @param opts: the command line options selected by the user 604 @type args: list 605 @param args: should contain only one element, the instance name 606 @rtype: int 607 @return: the desired exit code 608 609 """ 610 instance_name = args[0] 611 612 disks = [] 613 614 if opts.disks: 615 for didx, ddict in opts.disks: 616 didx = int(didx) 617 618 if not ht.TDict(ddict): 619 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict) 620 raise errors.OpPrereqError(msg) 621 622 if constants.IDISK_SIZE in ddict: 623 try: 624 ddict[constants.IDISK_SIZE] = \ 625 utils.ParseUnit(ddict[constants.IDISK_SIZE]) 626 except ValueError, err: 627 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" % 628 (didx, err)) 629 630 disks.append((didx, ddict)) 631 632 # TODO: Verify modifyable parameters (already done in 633 # LUInstanceRecreateDisks, but it'd be nice to have in the client) 634 635 if opts.node: 636 pnode, snode = SplitNodeOption(opts.node) 637 nodes = [pnode] 638 if snode is not None: 639 nodes.append(snode) 640 else: 641 nodes = [] 642 643 op = opcodes.OpInstanceRecreateDisks(instance_name=instance_name, 644 disks=disks, nodes=nodes) 645 SubmitOrSend(op, opts) 646 647 return 0
648 649
650  def GrowDisk(opts, args):
651 """Grow an instance's disks. 652 653 @param opts: the command line options selected by the user 654 @type args: list 655 @param args: should contain three elements, the target instance name, 656 the target disk id, and the target growth 657 @rtype: int 658 @return: the desired exit code 659 660 """ 661 instance = args[0] 662 disk = args[1] 663 try: 664 disk = int(disk) 665 except (TypeError, ValueError), err: 666 raise errors.OpPrereqError("Invalid disk index: %s" % str(err), 667 errors.ECODE_INVAL) 668 try: 669 amount = utils.ParseUnit(args[2]) 670 except errors.UnitParseError: 671 raise errors.OpPrereqError("Can't parse the given amount '%s'" % args[2], 672 errors.ECODE_INVAL) 673 op = opcodes.OpInstanceGrowDisk(instance_name=instance, 674 disk=disk, amount=amount, 675 wait_for_sync=opts.wait_for_sync, 676 absolute=opts.absolute) 677 SubmitOrSend(op, opts) 678 return 0
679 680
681  def _StartupInstance(name, opts):
682 """Startup instances. 683 684 This returns the opcode to start an instance, and its decorator will 685 wrap this into a loop starting all desired instances. 686 687 @param name: the name of the instance to act on 688 @param opts: the command line options selected by the user 689 @return: the opcode needed for the operation 690 691 """ 692 op = opcodes.OpInstanceStartup(instance_name=name, 693 force=opts.force, 694 ignore_offline_nodes=opts.ignore_offline, 695 no_remember=opts.no_remember, 696 startup_paused=opts.startup_paused) 697 # do not add these parameters to the opcode unless they're defined 698 if opts.hvparams: 699 op.hvparams = opts.hvparams 700 if opts.beparams: 701 op.beparams = opts.beparams 702 return op
703 704
705  def _RebootInstance(name, opts):
706 """Reboot instance(s). 707 708 This returns the opcode to reboot an instance, and its decorator 709 will wrap this into a loop rebooting all desired instances. 710 711 @param name: the name of the instance to act on 712 @param opts: the command line options selected by the user 713 @return: the opcode needed for the operation 714 715 """ 716 return opcodes.OpInstanceReboot(instance_name=name, 717 reboot_type=opts.reboot_type, 718 ignore_secondaries=opts.ignore_secondaries, 719 shutdown_timeout=opts.shutdown_timeout)
720 721
722  def _ShutdownInstance(name, opts):
723 """Shutdown an instance. 724 725 This returns the opcode to shutdown an instance, and its decorator 726 will wrap this into a loop shutting down all desired instances. 727 728 @param name: the name of the instance to act on 729 @param opts: the command line options selected by the user 730 @return: the opcode needed for the operation 731 732 """ 733 return opcodes.OpInstanceShutdown(instance_name=name, 734 timeout=opts.timeout, 735 ignore_offline_nodes=opts.ignore_offline, 736 no_remember=opts.no_remember)
737 738
739  def ReplaceDisks(opts, args):
740 """Replace the disks of an instance 741 742 @param opts: the command line options selected by the user 743 @type args: list 744 @param args: should contain only one element, the instance name 745 @rtype: int 746 @return: the desired exit code 747 748 """ 749 new_2ndary = opts.dst_node 750 iallocator = opts.iallocator 751 if opts.disks is None: 752 disks = [] 753 else: 754 try: 755 disks = [int(i) for i in opts.disks.split(",")] 756 except (TypeError, ValueError), err: 757 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err), 758 errors.ECODE_INVAL) 759 cnt = [opts.on_primary, opts.on_secondary, opts.auto, 760 new_2ndary is not None, iallocator is not None].count(True) 761 if cnt != 1: 762 raise errors.OpPrereqError("One and only one of the -p, -s, -a, -n and -I" 763 " options must be passed", errors.ECODE_INVAL) 764 elif opts.on_primary: 765 mode = constants.REPLACE_DISK_PRI 766 elif opts.on_secondary: 767 mode = constants.REPLACE_DISK_SEC 768 elif opts.auto: 769 mode = constants.REPLACE_DISK_AUTO 770 if disks: 771 raise errors.OpPrereqError("Cannot specify disks when using automatic" 772 " mode", errors.ECODE_INVAL) 773 elif new_2ndary is not None or iallocator is not None: 774 # replace secondary 775 mode = constants.REPLACE_DISK_CHG 776 777 op = opcodes.OpInstanceReplaceDisks(instance_name=args[0], disks=disks, 778 remote_node=new_2ndary, mode=mode, 779 iallocator=iallocator, 780 early_release=opts.early_release, 781 ignore_ipolicy=opts.ignore_ipolicy) 782 SubmitOrSend(op, opts) 783 return 0
784 785
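The mutually exclusive options map onto the replacement modes as follows (node and allocator names are hypothetical):

    # gnt-instance replace-disks -p inst1        -> constants.REPLACE_DISK_PRI
    # gnt-instance replace-disks -s inst1        -> constants.REPLACE_DISK_SEC
    # gnt-instance replace-disks -a inst1        -> constants.REPLACE_DISK_AUTO (no explicit disk list allowed)
    # gnt-instance replace-disks -n node3 inst1  -> constants.REPLACE_DISK_CHG (explicit new secondary)
    # gnt-instance replace-disks -I hail inst1   -> constants.REPLACE_DISK_CHG (allocator picks the node)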
786  def FailoverInstance(opts, args):
787 """Failover an instance. 788 789 The failover is done by shutting it down on its present node and 790 starting it on the secondary. 791 792 @param opts: the command line options selected by the user 793 @type args: list 794 @param args: should contain only one element, the instance name 795 @rtype: int 796 @return: the desired exit code 797 798 """ 799 cl = GetClient() 800 instance_name = args[0] 801 force = opts.force 802 iallocator = opts.iallocator 803 target_node = opts.dst_node 804 805 if iallocator and target_node: 806 raise errors.OpPrereqError("Specify either an iallocator (-I), or a target" 807 " node (-n) but not both", errors.ECODE_INVAL) 808 809 if not force: 810 _EnsureInstancesExist(cl, [instance_name]) 811 812 usertext = ("Failover will happen to image %s." 813 " This requires a shutdown of the instance. Continue?" % 814 (instance_name,)) 815 if not AskUser(usertext): 816 return 1 817 818 op = opcodes.OpInstanceFailover(instance_name=instance_name, 819 ignore_consistency=opts.ignore_consistency, 820 shutdown_timeout=opts.shutdown_timeout, 821 iallocator=iallocator, 822 target_node=target_node, 823 ignore_ipolicy=opts.ignore_ipolicy) 824 SubmitOrSend(op, opts, cl=cl) 825 return 0
826 827
828  def MigrateInstance(opts, args):
829 """Migrate an instance. 830 831 The migrate is done without shutdown. 832 833 @param opts: the command line options selected by the user 834 @type args: list 835 @param args: should contain only one element, the instance name 836 @rtype: int 837 @return: the desired exit code 838 839 """ 840 cl = GetClient() 841 instance_name = args[0] 842 force = opts.force 843 iallocator = opts.iallocator 844 target_node = opts.dst_node 845 846 if iallocator and target_node: 847 raise errors.OpPrereqError("Specify either an iallocator (-I), or a target" 848 " node (-n) but not both", errors.ECODE_INVAL) 849 850 if not force: 851 _EnsureInstancesExist(cl, [instance_name]) 852 853 if opts.cleanup: 854 usertext = ("Instance %s will be recovered from a failed migration." 855 " Note that the migration procedure (including cleanup)" % 856 (instance_name,)) 857 else: 858 usertext = ("Instance %s will be migrated. Note that migration" % 859 (instance_name,)) 860 usertext += (" might impact the instance if anything goes wrong" 861 " (e.g. due to bugs in the hypervisor). Continue?") 862 if not AskUser(usertext): 863 return 1 864 865 # this should be removed once --non-live is deprecated 866 if not opts.live and opts.migration_mode is not None: 867 raise errors.OpPrereqError("Only one of the --non-live and " 868 "--migration-mode options can be passed", 869 errors.ECODE_INVAL) 870 if not opts.live: # --non-live passed 871 mode = constants.HT_MIGRATION_NONLIVE 872 else: 873 mode = opts.migration_mode 874 875 op = opcodes.OpInstanceMigrate(instance_name=instance_name, mode=mode, 876 cleanup=opts.cleanup, iallocator=iallocator, 877 target_node=target_node, 878 allow_failover=opts.allow_failover, 879 allow_runtime_changes=opts.allow_runtime_chgs, 880 ignore_ipolicy=opts.ignore_ipolicy) 881 SubmitOrSend(op, cl=cl, opts=opts) 882 return 0
883 884
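A sketch of how the mutually exclusive live-migration options translate into the opcode's mode (instance name hypothetical):

    # gnt-instance migrate inst1             -> mode = opts.migration_mode (None: cluster default)
    # gnt-instance migrate --non-live inst1  -> mode = constants.HT_MIGRATION_NONLIVE
    # passing both --non-live and --migration-mode raises OpPrereqError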
885  def MoveInstance(opts, args):
886 """Move an instance. 887 888 @param opts: the command line options selected by the user 889 @type args: list 890 @param args: should contain only one element, the instance name 891 @rtype: int 892 @return: the desired exit code 893 894 """ 895 cl = GetClient() 896 instance_name = args[0] 897 force = opts.force 898 899 if not force: 900 usertext = ("Instance %s will be moved." 901 " This requires a shutdown of the instance. Continue?" % 902 (instance_name,)) 903 if not AskUser(usertext): 904 return 1 905 906 op = opcodes.OpInstanceMove(instance_name=instance_name, 907 target_node=opts.node, 908 shutdown_timeout=opts.shutdown_timeout, 909 ignore_consistency=opts.ignore_consistency, 910 ignore_ipolicy=opts.ignore_ipolicy) 911 SubmitOrSend(op, opts, cl=cl) 912 return 0
913 914
915  def ConnectToInstanceConsole(opts, args):
916 """Connect to the console of an instance. 917 918 @param opts: the command line options selected by the user 919 @type args: list 920 @param args: should contain only one element, the instance name 921 @rtype: int 922 @return: the desired exit code 923 924 """ 925 instance_name = args[0] 926 927 cl = GetClient() 928 try: 929 cluster_name = cl.QueryConfigValues(["cluster_name"])[0] 930 ((console_data, oper_state), ) = \ 931 cl.QueryInstances([instance_name], ["console", "oper_state"], False) 932 finally: 933 # Ensure client connection is closed while external commands are run 934 cl.Close() 935 936 del cl 937 938 if not console_data: 939 if oper_state: 940 # Instance is running 941 raise errors.OpExecError("Console information for instance %s is" 942 " unavailable" % instance_name) 943 else: 944 raise errors.OpExecError("Instance %s is not running, can't get console" % 945 instance_name) 946 947 return _DoConsole(objects.InstanceConsole.FromDict(console_data), 948 opts.show_command, cluster_name)
949 950
951  def _DoConsole(console, show_command, cluster_name, feedback_fn=ToStdout,
952                 _runcmd_fn=utils.RunCmd):
953 """Acts based on the result of L{opcodes.OpInstanceConsole}. 954 955 @type console: L{objects.InstanceConsole} 956 @param console: Console object 957 @type show_command: bool 958 @param show_command: Whether to just display commands 959 @type cluster_name: string 960 @param cluster_name: Cluster name as retrieved from master daemon 961 962 """ 963 assert console.Validate() 964 965 if console.kind == constants.CONS_MESSAGE: 966 feedback_fn(console.message) 967 elif console.kind == constants.CONS_VNC: 968 feedback_fn("Instance %s has VNC listening on %s:%s (display %s)," 969 " URL <vnc://%s:%s/>", 970 console.instance, console.host, console.port, 971 console.display, console.host, console.port) 972 elif console.kind == constants.CONS_SPICE: 973 feedback_fn("Instance %s has SPICE listening on %s:%s", console.instance, 974 console.host, console.port) 975 elif console.kind == constants.CONS_SSH: 976 # Convert to string if not already one 977 if isinstance(console.command, basestring): 978 cmd = console.command 979 else: 980 cmd = utils.ShellQuoteArgs(console.command) 981 982 srun = ssh.SshRunner(cluster_name=cluster_name) 983 ssh_cmd = srun.BuildCmd(console.host, console.user, cmd, 984 batch=True, quiet=False, tty=True) 985 986 if show_command: 987 feedback_fn(utils.ShellQuoteArgs(ssh_cmd)) 988 else: 989 result = _runcmd_fn(ssh_cmd, interactive=True) 990 if result.failed: 991 logging.error("Console command \"%s\" failed with reason '%s' and" 992 " output %r", result.cmd, result.fail_reason, 993 result.output) 994 raise errors.OpExecError("Connection to console of instance %s failed," 995 " please check cluster configuration" % 996 console.instance) 997 else: 998 raise errors.GenericError("Unknown console type '%s'" % console.kind) 999 1000 return constants.EXIT_SUCCESS
1001 1002
1003  def _FormatLogicalID(dev_type, logical_id, roman):
1004 """Formats the logical_id of a disk. 1005 1006 """ 1007 if dev_type == constants.LD_DRBD8: 1008 node_a, node_b, port, minor_a, minor_b, key = logical_id 1009 data = [ 1010 ("nodeA", "%s, minor=%s" % (node_a, compat.TryToRoman(minor_a, 1011 convert=roman))), 1012 ("nodeB", "%s, minor=%s" % (node_b, compat.TryToRoman(minor_b, 1013 convert=roman))), 1014 ("port", compat.TryToRoman(port, convert=roman)), 1015 ("auth key", key), 1016 ] 1017 elif dev_type == constants.LD_LV: 1018 vg_name, lv_name = logical_id 1019 data = ["%s/%s" % (vg_name, lv_name)] 1020 else: 1021 data = [str(logical_id)] 1022 1023 return data
1024 1025
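For a DRBD8 disk the six-element logical_id unpacks into labelled fields; a sketch with hypothetical values:

    _FormatLogicalID(constants.LD_DRBD8,
                     ("node1", "node2", 11000, 0, 1, "secret"), False)
    # -> [("nodeA", "node1, minor=0"), ("nodeB", "node2, minor=1"),
    #     ("port", 11000), ("auth key", "secret")]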
1026  def _FormatBlockDevInfo(idx, top_level, dev, roman):
1027 """Show block device information. 1028 1029 This is only used by L{ShowInstanceConfig}, but it's too big to be 1030 left for an inline definition. 1031 1032 @type idx: int 1033 @param idx: the index of the current disk 1034 @type top_level: boolean 1035 @param top_level: if this a top-level disk? 1036 @type dev: dict 1037 @param dev: dictionary with disk information 1038 @type roman: boolean 1039 @param roman: whether to try to use roman integers 1040 @return: a list of either strings, tuples or lists 1041 (which should be formatted at a higher indent level) 1042 1043 """ 1044 def helper(dtype, status): 1045 """Format one line for physical device status. 1046 1047 @type dtype: str 1048 @param dtype: a constant from the L{constants.LDS_BLOCK} set 1049 @type status: tuple 1050 @param status: a tuple as returned from L{backend.FindBlockDevice} 1051 @return: the string representing the status 1052 1053 """ 1054 if not status: 1055 return "not active" 1056 txt = "" 1057 (path, major, minor, syncp, estt, degr, ldisk_status) = status 1058 if major is None: 1059 major_string = "N/A" 1060 else: 1061 major_string = str(compat.TryToRoman(major, convert=roman)) 1062 1063 if minor is None: 1064 minor_string = "N/A" 1065 else: 1066 minor_string = str(compat.TryToRoman(minor, convert=roman)) 1067 1068 txt += ("%s (%s:%s)" % (path, major_string, minor_string)) 1069 if dtype in (constants.LD_DRBD8, ): 1070 if syncp is not None: 1071 sync_text = "*RECOVERING* %5.2f%%," % syncp 1072 if estt: 1073 sync_text += " ETA %ss" % compat.TryToRoman(estt, convert=roman) 1074 else: 1075 sync_text += " ETA unknown" 1076 else: 1077 sync_text = "in sync" 1078 if degr: 1079 degr_text = "*DEGRADED*" 1080 else: 1081 degr_text = "ok" 1082 if ldisk_status == constants.LDS_FAULTY: 1083 ldisk_text = " *MISSING DISK*" 1084 elif ldisk_status == constants.LDS_UNKNOWN: 1085 ldisk_text = " *UNCERTAIN STATE*" 1086 else: 1087 ldisk_text = "" 1088 txt += (" %s, status %s%s" % (sync_text, degr_text, ldisk_text)) 1089 elif dtype == constants.LD_LV: 1090 if ldisk_status == constants.LDS_FAULTY: 1091 ldisk_text = " *FAILED* (failed drive?)" 1092 else: 1093 ldisk_text = "" 1094 txt += ldisk_text 1095 return txt
1096 1097 # the header 1098 if top_level: 1099 if dev["iv_name"] is not None: 1100 txt = dev["iv_name"] 1101 else: 1102 txt = "disk %s" % compat.TryToRoman(idx, convert=roman) 1103 else: 1104 txt = "child %s" % compat.TryToRoman(idx, convert=roman) 1105 if isinstance(dev["size"], int): 1106 nice_size = utils.FormatUnit(dev["size"], "h") 1107 else: 1108 nice_size = dev["size"] 1109 d1 = ["- %s: %s, size %s" % (txt, dev["dev_type"], nice_size)] 1110 data = [] 1111 if top_level: 1112 data.append(("access mode", dev["mode"])) 1113 if dev["logical_id"] is not None: 1114 try: 1115 l_id = _FormatLogicalID(dev["dev_type"], dev["logical_id"], roman) 1116 except ValueError: 1117 l_id = [str(dev["logical_id"])] 1118 if len(l_id) == 1: 1119 data.append(("logical_id", l_id[0])) 1120 else: 1121 data.extend(l_id) 1122 elif dev["physical_id"] is not None: 1123 data.append("physical_id:") 1124 data.append([dev["physical_id"]]) 1125 1126 if dev["pstatus"]: 1127 data.append(("on primary", helper(dev["dev_type"], dev["pstatus"]))) 1128 1129 if dev["sstatus"]: 1130 data.append(("on secondary", helper(dev["dev_type"], dev["sstatus"]))) 1131 1132 if dev["children"]: 1133 data.append("child devices:") 1134 for c_idx, child in enumerate(dev["children"]): 1135 data.append(_FormatBlockDevInfo(c_idx, False, child, roman)) 1136 d1.append(data) 1137 return d1 1138 1139
1140  def _FormatList(buf, data, indent_level):
1141 """Formats a list of data at a given indent level. 1142 1143 If the element of the list is: 1144 - a string, it is simply formatted as is 1145 - a tuple, it will be split into key, value and the all the 1146 values in a list will be aligned all at the same start column 1147 - a list, will be recursively formatted 1148 1149 @type buf: StringIO 1150 @param buf: the buffer into which we write the output 1151 @param data: the list to format 1152 @type indent_level: int 1153 @param indent_level: the indent level to format at 1154 1155 """ 1156 max_tlen = max([len(elem[0]) for elem in data 1157 if isinstance(elem, tuple)] or [0]) 1158 for elem in data: 1159 if isinstance(elem, basestring): 1160 buf.write("%*s%s\n" % (2 * indent_level, "", elem)) 1161 elif isinstance(elem, tuple): 1162 key, value = elem 1163 spacer = "%*s" % (max_tlen - len(key), "") 1164 buf.write("%*s%s:%s %s\n" % (2 * indent_level, "", key, spacer, value)) 1165 elif isinstance(elem, list): 1166 _FormatList(buf, elem, indent_level + 1)
1167 1168
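A sketch of the resulting layout (data values are hypothetical; the buffer contents are shown as comments):

    buf = StringIO()
    _FormatList(buf, ["- disk/0: lvm, size 1.0G",
                      [("access mode", "rw"),
                       ("logical_id", "xenvg/disk0")]], 1)
    # buf.getvalue() ==
    #   "  - disk/0: lvm, size 1.0G\n"
    #   "    access mode: rw\n"
    #   "    logical_id:  xenvg/disk0\n"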
1169  def ShowInstanceConfig(opts, args):
1170 """Compute instance run-time status. 1171 1172 @param opts: the command line options selected by the user 1173 @type args: list 1174 @param args: either an empty list, and then we query all 1175 instances, or should contain a list of instance names 1176 @rtype: int 1177 @return: the desired exit code 1178 1179 """ 1180 if not args and not opts.show_all: 1181 ToStderr("No instance selected." 1182 " Please pass in --all if you want to query all instances.\n" 1183 "Note that this can take a long time on a big cluster.") 1184 return 1 1185 elif args and opts.show_all: 1186 ToStderr("Cannot use --all if you specify instance names.") 1187 return 1 1188 1189 retcode = 0 1190 op = opcodes.OpInstanceQueryData(instances=args, static=opts.static, 1191 use_locking=not opts.static) 1192 result = SubmitOpCode(op, opts=opts) 1193 if not result: 1194 ToStdout("No instances.") 1195 return 1 1196 1197 buf = StringIO() 1198 retcode = 0 1199 for instance_name in result: 1200 instance = result[instance_name] 1201 buf.write("Instance name: %s\n" % instance["name"]) 1202 buf.write("UUID: %s\n" % instance["uuid"]) 1203 buf.write("Serial number: %s\n" % 1204 compat.TryToRoman(instance["serial_no"], 1205 convert=opts.roman_integers)) 1206 buf.write("Creation time: %s\n" % utils.FormatTime(instance["ctime"])) 1207 buf.write("Modification time: %s\n" % utils.FormatTime(instance["mtime"])) 1208 buf.write("State: configured to be %s" % instance["config_state"]) 1209 if instance["run_state"]: 1210 buf.write(", actual state is %s" % instance["run_state"]) 1211 buf.write("\n") 1212 ##buf.write("Considered for memory checks in cluster verify: %s\n" % 1213 ## instance["auto_balance"]) 1214 buf.write(" Nodes:\n") 1215 buf.write(" - primary: %s\n" % instance["pnode"]) 1216 buf.write(" group: %s (UUID %s)\n" % 1217 (instance["pnode_group_name"], instance["pnode_group_uuid"])) 1218 buf.write(" - secondaries: %s\n" % 1219 utils.CommaJoin("%s (group %s, group UUID %s)" % 1220 (name, group_name, group_uuid) 1221 for (name, group_name, group_uuid) in 1222 zip(instance["snodes"], 1223 instance["snodes_group_names"], 1224 instance["snodes_group_uuids"]))) 1225 buf.write(" Operating system: %s\n" % instance["os"]) 1226 FormatParameterDict(buf, instance["os_instance"], instance["os_actual"], 1227 level=2) 1228 if "network_port" in instance: 1229 buf.write(" Allocated network port: %s\n" % 1230 compat.TryToRoman(instance["network_port"], 1231 convert=opts.roman_integers)) 1232 buf.write(" Hypervisor: %s\n" % instance["hypervisor"]) 1233 1234 # custom VNC console information 1235 vnc_bind_address = instance["hv_actual"].get(constants.HV_VNC_BIND_ADDRESS, 1236 None) 1237 if vnc_bind_address: 1238 port = instance["network_port"] 1239 display = int(port) - constants.VNC_BASE_PORT 1240 if display > 0 and vnc_bind_address == constants.IP4_ADDRESS_ANY: 1241 vnc_console_port = "%s:%s (display %s)" % (instance["pnode"], 1242 port, 1243 display) 1244 elif display > 0 and netutils.IP4Address.IsValid(vnc_bind_address): 1245 vnc_console_port = ("%s:%s (node %s) (display %s)" % 1246 (vnc_bind_address, port, 1247 instance["pnode"], display)) 1248 else: 1249 # vnc bind address is a file 1250 vnc_console_port = "%s:%s" % (instance["pnode"], 1251 vnc_bind_address) 1252 buf.write(" - console connection: vnc to %s\n" % vnc_console_port) 1253 1254 FormatParameterDict(buf, instance["hv_instance"], instance["hv_actual"], 1255 level=2) 1256 buf.write(" Hardware:\n") 1257 # deprecated "memory" value, kept for one version for compatibility 1258 # 
TODO(ganeti 2.7) remove. 1259 be_actual = copy.deepcopy(instance["be_actual"]) 1260 be_actual["memory"] = be_actual[constants.BE_MAXMEM] 1261 FormatParameterDict(buf, instance["be_instance"], be_actual, level=2) 1262 # TODO(ganeti 2.7) rework the NICs as well 1263 buf.write(" - NICs:\n") 1264 for idx, (ip, mac, mode, link) in enumerate(instance["nics"]): 1265 buf.write(" - nic/%d: MAC: %s, IP: %s, mode: %s, link: %s\n" % 1266 (idx, mac, ip, mode, link)) 1267 buf.write(" Disk template: %s\n" % instance["disk_template"]) 1268 buf.write(" Disks:\n") 1269 1270 for idx, device in enumerate(instance["disks"]): 1271 _FormatList(buf, _FormatBlockDevInfo(idx, True, device, 1272 opts.roman_integers), 2) 1273 1274 ToStdout(buf.getvalue().rstrip("\n")) 1275 return retcode
1276 1277
1278  def _ConvertNicDiskModifications(mods):
1279 """Converts NIC/disk modifications from CLI to opcode. 1280 1281 When L{opcodes.OpInstanceSetParams} was changed to support adding/removing 1282 disks at arbitrary indices, its parameter format changed. This function 1283 converts legacy requests (e.g. "--net add" or "--disk add:size=4G") to the 1284 newer format and adds support for new-style requests (e.g. "--new 4:add"). 1285 1286 @type mods: list of tuples 1287 @param mods: Modifications as given by command line parser 1288 @rtype: list of tuples 1289 @return: Modifications as understood by L{opcodes.OpInstanceSetParams} 1290 1291 """ 1292 result = [] 1293 1294 for (idx, params) in mods: 1295 if idx == constants.DDM_ADD: 1296 # Add item as last item (legacy interface) 1297 action = constants.DDM_ADD 1298 idxno = -1 1299 elif idx == constants.DDM_REMOVE: 1300 # Remove last item (legacy interface) 1301 action = constants.DDM_REMOVE 1302 idxno = -1 1303 else: 1304 # Modifications and adding/removing at arbitrary indices 1305 try: 1306 idxno = int(idx) 1307 except (TypeError, ValueError): 1308 raise errors.OpPrereqError("Non-numeric index '%s'" % idx, 1309 errors.ECODE_INVAL) 1310 1311 add = params.pop(constants.DDM_ADD, _MISSING) 1312 remove = params.pop(constants.DDM_REMOVE, _MISSING) 1313 modify = params.pop(constants.DDM_MODIFY, _MISSING) 1314 1315 if modify is _MISSING: 1316 if not (add is _MISSING or remove is _MISSING): 1317 raise errors.OpPrereqError("Cannot add and remove at the same time", 1318 errors.ECODE_INVAL) 1319 elif add is not _MISSING: 1320 action = constants.DDM_ADD 1321 elif remove is not _MISSING: 1322 action = constants.DDM_REMOVE 1323 else: 1324 action = constants.DDM_MODIFY 1325 1326 elif add is _MISSING and remove is _MISSING: 1327 action = constants.DDM_MODIFY 1328 else: 1329 raise errors.OpPrereqError("Cannot modify and add/remove at the" 1330 " same time", errors.ECODE_INVAL) 1331 1332 assert not (constants.DDMS_VALUES_WITH_MODIFY & set(params.keys())) 1333 1334 if action == constants.DDM_REMOVE and params: 1335 raise errors.OpPrereqError("Not accepting parameters on removal", 1336 errors.ECODE_INVAL) 1337 1338 result.append((action, idxno, params)) 1339 1340 return result
1341 1342
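A sketch of the conversion for both the legacy and the index-based forms (parameter values are hypothetical):

    _ConvertNicDiskModifications([("add", {"size": "4G"})])
    # -> [(constants.DDM_ADD, -1, {"size": "4G"})]   append as last item (legacy form)
    _ConvertNicDiskModifications([("2", {"remove": True})])
    # -> [(constants.DDM_REMOVE, 2, {})]             remove the item at index 2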
1343  def _ParseDiskSizes(mods):
1344 """Parses disk sizes in parameters. 1345 1346 """ 1347 for (action, _, params) in mods: 1348 if params and constants.IDISK_SIZE in params: 1349 params[constants.IDISK_SIZE] = \ 1350 utils.ParseUnit(params[constants.IDISK_SIZE]) 1351 elif action == constants.DDM_ADD: 1352 raise errors.OpPrereqError("Missing required parameter 'size'", 1353 errors.ECODE_INVAL) 1354 1355 return mods
1356 1357
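Any human-readable size produced by the conversion above is then normalised to mebibytes, e.g.:

    _ParseDiskSizes([(constants.DDM_ADD, -1, {"size": "4G"})])
    # -> [(constants.DDM_ADD, -1, {"size": 4096})]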
1358  def SetInstanceParams(opts, args):
1359 """Modifies an instance. 1360 1361 All parameters take effect only at the next restart of the instance. 1362 1363 @param opts: the command line options selected by the user 1364 @type args: list 1365 @param args: should contain only one element, the instance name 1366 @rtype: int 1367 @return: the desired exit code 1368 1369 """ 1370 if not (opts.nics or opts.disks or opts.disk_template or 1371 opts.hvparams or opts.beparams or opts.os or opts.osparams or 1372 opts.offline_inst or opts.online_inst or opts.runtime_mem): 1373 ToStderr("Please give at least one of the parameters.") 1374 return 1 1375 1376 for param in opts.beparams: 1377 if isinstance(opts.beparams[param], basestring): 1378 if opts.beparams[param].lower() == "default": 1379 opts.beparams[param] = constants.VALUE_DEFAULT 1380 1381 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT, 1382 allowed_values=[constants.VALUE_DEFAULT]) 1383 1384 for param in opts.hvparams: 1385 if isinstance(opts.hvparams[param], basestring): 1386 if opts.hvparams[param].lower() == "default": 1387 opts.hvparams[param] = constants.VALUE_DEFAULT 1388 1389 utils.ForceDictType(opts.hvparams, constants.HVS_PARAMETER_TYPES, 1390 allowed_values=[constants.VALUE_DEFAULT]) 1391 1392 nics = _ConvertNicDiskModifications(opts.nics) 1393 disks = _ParseDiskSizes(_ConvertNicDiskModifications(opts.disks)) 1394 1395 if (opts.disk_template and 1396 opts.disk_template in constants.DTS_INT_MIRROR and 1397 not opts.node): 1398 ToStderr("Changing the disk template to a mirrored one requires" 1399 " specifying a secondary node") 1400 return 1 1401 1402 if opts.offline_inst: 1403 offline = True 1404 elif opts.online_inst: 1405 offline = False 1406 else: 1407 offline = None 1408 1409 op = opcodes.OpInstanceSetParams(instance_name=args[0], 1410 nics=nics, 1411 disks=disks, 1412 disk_template=opts.disk_template, 1413 remote_node=opts.node, 1414 hvparams=opts.hvparams, 1415 beparams=opts.beparams, 1416 runtime_mem=opts.runtime_mem, 1417 os_name=opts.os, 1418 osparams=opts.osparams, 1419 force_variant=opts.force_variant, 1420 force=opts.force, 1421 wait_for_sync=opts.wait_for_sync, 1422 offline=offline, 1423 ignore_ipolicy=opts.ignore_ipolicy) 1424 1425 # even if here we process the result, we allow submit only 1426 result = SubmitOrSend(op, opts) 1427 1428 if result: 1429 ToStdout("Modified instance %s", args[0]) 1430 for param, data in result: 1431 ToStdout(" - %-5s -> %s", param, data) 1432 ToStdout("Please don't forget that most parameters take effect" 1433 " only at the next (re)start of the instance initiated by" 1434 " ganeti; restarting from within the instance will" 1435 " not be enough.") 1436 return 0
1437 1438
1439  def ChangeGroup(opts, args):
1440 """Moves an instance to another group. 1441 1442 """ 1443 (instance_name, ) = args 1444 1445 cl = GetClient() 1446 1447 op = opcodes.OpInstanceChangeGroup(instance_name=instance_name, 1448 iallocator=opts.iallocator, 1449 target_groups=opts.to, 1450 early_release=opts.early_release) 1451 result = SubmitOrSend(op, opts, cl=cl) 1452 1453 # Keep track of submitted jobs 1454 jex = JobExecutor(cl=cl, opts=opts) 1455 1456 for (status, job_id) in result[constants.JOB_IDS_KEY]: 1457 jex.AddJobId(None, status, job_id) 1458 1459 results = jex.GetResults() 1460 bad_cnt = len([row for row in results if not row[0]]) 1461 if bad_cnt == 0: 1462 ToStdout("Instance '%s' changed group successfully.", instance_name) 1463 rcode = constants.EXIT_SUCCESS 1464 else: 1465 ToStdout("There were %s errors while changing group of instance '%s'.", 1466 bad_cnt, instance_name) 1467 rcode = constants.EXIT_FAILURE 1468 1469 return rcode
1470 1471 1472 # multi-instance selection options 1473 m_force_multi = cli_option("--force-multiple", dest="force_multi", 1474 help="Do not ask for confirmation when more than" 1475 " one instance is affected", 1476 action="store_true", default=False) 1477 1478 m_pri_node_opt = cli_option("--primary", dest="multi_mode", 1479 help="Filter by nodes (primary only)", 1480 const=_EXPAND_NODES_PRI, action="store_const") 1481 1482 m_sec_node_opt = cli_option("--secondary", dest="multi_mode", 1483 help="Filter by nodes (secondary only)", 1484 const=_EXPAND_NODES_SEC, action="store_const") 1485 1486 m_node_opt = cli_option("--node", dest="multi_mode", 1487 help="Filter by nodes (primary and secondary)", 1488 const=_EXPAND_NODES_BOTH, action="store_const") 1489 1490 m_clust_opt = cli_option("--all", dest="multi_mode", 1491 help="Select all instances in the cluster", 1492 const=_EXPAND_CLUSTER, action="store_const") 1493 1494 m_inst_opt = cli_option("--instance", dest="multi_mode", 1495 help="Filter by instance name [default]", 1496 const=_EXPAND_INSTANCES, action="store_const") 1497 1498 m_node_tags_opt = cli_option("--node-tags", dest="multi_mode", 1499 help="Filter by node tag", 1500 const=_EXPAND_NODES_BOTH_BY_TAGS, 1501 action="store_const") 1502 1503 m_pri_node_tags_opt = cli_option("--pri-node-tags", dest="multi_mode", 1504 help="Filter by primary node tag", 1505 const=_EXPAND_NODES_PRI_BY_TAGS, 1506 action="store_const") 1507 1508 m_sec_node_tags_opt = cli_option("--sec-node-tags", dest="multi_mode", 1509 help="Filter by secondary node tag", 1510 const=_EXPAND_NODES_SEC_BY_TAGS, 1511 action="store_const") 1512 1513 m_inst_tags_opt = cli_option("--tags", dest="multi_mode", 1514 help="Filter by instance tag", 1515 const=_EXPAND_INSTANCES_BY_TAGS, 1516 action="store_const") 1517 1518 # this is defined separately due to readability only 1519 add_opts = [ 1520 NOSTART_OPT, 1521 OS_OPT, 1522 FORCE_VARIANT_OPT, 1523 NO_INSTALL_OPT, 1524 IGNORE_IPOLICY_OPT, 1525 ] 1526 1527 commands = { 1528 "add": ( 1529 AddInstance, [ArgHost(min=1, max=1)], COMMON_CREATE_OPTS + add_opts, 1530 "[...] 
-t disk-type -n node[:secondary-node] -o os-type <name>", 1531 "Creates and adds a new instance to the cluster"), 1532 "batch-create": ( 1533 BatchCreate, [ArgFile(min=1, max=1)], [DRY_RUN_OPT, PRIORITY_OPT], 1534 "<instances.json>", 1535 "Create a bunch of instances based on specs in the file."), 1536 "console": ( 1537 ConnectToInstanceConsole, ARGS_ONE_INSTANCE, 1538 [SHOWCMD_OPT, PRIORITY_OPT], 1539 "[--show-cmd] <instance>", "Opens a console on the specified instance"), 1540 "failover": ( 1541 FailoverInstance, ARGS_ONE_INSTANCE, 1542 [FORCE_OPT, IGNORE_CONSIST_OPT, SUBMIT_OPT, SHUTDOWN_TIMEOUT_OPT, 1543 DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT, 1544 IGNORE_IPOLICY_OPT], 1545 "[-f] <instance>", "Stops the instance, changes its primary node and" 1546 " (if it was originally running) starts it on the new node" 1547 " (the secondary for mirrored instances or any node" 1548 " for shared storage)."), 1549 "migrate": ( 1550 MigrateInstance, ARGS_ONE_INSTANCE, 1551 [FORCE_OPT, NONLIVE_OPT, MIGRATION_MODE_OPT, CLEANUP_OPT, DRY_RUN_OPT, 1552 PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT, ALLOW_FAILOVER_OPT, 1553 IGNORE_IPOLICY_OPT, NORUNTIME_CHGS_OPT, SUBMIT_OPT], 1554 "[-f] <instance>", "Migrate instance to its secondary node" 1555 " (only for mirrored instances)"), 1556 "move": ( 1557 MoveInstance, ARGS_ONE_INSTANCE, 1558 [FORCE_OPT, SUBMIT_OPT, SINGLE_NODE_OPT, SHUTDOWN_TIMEOUT_OPT, 1559 DRY_RUN_OPT, PRIORITY_OPT, IGNORE_CONSIST_OPT, IGNORE_IPOLICY_OPT], 1560 "[-f] <instance>", "Move instance to an arbitrary node" 1561 " (only for instances of type file and lv)"), 1562 "info": ( 1563 ShowInstanceConfig, ARGS_MANY_INSTANCES, 1564 [STATIC_OPT, ALL_OPT, ROMAN_OPT, PRIORITY_OPT], 1565 "[-s] {--all | <instance>...}", 1566 "Show information on the specified instance(s)"), 1567 "list": ( 1568 ListInstances, ARGS_MANY_INSTANCES, 1569 [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, VERBOSE_OPT, 1570 FORCE_FILTER_OPT], 1571 "[<instance>...]", 1572 "Lists the instances and their status. The available fields can be shown" 1573 " using the \"list-fields\" command (see the man page for details)." 1574 " The default field list is (in order): %s." 
% 1575 utils.CommaJoin(_LIST_DEF_FIELDS), 1576 ), 1577 "list-fields": ( 1578 ListInstanceFields, [ArgUnknown()], 1579 [NOHDR_OPT, SEP_OPT], 1580 "[fields...]", 1581 "Lists all available fields for instances"), 1582 "reinstall": ( 1583 ReinstallInstance, [ArgInstance()], 1584 [FORCE_OPT, OS_OPT, FORCE_VARIANT_OPT, m_force_multi, m_node_opt, 1585 m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, m_node_tags_opt, 1586 m_pri_node_tags_opt, m_sec_node_tags_opt, m_inst_tags_opt, SELECT_OS_OPT, 1587 SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT, OSPARAMS_OPT], 1588 "[-f] <instance>", "Reinstall a stopped instance"), 1589 "remove": ( 1590 RemoveInstance, ARGS_ONE_INSTANCE, 1591 [FORCE_OPT, SHUTDOWN_TIMEOUT_OPT, IGNORE_FAILURES_OPT, SUBMIT_OPT, 1592 DRY_RUN_OPT, PRIORITY_OPT], 1593 "[-f] <instance>", "Shuts down the instance and removes it"), 1594 "rename": ( 1595 RenameInstance, 1596 [ArgInstance(min=1, max=1), ArgHost(min=1, max=1)], 1597 [NOIPCHECK_OPT, NONAMECHECK_OPT, SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT], 1598 "<instance> <new_name>", "Rename the instance"), 1599 "replace-disks": ( 1600 ReplaceDisks, ARGS_ONE_INSTANCE, 1601 [AUTO_REPLACE_OPT, DISKIDX_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT, 1602 NEW_SECONDARY_OPT, ON_PRIMARY_OPT, ON_SECONDARY_OPT, SUBMIT_OPT, 1603 DRY_RUN_OPT, PRIORITY_OPT, IGNORE_IPOLICY_OPT], 1604 "[-s|-p|-a|-n NODE|-I NAME] <instance>", 1605 "Replaces disks for the instance"), 1606 "modify": ( 1607 SetInstanceParams, ARGS_ONE_INSTANCE, 1608 [BACKEND_OPT, DISK_OPT, FORCE_OPT, HVOPTS_OPT, NET_OPT, SUBMIT_OPT, 1609 DISK_TEMPLATE_OPT, SINGLE_NODE_OPT, OS_OPT, FORCE_VARIANT_OPT, 1610 OSPARAMS_OPT, DRY_RUN_OPT, PRIORITY_OPT, NWSYNC_OPT, OFFLINE_INST_OPT, 1611 ONLINE_INST_OPT, IGNORE_IPOLICY_OPT, RUNTIME_MEM_OPT], 1612 "<instance>", "Alters the parameters of an instance"), 1613 "shutdown": ( 1614 GenericManyOps("shutdown", _ShutdownInstance), [ArgInstance()], 1615 [m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt, 1616 m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt, 1617 m_inst_tags_opt, m_inst_opt, m_force_multi, TIMEOUT_OPT, SUBMIT_OPT, 1618 DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT, NO_REMEMBER_OPT], 1619 "<instance>", "Stops an instance"), 1620 "startup": ( 1621 GenericManyOps("startup", _StartupInstance), [ArgInstance()], 1622 [FORCE_OPT, m_force_multi, m_node_opt, m_pri_node_opt, m_sec_node_opt, 1623 m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt, 1624 m_inst_tags_opt, m_clust_opt, m_inst_opt, SUBMIT_OPT, HVOPTS_OPT, 1625 BACKEND_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT, 1626 NO_REMEMBER_OPT, STARTUP_PAUSED_OPT], 1627 "<instance>", "Starts an instance"), 1628 "reboot": ( 1629 GenericManyOps("reboot", _RebootInstance), [ArgInstance()], 1630 [m_force_multi, REBOOT_TYPE_OPT, IGNORE_SECONDARIES_OPT, m_node_opt, 1631 m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, SUBMIT_OPT, 1632 m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt, 1633 m_inst_tags_opt, SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT], 1634 "<instance>", "Reboots an instance"), 1635 "activate-disks": ( 1636 ActivateDisks, ARGS_ONE_INSTANCE, 1637 [SUBMIT_OPT, IGNORE_SIZE_OPT, PRIORITY_OPT], 1638 "<instance>", "Activate an instance's disks"), 1639 "deactivate-disks": ( 1640 DeactivateDisks, ARGS_ONE_INSTANCE, 1641 [FORCE_OPT, SUBMIT_OPT, DRY_RUN_OPT, PRIORITY_OPT], 1642 "[-f] <instance>", "Deactivate an instance's disks"), 1643 "recreate-disks": ( 1644 RecreateDisks, ARGS_ONE_INSTANCE, 1645 [SUBMIT_OPT, DISK_OPT, NODE_PLACEMENT_OPT, DRY_RUN_OPT, 
PRIORITY_OPT], 1646 "<instance>", "Recreate an instance's disks"), 1647 "grow-disk": ( 1648 GrowDisk, 1649 [ArgInstance(min=1, max=1), ArgUnknown(min=1, max=1), 1650 ArgUnknown(min=1, max=1)], 1651 [SUBMIT_OPT, NWSYNC_OPT, DRY_RUN_OPT, PRIORITY_OPT, ABSOLUTE_OPT], 1652 "<instance> <disk> <size>", "Grow an instance's disk"), 1653 "change-group": ( 1654 ChangeGroup, ARGS_ONE_INSTANCE, 1655 [TO_GROUP_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT, PRIORITY_OPT, SUBMIT_OPT], 1656 "[-I <iallocator>] [--to <group>]", "Change group of instance"), 1657 "list-tags": ( 1658 ListTags, ARGS_ONE_INSTANCE, [], 1659 "<instance_name>", "List the tags of the given instance"), 1660 "add-tags": ( 1661 AddTags, [ArgInstance(min=1, max=1), ArgUnknown()], 1662 [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT], 1663 "<instance_name> tag...", "Add tags to the given instance"), 1664 "remove-tags": ( 1665 RemoveTags, [ArgInstance(min=1, max=1), ArgUnknown()], 1666 [TAG_SRC_OPT, PRIORITY_OPT, SUBMIT_OPT], 1667 "<instance_name> tag...", "Remove tags from given instance"), 1668 } 1669 1670 #: dictionary with aliases for commands 1671 aliases = { 1672 "start": "startup", 1673 "stop": "shutdown", 1674 "show": "info", 1675 } 1676 1677
1678  def Main():
1679 return GenericMain(commands, aliases=aliases, 1680 override={"tag_type": constants.TAG_INSTANCE}, 1681 env_override=_ENV_OVERRIDE)
1682