
Source Code for Module ganeti.client.gnt_instance

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30  """Instance related commands""" 
  31   
  32  # pylint: disable=W0401,W0614,C0103 
  33  # W0401: Wildcard import ganeti.cli 
  34  # W0614: Unused import %s from wildcard import (since we need cli) 
  35  # C0103: Invalid name gnt-instance 
  36   
  37  import copy 
  38  import itertools 
  39  import simplejson 
  40  import logging 
  41   
  42  from ganeti.cli import * 
  43  from ganeti import opcodes 
  44  from ganeti import constants 
  45  from ganeti import compat 
  46  from ganeti import utils 
  47  from ganeti import errors 
  48  from ganeti import netutils 
  49  from ganeti import ssh 
  50  from ganeti import objects 
  51  from ganeti import ht 
  52   
  53   
  54  _EXPAND_CLUSTER = "cluster" 
  55  _EXPAND_NODES_BOTH = "nodes" 
  56  _EXPAND_NODES_PRI = "nodes-pri" 
  57  _EXPAND_NODES_SEC = "nodes-sec" 
  58  _EXPAND_NODES_BOTH_BY_TAGS = "nodes-by-tags" 
  59  _EXPAND_NODES_PRI_BY_TAGS = "nodes-pri-by-tags" 
  60  _EXPAND_NODES_SEC_BY_TAGS = "nodes-sec-by-tags" 
  61  _EXPAND_INSTANCES = "instances" 
  62  _EXPAND_INSTANCES_BY_TAGS = "instances-by-tags" 
  63   
  64  _EXPAND_NODES_TAGS_MODES = compat.UniqueFrozenset([ 
  65    _EXPAND_NODES_BOTH_BY_TAGS, 
  66    _EXPAND_NODES_PRI_BY_TAGS, 
  67    _EXPAND_NODES_SEC_BY_TAGS, 
  68    ]) 
  69   
  70  #: default list of options for L{ListInstances} 
  71  _LIST_DEF_FIELDS = [ 
  72    "name", "hypervisor", "os", "pnode", "status", "oper_ram", 
  73    ] 
  74   
  75  _MISSING = object() 
  76  _ENV_OVERRIDE = compat.UniqueFrozenset(["list"]) 
  77   
  78  _INST_DATA_VAL = ht.TListOf(ht.TDict) 
  79   
  80   
def _ExpandMultiNames(mode, names, client=None):
  """Expand the given names using the passed mode.

  For _EXPAND_CLUSTER, all instances will be returned. For
  _EXPAND_NODES_PRI/SEC, all instances having those nodes as
  primary/secondary will be returned. For _EXPAND_NODES_BOTH, all
  instances having those nodes as either primary or secondary will be
  returned. For _EXPAND_INSTANCES, the given instances will be
  returned.

  @param mode: one of L{_EXPAND_CLUSTER}, L{_EXPAND_NODES_BOTH},
      L{_EXPAND_NODES_PRI}, L{_EXPAND_NODES_SEC} or
      L{_EXPAND_INSTANCES}
  @param names: a list of names; for cluster, it must be empty,
      and for node and instance it must be a list of valid item
      names (short names are valid as usual, e.g. node1 instead of
      node1.example.com)
  @rtype: list
  @return: the list of names after the expansion
  @raise errors.ProgrammerError: for unknown selection type
  @raise errors.OpPrereqError: for invalid input parameters

  """
  # pylint: disable=W0142

  if client is None:
    client = GetClient()
  if mode == _EXPAND_CLUSTER:
    if names:
      raise errors.OpPrereqError("Cluster filter mode takes no arguments",
                                 errors.ECODE_INVAL)
    idata = client.QueryInstances([], ["name"], False)
    inames = [row[0] for row in idata]

  elif (mode in _EXPAND_NODES_TAGS_MODES or
        mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_PRI, _EXPAND_NODES_SEC)):
    if mode in _EXPAND_NODES_TAGS_MODES:
      if not names:
        raise errors.OpPrereqError("No node tags passed", errors.ECODE_INVAL)
      ndata = client.QueryNodes([], ["name", "pinst_list",
                                     "sinst_list", "tags"], False)
      ndata = [row for row in ndata if set(row[3]).intersection(names)]
    else:
      if not names:
        raise errors.OpPrereqError("No node names passed", errors.ECODE_INVAL)
      ndata = client.QueryNodes(names, ["name", "pinst_list", "sinst_list"],
                                False)

    ipri = [row[1] for row in ndata]
    pri_names = list(itertools.chain(*ipri))
    isec = [row[2] for row in ndata]
    sec_names = list(itertools.chain(*isec))
    if mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_BOTH_BY_TAGS):
      inames = pri_names + sec_names
    elif mode in (_EXPAND_NODES_PRI, _EXPAND_NODES_PRI_BY_TAGS):
      inames = pri_names
    elif mode in (_EXPAND_NODES_SEC, _EXPAND_NODES_SEC_BY_TAGS):
      inames = sec_names
    else:
      raise errors.ProgrammerError("Unhandled shutdown type")
  elif mode == _EXPAND_INSTANCES:
    if not names:
      raise errors.OpPrereqError("No instance names passed",
                                 errors.ECODE_INVAL)
    idata = client.QueryInstances(names, ["name"], False)
    inames = [row[0] for row in idata]
  elif mode == _EXPAND_INSTANCES_BY_TAGS:
    if not names:
      raise errors.OpPrereqError("No instance tags passed",
                                 errors.ECODE_INVAL)
    idata = client.QueryInstances([], ["name", "tags"], False)
    inames = [row[0] for row in idata if set(row[1]).intersection(names)]
  else:
    raise errors.OpPrereqError("Unknown mode '%s'" % mode, errors.ECODE_INVAL)

  return inames

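The mapping between the selection constants defined at the top of the module and the queries issued here can be summarised with a short usage sketch (illustrative only, not part of the module; the node and instance names are made up):

  # every instance in the cluster (the "--all" selection)
  inames = _ExpandMultiNames(_EXPAND_CLUSTER, [])
  # instances whose primary node is node1 (the "--primary" selection)
  inames = _ExpandMultiNames(_EXPAND_NODES_PRI, ["node1"])
  # an explicit instance list (the default "--instance" selection)
  inames = _ExpandMultiNames(_EXPAND_INSTANCES, ["inst1", "inst2"])
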
def _EnsureInstancesExist(client, names):
  """Check for and ensure the given instance names exist.

  This function will raise an OpPrereqError in case they don't
  exist. Otherwise it will exit cleanly.

  @type client: L{ganeti.luxi.Client}
  @param client: the client to use for the query
  @type names: list
  @param names: the list of instance names to query
  @raise errors.OpPrereqError: in case any instance is missing

  """
  # TODO: change LUInstanceQuery so that it actually returns None
  # instead of raising an exception, or devise a better mechanism
  result = client.QueryInstances(names, ["name"], False)
  for orig_name, row in zip(names, result):
    if row[0] is None:
      raise errors.OpPrereqError("Instance '%s' does not exist" % orig_name,
                                 errors.ECODE_NOENT)

def GenericManyOps(operation, fn):
  """Generic multi-instance operations.

  This will return a wrapper that processes the options and arguments
  given, and uses the passed function to build the opcode needed for
  the specific operation. Thus all the generic loop/confirmation code
  is abstracted into this function.

  """
  def realfn(opts, args):
    if opts.multi_mode is None:
      opts.multi_mode = _EXPAND_INSTANCES
    cl = GetClient()
    inames = _ExpandMultiNames(opts.multi_mode, args, client=cl)
    if not inames:
      if opts.multi_mode == _EXPAND_CLUSTER:
        ToStdout("Cluster is empty, no instances to shutdown")
        return 0
      raise errors.OpPrereqError("Selection filter does not match"
                                 " any instances", errors.ECODE_INVAL)
    multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(inames) > 1
    if not (opts.force_multi or not multi_on
            or ConfirmOperation(inames, "instances", operation)):
      return 1
    jex = JobExecutor(verbose=multi_on, cl=cl, opts=opts)
    for name in inames:
      op = fn(name, opts)
      jex.QueueJob(name, op)
    results = jex.WaitOrShow(not opts.submit_only)
    rcode = compat.all(row[0] for row in results)
    return int(not rcode)
  return realfn

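For context, this factory is what turns the single-opcode builders defined below (_StartupInstance, _RebootInstance, _ShutdownInstance) into complete commands in the "commands" table at the end of the module. A rough sketch of the wiring:

  # as used in the "commands" dictionary further down
  shutdown_fn = GenericManyOps("shutdown", _ShutdownInstance)
  # shutdown_fn(opts, args) expands the instance selection, asks for
  # confirmation when more than one instance is affected, and queues
  # one OpInstanceShutdown job per selected instance.
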
def ListInstances(opts, args):
  """List instances and their properties.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  selected_fields = ParseFields(opts.output, _LIST_DEF_FIELDS)

  fmtoverride = dict.fromkeys(["tags", "disk.sizes", "nic.macs", "nic.ips",
                               "nic.modes", "nic.links", "nic.bridges",
                               "nic.networks",
                               "snodes", "snodes.group", "snodes.group.uuid"],
                              (lambda value: ",".join(str(item)
                                                      for item in value),
                               False))

  return GenericList(constants.QR_INSTANCE, selected_fields, args, opts.units,
                     opts.separator, not opts.no_headers,
                     format_override=fmtoverride, verbose=opts.verbose,
                     force_filter=opts.force_filter)

def ListInstanceFields(opts, args):
  """List instance fields.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: fields to list, or empty for all
  @rtype: int
  @return: the desired exit code

  """
  return GenericListFields(constants.QR_INSTANCE, args, opts.separator,
                           not opts.no_headers)

def AddInstance(opts, args):
  """Add an instance to the cluster.

  This is just a wrapper over GenericInstanceCreate.

  """
  return GenericInstanceCreate(constants.INSTANCE_CREATE, opts, args)

264 -def BatchCreate(opts, args):
265 """Create instances using a definition file. 266 267 This function reads a json file with L{opcodes.OpInstanceCreate} 268 serialisations. 269 270 @param opts: the command line options selected by the user 271 @type args: list 272 @param args: should contain one element, the json filename 273 @rtype: int 274 @return: the desired exit code 275 276 """ 277 (json_filename,) = args 278 cl = GetClient() 279 280 try: 281 instance_data = simplejson.loads(utils.ReadFile(json_filename)) 282 except Exception, err: # pylint: disable=W0703 283 ToStderr("Can't parse the instance definition file: %s" % str(err)) 284 return 1 285 286 if not _INST_DATA_VAL(instance_data): 287 ToStderr("The instance definition file is not %s" % _INST_DATA_VAL) 288 return 1 289 290 instances = [] 291 possible_params = set(opcodes.OpInstanceCreate.GetAllSlots()) 292 for (idx, inst) in enumerate(instance_data): 293 unknown = set(inst.keys()) - possible_params 294 295 if unknown: 296 # TODO: Suggest closest match for more user friendly experience 297 raise errors.OpPrereqError("Unknown fields in definition %s: %s" % 298 (idx, utils.CommaJoin(unknown)), 299 errors.ECODE_INVAL) 300 301 op = opcodes.OpInstanceCreate(**inst) # pylint: disable=W0142 302 op.Validate(False) 303 instances.append(op) 304 305 op = opcodes.OpInstanceMultiAlloc(iallocator=opts.iallocator, 306 instances=instances) 307 result = SubmitOrSend(op, opts, cl=cl) 308 309 # Keep track of submitted jobs 310 jex = JobExecutor(cl=cl, opts=opts) 311 312 for (status, job_id) in result[constants.JOB_IDS_KEY]: 313 jex.AddJobId(None, status, job_id) 314 315 results = jex.GetResults() 316 bad_cnt = len([row for row in results if not row[0]]) 317 if bad_cnt == 0: 318 ToStdout("All instances created successfully.") 319 rcode = constants.EXIT_SUCCESS 320 else: 321 ToStdout("There were %s errors during the creation.", bad_cnt) 322 rcode = constants.EXIT_FAILURE 323 324 return rcode
325 326
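The expected input for BatchCreate is a JSON list of dictionaries whose keys must be OpInstanceCreate parameters (anything else is rejected above). A minimal illustrative file might look as follows; the instance name, OS, allocator and sizes are assumptions, and disk sizes are written as plain MiB integers since no unit parsing happens in this code path:

  [
    {"instance_name": "inst1.example.com",
     "mode": "create",
     "disk_template": "plain",
     "disks": [{"size": 10240}],
     "nics": [{}],
     "os_type": "debootstrap+default",
     "iallocator": "hail"}
  ]
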
327 -def ReinstallInstance(opts, args):
328 """Reinstall an instance. 329 330 @param opts: the command line options selected by the user 331 @type args: list 332 @param args: should contain only one element, the name of the 333 instance to be reinstalled 334 @rtype: int 335 @return: the desired exit code 336 337 """ 338 # first, compute the desired name list 339 if opts.multi_mode is None: 340 opts.multi_mode = _EXPAND_INSTANCES 341 342 inames = _ExpandMultiNames(opts.multi_mode, args) 343 if not inames: 344 raise errors.OpPrereqError("Selection filter does not match any instances", 345 errors.ECODE_INVAL) 346 347 # second, if requested, ask for an OS 348 if opts.select_os is True: 349 op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[]) 350 result = SubmitOpCode(op, opts=opts) 351 352 if not result: 353 ToStdout("Can't get the OS list") 354 return 1 355 356 ToStdout("Available OS templates:") 357 number = 0 358 choices = [] 359 for (name, variants) in result: 360 for entry in CalculateOSNames(name, variants): 361 ToStdout("%3s: %s", number, entry) 362 choices.append(("%s" % number, entry, entry)) 363 number += 1 364 365 choices.append(("x", "exit", "Exit gnt-instance reinstall")) 366 selected = AskUser("Enter OS template number (or x to abort):", 367 choices) 368 369 if selected == "exit": 370 ToStderr("User aborted reinstall, exiting") 371 return 1 372 373 os_name = selected 374 os_msg = "change the OS to '%s'" % selected 375 else: 376 os_name = opts.os 377 if opts.os is not None: 378 os_msg = "change the OS to '%s'" % os_name 379 else: 380 os_msg = "keep the same OS" 381 382 # third, get confirmation: multi-reinstall requires --force-multi, 383 # single-reinstall either --force or --force-multi (--force-multi is 384 # a stronger --force) 385 multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(inames) > 1 386 if multi_on: 387 warn_msg = ("Note: this will remove *all* data for the" 388 " below instances! It will %s.\n" % os_msg) 389 if not (opts.force_multi or 390 ConfirmOperation(inames, "instances", "reinstall", extra=warn_msg)): 391 return 1 392 else: 393 if not (opts.force or opts.force_multi): 394 usertext = ("This will reinstall the instance '%s' (and %s) which" 395 " removes all data. Continue?") % (inames[0], os_msg) 396 if not AskUser(usertext): 397 return 1 398 399 jex = JobExecutor(verbose=multi_on, opts=opts) 400 for instance_name in inames: 401 op = opcodes.OpInstanceReinstall(instance_name=instance_name, 402 os_type=os_name, 403 force_variant=opts.force_variant, 404 osparams=opts.osparams) 405 jex.QueueJob(instance_name, op) 406 407 results = jex.WaitOrShow(not opts.submit_only) 408 409 if compat.all(map(compat.fst, results)): 410 return constants.EXIT_SUCCESS 411 else: 412 return constants.EXIT_FAILURE
413 414
def RemoveInstance(opts, args):
  """Remove an instance.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the name of
      the instance to be removed
  @rtype: int
  @return: the desired exit code

  """
  instance_name = args[0]
  force = opts.force
  cl = GetClient()

  if not force:
    _EnsureInstancesExist(cl, [instance_name])

    usertext = ("This will remove the volumes of the instance %s"
                " (including mirrors), thus removing all the data"
                " of the instance. Continue?") % instance_name
    if not AskUser(usertext):
      return 1

  op = opcodes.OpInstanceRemove(instance_name=instance_name,
                                ignore_failures=opts.ignore_failures,
                                shutdown_timeout=opts.shutdown_timeout)
  SubmitOrSend(op, opts, cl=cl)
  return 0

def RenameInstance(opts, args):
  """Rename an instance.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain two elements, the old and the
      new instance names
  @rtype: int
  @return: the desired exit code

  """
  if not opts.name_check:
    if not AskUser("As you disabled the check of the DNS entry, please verify"
                   " that '%s' is a FQDN. Continue?" % args[1]):
      return 1

  op = opcodes.OpInstanceRename(instance_name=args[0],
                                new_name=args[1],
                                ip_check=opts.ip_check,
                                name_check=opts.name_check)
  result = SubmitOrSend(op, opts)

  if result:
    ToStdout("Instance '%s' renamed to '%s'", args[0], result)

  return 0

def ActivateDisks(opts, args):
  """Activate an instance's disks.

  This serves two purposes:
    - it allows (as long as the instance is not running)
      mounting the disks and modifying them from the node
    - it repairs inactive secondary drbds

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  instance_name = args[0]
  op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
                                       ignore_size=opts.ignore_size,
                                       wait_for_sync=opts.wait_for_sync)
  disks_info = SubmitOrSend(op, opts)
  for host, iname, nname in disks_info:
    ToStdout("%s:%s:%s", host, iname, nname)
  return 0

def DeactivateDisks(opts, args):
  """Deactivate an instance's disks.

  This function takes the instance name, looks for its primary node
  and then tries to shut down its block devices on that node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  instance_name = args[0]
  op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name,
                                         force=opts.force)
  SubmitOrSend(op, opts)
  return 0

def RecreateDisks(opts, args):
  """Recreate an instance's disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  instance_name = args[0]

  disks = []

  if opts.disks:
    for didx, ddict in opts.disks:
      didx = int(didx)

      if not ht.TDict(ddict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

      if constants.IDISK_SIZE in ddict:
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)

      disks.append((didx, ddict))

  # TODO: Verify modifiable parameters (already done in
  # LUInstanceRecreateDisks, but it'd be nice to have in the client)

  if opts.node:
    if opts.iallocator:
      msg = "At most one of either --nodes or --iallocator can be passed"
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
    pnode, snode = SplitNodeOption(opts.node)
    nodes = [pnode]
    if snode is not None:
      nodes.append(snode)
  else:
    nodes = []

  op = opcodes.OpInstanceRecreateDisks(instance_name=instance_name,
                                       disks=disks, nodes=nodes,
                                       iallocator=opts.iallocator)
  SubmitOrSend(op, opts)

  return 0

def GrowDisk(opts, args):
  """Grow an instance's disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain three elements, the target instance name,
      the target disk id, and the target growth
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]
  disk = args[1]
  try:
    disk = int(disk)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid disk index: %s" % str(err),
                               errors.ECODE_INVAL)
  try:
    amount = utils.ParseUnit(args[2])
  except errors.UnitParseError:
    raise errors.OpPrereqError("Can't parse the given amount '%s'" % args[2],
                               errors.ECODE_INVAL)
  op = opcodes.OpInstanceGrowDisk(instance_name=instance,
                                  disk=disk, amount=amount,
                                  wait_for_sync=opts.wait_for_sync,
                                  absolute=opts.absolute)
  SubmitOrSend(op, opts)
  return 0

def _StartupInstance(name, opts):
  """Startup instances.

  This returns the opcode to start an instance, and its decorator will
  wrap this into a loop starting all desired instances.

  @param name: the name of the instance to act on
  @param opts: the command line options selected by the user
  @return: the opcode needed for the operation

  """
  op = opcodes.OpInstanceStartup(instance_name=name,
                                 force=opts.force,
                                 ignore_offline_nodes=opts.ignore_offline,
                                 no_remember=opts.no_remember,
                                 startup_paused=opts.startup_paused)
  # do not add these parameters to the opcode unless they're defined
  if opts.hvparams:
    op.hvparams = opts.hvparams
  if opts.beparams:
    op.beparams = opts.beparams
  return op

def _RebootInstance(name, opts):
  """Reboot instance(s).

  This returns the opcode to reboot an instance, and its decorator
  will wrap this into a loop rebooting all desired instances.

  @param name: the name of the instance to act on
  @param opts: the command line options selected by the user
  @return: the opcode needed for the operation

  """
  return opcodes.OpInstanceReboot(instance_name=name,
                                  reboot_type=opts.reboot_type,
                                  ignore_secondaries=opts.ignore_secondaries,
                                  shutdown_timeout=opts.shutdown_timeout)

def _ShutdownInstance(name, opts):
  """Shutdown an instance.

  This returns the opcode to shutdown an instance, and its decorator
  will wrap this into a loop shutting down all desired instances.

  @param name: the name of the instance to act on
  @param opts: the command line options selected by the user
  @return: the opcode needed for the operation

  """
  return opcodes.OpInstanceShutdown(instance_name=name,
                                    force=opts.force,
                                    timeout=opts.timeout,
                                    ignore_offline_nodes=opts.ignore_offline,
                                    no_remember=opts.no_remember)

663 -def ReplaceDisks(opts, args):
664 """Replace the disks of an instance 665 666 @param opts: the command line options selected by the user 667 @type args: list 668 @param args: should contain only one element, the instance name 669 @rtype: int 670 @return: the desired exit code 671 672 """ 673 new_2ndary = opts.dst_node 674 iallocator = opts.iallocator 675 if opts.disks is None: 676 disks = [] 677 else: 678 try: 679 disks = [int(i) for i in opts.disks.split(",")] 680 except (TypeError, ValueError), err: 681 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err), 682 errors.ECODE_INVAL) 683 cnt = [opts.on_primary, opts.on_secondary, opts.auto, 684 new_2ndary is not None, iallocator is not None].count(True) 685 if cnt != 1: 686 raise errors.OpPrereqError("One and only one of the -p, -s, -a, -n and -I" 687 " options must be passed", errors.ECODE_INVAL) 688 elif opts.on_primary: 689 mode = constants.REPLACE_DISK_PRI 690 elif opts.on_secondary: 691 mode = constants.REPLACE_DISK_SEC 692 elif opts.auto: 693 mode = constants.REPLACE_DISK_AUTO 694 if disks: 695 raise errors.OpPrereqError("Cannot specify disks when using automatic" 696 " mode", errors.ECODE_INVAL) 697 elif new_2ndary is not None or iallocator is not None: 698 # replace secondary 699 mode = constants.REPLACE_DISK_CHG 700 701 op = opcodes.OpInstanceReplaceDisks(instance_name=args[0], disks=disks, 702 remote_node=new_2ndary, mode=mode, 703 iallocator=iallocator, 704 early_release=opts.early_release, 705 ignore_ipolicy=opts.ignore_ipolicy) 706 SubmitOrSend(op, opts) 707 return 0
708 709
710 -def FailoverInstance(opts, args):
711 """Failover an instance. 712 713 The failover is done by shutting it down on its present node and 714 starting it on the secondary. 715 716 @param opts: the command line options selected by the user 717 @type args: list 718 @param args: should contain only one element, the instance name 719 @rtype: int 720 @return: the desired exit code 721 722 """ 723 cl = GetClient() 724 instance_name = args[0] 725 force = opts.force 726 iallocator = opts.iallocator 727 target_node = opts.dst_node 728 729 if iallocator and target_node: 730 raise errors.OpPrereqError("Specify either an iallocator (-I), or a target" 731 " node (-n) but not both", errors.ECODE_INVAL) 732 733 if not force: 734 _EnsureInstancesExist(cl, [instance_name]) 735 736 usertext = ("Failover will happen to image %s." 737 " This requires a shutdown of the instance. Continue?" % 738 (instance_name,)) 739 if not AskUser(usertext): 740 return 1 741 742 op = opcodes.OpInstanceFailover(instance_name=instance_name, 743 ignore_consistency=opts.ignore_consistency, 744 shutdown_timeout=opts.shutdown_timeout, 745 iallocator=iallocator, 746 target_node=target_node, 747 ignore_ipolicy=opts.ignore_ipolicy) 748 SubmitOrSend(op, opts, cl=cl) 749 return 0
750 751
752 -def MigrateInstance(opts, args):
753 """Migrate an instance. 754 755 The migrate is done without shutdown. 756 757 @param opts: the command line options selected by the user 758 @type args: list 759 @param args: should contain only one element, the instance name 760 @rtype: int 761 @return: the desired exit code 762 763 """ 764 cl = GetClient() 765 instance_name = args[0] 766 force = opts.force 767 iallocator = opts.iallocator 768 target_node = opts.dst_node 769 770 if iallocator and target_node: 771 raise errors.OpPrereqError("Specify either an iallocator (-I), or a target" 772 " node (-n) but not both", errors.ECODE_INVAL) 773 774 if not force: 775 _EnsureInstancesExist(cl, [instance_name]) 776 777 if opts.cleanup: 778 usertext = ("Instance %s will be recovered from a failed migration." 779 " Note that the migration procedure (including cleanup)" % 780 (instance_name,)) 781 else: 782 usertext = ("Instance %s will be migrated. Note that migration" % 783 (instance_name,)) 784 usertext += (" might impact the instance if anything goes wrong" 785 " (e.g. due to bugs in the hypervisor). Continue?") 786 if not AskUser(usertext): 787 return 1 788 789 # this should be removed once --non-live is deprecated 790 if not opts.live and opts.migration_mode is not None: 791 raise errors.OpPrereqError("Only one of the --non-live and " 792 "--migration-mode options can be passed", 793 errors.ECODE_INVAL) 794 if not opts.live: # --non-live passed 795 mode = constants.HT_MIGRATION_NONLIVE 796 else: 797 mode = opts.migration_mode 798 799 op = opcodes.OpInstanceMigrate(instance_name=instance_name, mode=mode, 800 cleanup=opts.cleanup, iallocator=iallocator, 801 target_node=target_node, 802 allow_failover=opts.allow_failover, 803 allow_runtime_changes=opts.allow_runtime_chgs, 804 ignore_ipolicy=opts.ignore_ipolicy) 805 SubmitOrSend(op, cl=cl, opts=opts) 806 return 0
807 808
809 -def MoveInstance(opts, args):
810 """Move an instance. 811 812 @param opts: the command line options selected by the user 813 @type args: list 814 @param args: should contain only one element, the instance name 815 @rtype: int 816 @return: the desired exit code 817 818 """ 819 cl = GetClient() 820 instance_name = args[0] 821 force = opts.force 822 823 if not force: 824 usertext = ("Instance %s will be moved." 825 " This requires a shutdown of the instance. Continue?" % 826 (instance_name,)) 827 if not AskUser(usertext): 828 return 1 829 830 op = opcodes.OpInstanceMove(instance_name=instance_name, 831 target_node=opts.node, 832 shutdown_timeout=opts.shutdown_timeout, 833 ignore_consistency=opts.ignore_consistency, 834 ignore_ipolicy=opts.ignore_ipolicy) 835 SubmitOrSend(op, opts, cl=cl) 836 return 0
837 838
839 -def ConnectToInstanceConsole(opts, args):
840 """Connect to the console of an instance. 841 842 @param opts: the command line options selected by the user 843 @type args: list 844 @param args: should contain only one element, the instance name 845 @rtype: int 846 @return: the desired exit code 847 848 """ 849 instance_name = args[0] 850 851 cl = GetClient() 852 try: 853 cluster_name = cl.QueryConfigValues(["cluster_name"])[0] 854 idata = cl.QueryInstances([instance_name], ["console", "oper_state"], False) 855 if not idata: 856 raise errors.OpPrereqError("Instance '%s' does not exist" % instance_name, 857 errors.ECODE_NOENT) 858 finally: 859 # Ensure client connection is closed while external commands are run 860 cl.Close() 861 862 del cl 863 864 ((console_data, oper_state), ) = idata 865 if not console_data: 866 if oper_state: 867 # Instance is running 868 raise errors.OpExecError("Console information for instance %s is" 869 " unavailable" % instance_name) 870 else: 871 raise errors.OpExecError("Instance %s is not running, can't get console" % 872 instance_name) 873 874 return _DoConsole(objects.InstanceConsole.FromDict(console_data), 875 opts.show_command, cluster_name)
876 877
878 -def _DoConsole(console, show_command, cluster_name, feedback_fn=ToStdout, 879 _runcmd_fn=utils.RunCmd):
880 """Acts based on the result of L{opcodes.OpInstanceConsole}. 881 882 @type console: L{objects.InstanceConsole} 883 @param console: Console object 884 @type show_command: bool 885 @param show_command: Whether to just display commands 886 @type cluster_name: string 887 @param cluster_name: Cluster name as retrieved from master daemon 888 889 """ 890 assert console.Validate() 891 892 if console.kind == constants.CONS_MESSAGE: 893 feedback_fn(console.message) 894 elif console.kind == constants.CONS_VNC: 895 feedback_fn("Instance %s has VNC listening on %s:%s (display %s)," 896 " URL <vnc://%s:%s/>", 897 console.instance, console.host, console.port, 898 console.display, console.host, console.port) 899 elif console.kind == constants.CONS_SPICE: 900 feedback_fn("Instance %s has SPICE listening on %s:%s", console.instance, 901 console.host, console.port) 902 elif console.kind == constants.CONS_SSH: 903 # Convert to string if not already one 904 if isinstance(console.command, basestring): 905 cmd = console.command 906 else: 907 cmd = utils.ShellQuoteArgs(console.command) 908 909 srun = ssh.SshRunner(cluster_name=cluster_name) 910 ssh_cmd = srun.BuildCmd(console.host, console.user, cmd, 911 batch=True, quiet=False, tty=True) 912 913 if show_command: 914 feedback_fn(utils.ShellQuoteArgs(ssh_cmd)) 915 else: 916 result = _runcmd_fn(ssh_cmd, interactive=True) 917 if result.failed: 918 logging.error("Console command \"%s\" failed with reason '%s' and" 919 " output %r", result.cmd, result.fail_reason, 920 result.output) 921 raise errors.OpExecError("Connection to console of instance %s failed," 922 " please check cluster configuration" % 923 console.instance) 924 else: 925 raise errors.GenericError("Unknown console type '%s'" % console.kind) 926 927 return constants.EXIT_SUCCESS
928 929
def _FormatDiskDetails(dev_type, dev, roman):
  """Formats the logical_id of a disk.

  """
  if dev_type == constants.DT_DRBD8:
    drbd_info = dev["drbd_info"]
    data = [
      ("nodeA", "%s, minor=%s" %
       (drbd_info["primary_node"],
        compat.TryToRoman(drbd_info["primary_minor"],
                          convert=roman))),
      ("nodeB", "%s, minor=%s" %
       (drbd_info["secondary_node"],
        compat.TryToRoman(drbd_info["secondary_minor"],
                          convert=roman))),
      ("port", str(compat.TryToRoman(drbd_info["port"], convert=roman))),
      ]
  elif dev_type == constants.DT_PLAIN:
    vg_name, lv_name = dev["logical_id"]
    data = ["%s/%s" % (vg_name, lv_name)]
  else:
    data = [str(dev["logical_id"])]

  return data

956 -def _FormatBlockDevInfo(idx, top_level, dev, roman):
957 """Show block device information. 958 959 This is only used by L{ShowInstanceConfig}, but it's too big to be 960 left for an inline definition. 961 962 @type idx: int 963 @param idx: the index of the current disk 964 @type top_level: boolean 965 @param top_level: if this a top-level disk? 966 @type dev: dict 967 @param dev: dictionary with disk information 968 @type roman: boolean 969 @param roman: whether to try to use roman integers 970 @return: a list of either strings, tuples or lists 971 (which should be formatted at a higher indent level) 972 973 """ 974 def helper(dtype, status): 975 """Format one line for physical device status. 976 977 @type dtype: str 978 @param dtype: a constant from the L{constants.DTS_BLOCK} set 979 @type status: tuple 980 @param status: a tuple as returned from L{backend.FindBlockDevice} 981 @return: the string representing the status 982 983 """ 984 if not status: 985 return "not active" 986 txt = "" 987 (path, major, minor, syncp, estt, degr, ldisk_status) = status 988 if major is None: 989 major_string = "N/A" 990 else: 991 major_string = str(compat.TryToRoman(major, convert=roman)) 992 993 if minor is None: 994 minor_string = "N/A" 995 else: 996 minor_string = str(compat.TryToRoman(minor, convert=roman)) 997 998 txt += ("%s (%s:%s)" % (path, major_string, minor_string)) 999 if dtype in (constants.DT_DRBD8, ): 1000 if syncp is not None: 1001 sync_text = "*RECOVERING* %5.2f%%," % syncp 1002 if estt: 1003 sync_text += " ETA %ss" % compat.TryToRoman(estt, convert=roman) 1004 else: 1005 sync_text += " ETA unknown" 1006 else: 1007 sync_text = "in sync" 1008 if degr: 1009 degr_text = "*DEGRADED*" 1010 else: 1011 degr_text = "ok" 1012 if ldisk_status == constants.LDS_FAULTY: 1013 ldisk_text = " *MISSING DISK*" 1014 elif ldisk_status == constants.LDS_UNKNOWN: 1015 ldisk_text = " *UNCERTAIN STATE*" 1016 else: 1017 ldisk_text = "" 1018 txt += (" %s, status %s%s" % (sync_text, degr_text, ldisk_text)) 1019 elif dtype == constants.DT_PLAIN: 1020 if ldisk_status == constants.LDS_FAULTY: 1021 ldisk_text = " *FAILED* (failed drive?)" 1022 else: 1023 ldisk_text = "" 1024 txt += ldisk_text 1025 return txt
1026 1027 # the header 1028 if top_level: 1029 if dev["iv_name"] is not None: 1030 txt = dev["iv_name"] 1031 else: 1032 txt = "disk %s" % compat.TryToRoman(idx, convert=roman) 1033 else: 1034 txt = "child %s" % compat.TryToRoman(idx, convert=roman) 1035 if isinstance(dev["size"], int): 1036 nice_size = utils.FormatUnit(dev["size"], "h") 1037 else: 1038 nice_size = str(dev["size"]) 1039 data = [(txt, "%s, size %s" % (dev["dev_type"], nice_size))] 1040 if top_level: 1041 if dev["spindles"] is not None: 1042 data.append(("spindles", dev["spindles"])) 1043 data.append(("access mode", dev["mode"])) 1044 if dev["logical_id"] is not None: 1045 try: 1046 l_id = _FormatDiskDetails(dev["dev_type"], dev, roman) 1047 except ValueError: 1048 l_id = [str(dev["logical_id"])] 1049 if len(l_id) == 1: 1050 data.append(("logical_id", l_id[0])) 1051 else: 1052 data.extend(l_id) 1053 1054 if dev["pstatus"]: 1055 data.append(("on primary", helper(dev["dev_type"], dev["pstatus"]))) 1056 1057 if dev["sstatus"]: 1058 data.append(("on secondary", helper(dev["dev_type"], dev["sstatus"]))) 1059 1060 data.append(("name", dev["name"])) 1061 data.append(("UUID", dev["uuid"])) 1062 1063 if dev["children"]: 1064 data.append(("child devices", [ 1065 _FormatBlockDevInfo(c_idx, False, child, roman) 1066 for c_idx, child in enumerate(dev["children"]) 1067 ])) 1068 return data 1069 1070
def _FormatInstanceNicInfo(idx, nic):
  """Helper function for L{_FormatInstanceInfo()}"""
  (name, uuid, ip, mac, mode, link, vlan, _, netinfo) = nic
  network_name = None
  if netinfo:
    network_name = netinfo["name"]
  return [
    ("nic/%d" % idx, ""),
    ("MAC", str(mac)),
    ("IP", str(ip)),
    ("mode", str(mode)),
    ("link", str(link)),
    ("vlan", str(vlan)),
    ("network", str(network_name)),
    ("UUID", str(uuid)),
    ("name", str(name)),
    ]

def _FormatInstanceNodesInfo(instance):
  """Helper function for L{_FormatInstanceInfo()}"""
  pgroup = ("%s (UUID %s)" %
            (instance["pnode_group_name"], instance["pnode_group_uuid"]))
  secs = utils.CommaJoin(("%s (group %s, group UUID %s)" %
                          (name, group_name, group_uuid))
                         for (name, group_name, group_uuid) in
                         zip(instance["snodes"],
                             instance["snodes_group_names"],
                             instance["snodes_group_uuids"]))
  return [
    [
      ("primary", instance["pnode"]),
      ("group", pgroup),
      ],
    [("secondaries", secs)],
    ]

def _GetVncConsoleInfo(instance):
  """Helper function for L{_FormatInstanceInfo()}"""
  vnc_bind_address = instance["hv_actual"].get(constants.HV_VNC_BIND_ADDRESS,
                                               None)
  if vnc_bind_address:
    port = instance["network_port"]
    display = int(port) - constants.VNC_BASE_PORT
    if display > 0 and vnc_bind_address == constants.IP4_ADDRESS_ANY:
      vnc_console_port = "%s:%s (display %s)" % (instance["pnode"],
                                                 port,
                                                 display)
    elif display > 0 and netutils.IP4Address.IsValid(vnc_bind_address):
      vnc_console_port = ("%s:%s (node %s) (display %s)" %
                          (vnc_bind_address, port,
                           instance["pnode"], display))
    else:
      # vnc bind address is a file
      vnc_console_port = "%s:%s" % (instance["pnode"],
                                    vnc_bind_address)
    ret = "vnc to %s" % vnc_console_port
  else:
    ret = None
  return ret

1134 -def _FormatInstanceInfo(instance, roman_integers):
1135 """Format instance information for L{cli.PrintGenericInfo()}""" 1136 istate = "configured to be %s" % instance["config_state"] 1137 if instance["run_state"]: 1138 istate += ", actual state is %s" % instance["run_state"] 1139 info = [ 1140 ("Instance name", instance["name"]), 1141 ("UUID", instance["uuid"]), 1142 ("Serial number", 1143 str(compat.TryToRoman(instance["serial_no"], convert=roman_integers))), 1144 ("Creation time", utils.FormatTime(instance["ctime"])), 1145 ("Modification time", utils.FormatTime(instance["mtime"])), 1146 ("State", istate), 1147 ("Nodes", _FormatInstanceNodesInfo(instance)), 1148 ("Operating system", instance["os"]), 1149 ("Operating system parameters", 1150 FormatParamsDictInfo(instance["os_instance"], instance["os_actual"])), 1151 ] 1152 1153 if "network_port" in instance: 1154 info.append(("Allocated network port", 1155 str(compat.TryToRoman(instance["network_port"], 1156 convert=roman_integers)))) 1157 info.append(("Hypervisor", instance["hypervisor"])) 1158 console = _GetVncConsoleInfo(instance) 1159 if console: 1160 info.append(("console connection", console)) 1161 # deprecated "memory" value, kept for one version for compatibility 1162 # TODO(ganeti 2.7) remove. 1163 be_actual = copy.deepcopy(instance["be_actual"]) 1164 be_actual["memory"] = be_actual[constants.BE_MAXMEM] 1165 info.extend([ 1166 ("Hypervisor parameters", 1167 FormatParamsDictInfo(instance["hv_instance"], instance["hv_actual"])), 1168 ("Back-end parameters", 1169 FormatParamsDictInfo(instance["be_instance"], be_actual)), 1170 ("NICs", [ 1171 _FormatInstanceNicInfo(idx, nic) 1172 for (idx, nic) in enumerate(instance["nics"]) 1173 ]), 1174 ("Disk template", instance["disk_template"]), 1175 ("Disks", [ 1176 _FormatBlockDevInfo(idx, True, device, roman_integers) 1177 for (idx, device) in enumerate(instance["disks"]) 1178 ]), 1179 ]) 1180 return info
1181 1182
1183 -def ShowInstanceConfig(opts, args):
1184 """Compute instance run-time status. 1185 1186 @param opts: the command line options selected by the user 1187 @type args: list 1188 @param args: either an empty list, and then we query all 1189 instances, or should contain a list of instance names 1190 @rtype: int 1191 @return: the desired exit code 1192 1193 """ 1194 if not args and not opts.show_all: 1195 ToStderr("No instance selected." 1196 " Please pass in --all if you want to query all instances.\n" 1197 "Note that this can take a long time on a big cluster.") 1198 return 1 1199 elif args and opts.show_all: 1200 ToStderr("Cannot use --all if you specify instance names.") 1201 return 1 1202 1203 retcode = 0 1204 op = opcodes.OpInstanceQueryData(instances=args, static=opts.static, 1205 use_locking=not opts.static) 1206 result = SubmitOpCode(op, opts=opts) 1207 if not result: 1208 ToStdout("No instances.") 1209 return 1 1210 1211 PrintGenericInfo([ 1212 _FormatInstanceInfo(instance, opts.roman_integers) 1213 for instance in result.values() 1214 ]) 1215 return retcode
1216 1217
def _ConvertNicDiskModifications(mods):
  """Converts NIC/disk modifications from CLI to opcode.

  When L{opcodes.OpInstanceSetParams} was changed to support adding/removing
  disks at arbitrary indices, its parameter format changed. This function
  converts legacy requests (e.g. "--net add" or "--disk add:size=4G") to the
  newer format and adds support for new-style requests (e.g. "--new 4:add").

  @type mods: list of tuples
  @param mods: Modifications as given by command line parser
  @rtype: list of tuples
  @return: Modifications as understood by L{opcodes.OpInstanceSetParams}

  """
  result = []

  for (identifier, params) in mods:
    if identifier == constants.DDM_ADD:
      # Add item as last item (legacy interface)
      action = constants.DDM_ADD
      identifier = -1
    elif identifier == constants.DDM_REMOVE:
      # Remove last item (legacy interface)
      action = constants.DDM_REMOVE
      identifier = -1
    else:
      # Modifications and adding/removing at arbitrary indices
      add = params.pop(constants.DDM_ADD, _MISSING)
      remove = params.pop(constants.DDM_REMOVE, _MISSING)
      modify = params.pop(constants.DDM_MODIFY, _MISSING)

      if modify is _MISSING:
        if not (add is _MISSING or remove is _MISSING):
          raise errors.OpPrereqError("Cannot add and remove at the same time",
                                     errors.ECODE_INVAL)
        elif add is not _MISSING:
          action = constants.DDM_ADD
        elif remove is not _MISSING:
          action = constants.DDM_REMOVE
        else:
          action = constants.DDM_MODIFY

      elif add is _MISSING and remove is _MISSING:
        action = constants.DDM_MODIFY
      else:
        raise errors.OpPrereqError("Cannot modify and add/remove at the"
                                   " same time", errors.ECODE_INVAL)

    assert not (constants.DDMS_VALUES_WITH_MODIFY & set(params.keys()))

    if action == constants.DDM_REMOVE and params:
      raise errors.OpPrereqError("Not accepting parameters on removal",
                                 errors.ECODE_INVAL)

    result.append((action, identifier, params))

  return result

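A concrete illustration of the conversion (the tuples shown as input are assumptions about what the option parser produces; the constants are the ones used above):

  # legacy form, e.g. "--disk add:size=4G": append a disk at the end
  _ConvertNicDiskModifications([("add", {"size": "4G"})])
  # -> [(constants.DDM_ADD, -1, {"size": "4G"})]

  # indexed form, e.g. "--disk 2:remove": remove the disk at index 2
  _ConvertNicDiskModifications([("2", {"remove": True})])
  # -> [(constants.DDM_REMOVE, "2", {})]
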
def _ParseDiskSizes(mods):
  """Parses disk sizes in parameters.

  """
  for (action, _, params) in mods:
    if params and constants.IDISK_SIZE in params:
      params[constants.IDISK_SIZE] = \
        utils.ParseUnit(params[constants.IDISK_SIZE])
    elif action == constants.DDM_ADD:
      raise errors.OpPrereqError("Missing required parameter 'size'",
                                 errors.ECODE_INVAL)

  return mods

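Continuing the example above, disk modifications then get their sizes normalised here (assuming utils.ParseUnit follows its usual mebibyte convention):

  mods = [(constants.DDM_ADD, -1, {"size": "4G"})]
  _ParseDiskSizes(mods)
  # -> [(constants.DDM_ADD, -1, {"size": 4096})], i.e. the size in MiB
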
1292 -def SetInstanceParams(opts, args):
1293 """Modifies an instance. 1294 1295 All parameters take effect only at the next restart of the instance. 1296 1297 @param opts: the command line options selected by the user 1298 @type args: list 1299 @param args: should contain only one element, the instance name 1300 @rtype: int 1301 @return: the desired exit code 1302 1303 """ 1304 if not (opts.nics or opts.disks or opts.disk_template or 1305 opts.hvparams or opts.beparams or opts.os or opts.osparams or 1306 opts.offline_inst or opts.online_inst or opts.runtime_mem or 1307 opts.new_primary_node): 1308 ToStderr("Please give at least one of the parameters.") 1309 return 1 1310 1311 for param in opts.beparams: 1312 if isinstance(opts.beparams[param], basestring): 1313 if opts.beparams[param].lower() == "default": 1314 opts.beparams[param] = constants.VALUE_DEFAULT 1315 1316 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT, 1317 allowed_values=[constants.VALUE_DEFAULT]) 1318 1319 for param in opts.hvparams: 1320 if isinstance(opts.hvparams[param], basestring): 1321 if opts.hvparams[param].lower() == "default": 1322 opts.hvparams[param] = constants.VALUE_DEFAULT 1323 1324 utils.ForceDictType(opts.hvparams, constants.HVS_PARAMETER_TYPES, 1325 allowed_values=[constants.VALUE_DEFAULT]) 1326 FixHvParams(opts.hvparams) 1327 1328 nics = _ConvertNicDiskModifications(opts.nics) 1329 for action, _, __ in nics: 1330 if action == constants.DDM_MODIFY and opts.hotplug and not opts.force: 1331 usertext = ("You are about to hot-modify a NIC. This will be done" 1332 " by removing the existing NIC and then adding a new one." 1333 " Network connection might be lost. Continue?") 1334 if not AskUser(usertext): 1335 return 1 1336 1337 disks = _ParseDiskSizes(_ConvertNicDiskModifications(opts.disks)) 1338 1339 if (opts.disk_template and 1340 opts.disk_template in constants.DTS_INT_MIRROR and 1341 not opts.node): 1342 ToStderr("Changing the disk template to a mirrored one requires" 1343 " specifying a secondary node") 1344 return 1 1345 1346 if opts.offline_inst: 1347 offline = True 1348 elif opts.online_inst: 1349 offline = False 1350 else: 1351 offline = None 1352 1353 op = opcodes.OpInstanceSetParams(instance_name=args[0], 1354 nics=nics, 1355 disks=disks, 1356 hotplug=opts.hotplug, 1357 hotplug_if_possible=opts.hotplug_if_possible, 1358 disk_template=opts.disk_template, 1359 remote_node=opts.node, 1360 pnode=opts.new_primary_node, 1361 hvparams=opts.hvparams, 1362 beparams=opts.beparams, 1363 runtime_mem=opts.runtime_mem, 1364 os_name=opts.os, 1365 osparams=opts.osparams, 1366 force_variant=opts.force_variant, 1367 force=opts.force, 1368 wait_for_sync=opts.wait_for_sync, 1369 offline=offline, 1370 conflicts_check=opts.conflicts_check, 1371 ignore_ipolicy=opts.ignore_ipolicy) 1372 1373 # even if here we process the result, we allow submit only 1374 result = SubmitOrSend(op, opts) 1375 1376 if result: 1377 ToStdout("Modified instance %s", args[0]) 1378 for param, data in result: 1379 ToStdout(" - %-5s -> %s", param, data) 1380 ToStdout("Please don't forget that most parameters take effect" 1381 " only at the next (re)start of the instance initiated by" 1382 " ganeti; restarting from within the instance will" 1383 " not be enough.") 1384 if opts.hvparams: 1385 ToStdout("Note that changing hypervisor parameters without performing a" 1386 " restart might lead to a crash while performing a live" 1387 " migration. This will be addressed in future Ganeti versions.") 1388 return 0
1389 1390
1391 -def ChangeGroup(opts, args):
1392 """Moves an instance to another group. 1393 1394 """ 1395 (instance_name, ) = args 1396 1397 cl = GetClient() 1398 1399 op = opcodes.OpInstanceChangeGroup(instance_name=instance_name, 1400 iallocator=opts.iallocator, 1401 target_groups=opts.to, 1402 early_release=opts.early_release) 1403 result = SubmitOrSend(op, opts, cl=cl) 1404 1405 # Keep track of submitted jobs 1406 jex = JobExecutor(cl=cl, opts=opts) 1407 1408 for (status, job_id) in result[constants.JOB_IDS_KEY]: 1409 jex.AddJobId(None, status, job_id) 1410 1411 results = jex.GetResults() 1412 bad_cnt = len([row for row in results if not row[0]]) 1413 if bad_cnt == 0: 1414 ToStdout("Instance '%s' changed group successfully.", instance_name) 1415 rcode = constants.EXIT_SUCCESS 1416 else: 1417 ToStdout("There were %s errors while changing group of instance '%s'.", 1418 bad_cnt, instance_name) 1419 rcode = constants.EXIT_FAILURE 1420 1421 return rcode
1422 1423 1424 # multi-instance selection options 1425 m_force_multi = cli_option("--force-multiple", dest="force_multi", 1426 help="Do not ask for confirmation when more than" 1427 " one instance is affected", 1428 action="store_true", default=False) 1429 1430 m_pri_node_opt = cli_option("--primary", dest="multi_mode", 1431 help="Filter by nodes (primary only)", 1432 const=_EXPAND_NODES_PRI, action="store_const") 1433 1434 m_sec_node_opt = cli_option("--secondary", dest="multi_mode", 1435 help="Filter by nodes (secondary only)", 1436 const=_EXPAND_NODES_SEC, action="store_const") 1437 1438 m_node_opt = cli_option("--node", dest="multi_mode", 1439 help="Filter by nodes (primary and secondary)", 1440 const=_EXPAND_NODES_BOTH, action="store_const") 1441 1442 m_clust_opt = cli_option("--all", dest="multi_mode", 1443 help="Select all instances in the cluster", 1444 const=_EXPAND_CLUSTER, action="store_const") 1445 1446 m_inst_opt = cli_option("--instance", dest="multi_mode", 1447 help="Filter by instance name [default]", 1448 const=_EXPAND_INSTANCES, action="store_const") 1449 1450 m_node_tags_opt = cli_option("--node-tags", dest="multi_mode", 1451 help="Filter by node tag", 1452 const=_EXPAND_NODES_BOTH_BY_TAGS, 1453 action="store_const") 1454 1455 m_pri_node_tags_opt = cli_option("--pri-node-tags", dest="multi_mode", 1456 help="Filter by primary node tag", 1457 const=_EXPAND_NODES_PRI_BY_TAGS, 1458 action="store_const") 1459 1460 m_sec_node_tags_opt = cli_option("--sec-node-tags", dest="multi_mode", 1461 help="Filter by secondary node tag", 1462 const=_EXPAND_NODES_SEC_BY_TAGS, 1463 action="store_const") 1464 1465 m_inst_tags_opt = cli_option("--tags", dest="multi_mode", 1466 help="Filter by instance tag", 1467 const=_EXPAND_INSTANCES_BY_TAGS, 1468 action="store_const") 1469 1470 # this is defined separately due to readability only 1471 add_opts = [ 1472 NOSTART_OPT, 1473 OS_OPT, 1474 FORCE_VARIANT_OPT, 1475 NO_INSTALL_OPT, 1476 IGNORE_IPOLICY_OPT, 1477 ] 1478 1479 commands = { 1480 "add": ( 1481 AddInstance, [ArgHost(min=1, max=1)], COMMON_CREATE_OPTS + add_opts, 1482 "[...] 
-t disk-type -n node[:secondary-node] -o os-type <name>", 1483 "Creates and adds a new instance to the cluster"), 1484 "batch-create": ( 1485 BatchCreate, [ArgFile(min=1, max=1)], 1486 [DRY_RUN_OPT, PRIORITY_OPT, IALLOCATOR_OPT] + SUBMIT_OPTS, 1487 "<instances.json>", 1488 "Create a bunch of instances based on specs in the file."), 1489 "console": ( 1490 ConnectToInstanceConsole, ARGS_ONE_INSTANCE, 1491 [SHOWCMD_OPT, PRIORITY_OPT], 1492 "[--show-cmd] <instance>", "Opens a console on the specified instance"), 1493 "failover": ( 1494 FailoverInstance, ARGS_ONE_INSTANCE, 1495 [FORCE_OPT, IGNORE_CONSIST_OPT] + SUBMIT_OPTS + 1496 [SHUTDOWN_TIMEOUT_OPT, 1497 DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT, 1498 IGNORE_IPOLICY_OPT, CLEANUP_OPT], 1499 "[-f] <instance>", "Stops the instance, changes its primary node and" 1500 " (if it was originally running) starts it on the new node" 1501 " (the secondary for mirrored instances or any node" 1502 " for shared storage)."), 1503 "migrate": ( 1504 MigrateInstance, ARGS_ONE_INSTANCE, 1505 [FORCE_OPT, NONLIVE_OPT, MIGRATION_MODE_OPT, CLEANUP_OPT, DRY_RUN_OPT, 1506 PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT, ALLOW_FAILOVER_OPT, 1507 IGNORE_IPOLICY_OPT, NORUNTIME_CHGS_OPT] + SUBMIT_OPTS, 1508 "[-f] <instance>", "Migrate instance to its secondary node" 1509 " (only for mirrored instances)"), 1510 "move": ( 1511 MoveInstance, ARGS_ONE_INSTANCE, 1512 [FORCE_OPT] + SUBMIT_OPTS + 1513 [SINGLE_NODE_OPT, 1514 SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_CONSIST_OPT, 1515 IGNORE_IPOLICY_OPT], 1516 "[-f] <instance>", "Move instance to an arbitrary node" 1517 " (only for instances of type file and lv)"), 1518 "info": ( 1519 ShowInstanceConfig, ARGS_MANY_INSTANCES, 1520 [STATIC_OPT, ALL_OPT, ROMAN_OPT, PRIORITY_OPT], 1521 "[-s] {--all | <instance>...}", 1522 "Show information on the specified instance(s)"), 1523 "list": ( 1524 ListInstances, ARGS_MANY_INSTANCES, 1525 [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, VERBOSE_OPT, 1526 FORCE_FILTER_OPT], 1527 "[<instance>...]", 1528 "Lists the instances and their status. The available fields can be shown" 1529 " using the \"list-fields\" command (see the man page for details)." 1530 " The default field list is (in order): %s." 
% 1531 utils.CommaJoin(_LIST_DEF_FIELDS), 1532 ), 1533 "list-fields": ( 1534 ListInstanceFields, [ArgUnknown()], 1535 [NOHDR_OPT, SEP_OPT], 1536 "[fields...]", 1537 "Lists all available fields for instances"), 1538 "reinstall": ( 1539 ReinstallInstance, [ArgInstance()], 1540 [FORCE_OPT, OS_OPT, FORCE_VARIANT_OPT, m_force_multi, m_node_opt, 1541 m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, m_node_tags_opt, 1542 m_pri_node_tags_opt, m_sec_node_tags_opt, m_inst_tags_opt, SELECT_OS_OPT] 1543 + SUBMIT_OPTS + [DRY_RUN_OPT, PRIORITY_OPT, OSPARAMS_OPT], 1544 "[-f] <instance>", "Reinstall a stopped instance"), 1545 "remove": ( 1546 RemoveInstance, ARGS_ONE_INSTANCE, 1547 [FORCE_OPT, SHUTDOWN_TIMEOUT_OPT, IGNORE_FAILURES_OPT] + SUBMIT_OPTS 1548 + [DRY_RUN_OPT, PRIORITY_OPT], 1549 "[-f] <instance>", "Shuts down the instance and removes it"), 1550 "rename": ( 1551 RenameInstance, 1552 [ArgInstance(min=1, max=1), ArgHost(min=1, max=1)], 1553 [NOIPCHECK_OPT, NONAMECHECK_OPT] + SUBMIT_OPTS 1554 + [DRY_RUN_OPT, PRIORITY_OPT], 1555 "<instance> <new_name>", "Rename the instance"), 1556 "replace-disks": ( 1557 ReplaceDisks, ARGS_ONE_INSTANCE, 1558 [AUTO_REPLACE_OPT, DISKIDX_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT, 1559 NEW_SECONDARY_OPT, ON_PRIMARY_OPT, ON_SECONDARY_OPT] + SUBMIT_OPTS 1560 + [DRY_RUN_OPT, PRIORITY_OPT, IGNORE_IPOLICY_OPT], 1561 "[-s|-p|-a|-n NODE|-I NAME] <instance>", 1562 "Replaces disks for the instance"), 1563 "modify": ( 1564 SetInstanceParams, ARGS_ONE_INSTANCE, 1565 [BACKEND_OPT, DISK_OPT, FORCE_OPT, HVOPTS_OPT, NET_OPT] + SUBMIT_OPTS + 1566 [DISK_TEMPLATE_OPT, SINGLE_NODE_OPT, OS_OPT, FORCE_VARIANT_OPT, 1567 OSPARAMS_OPT, DRY_RUN_OPT, PRIORITY_OPT, NWSYNC_OPT, OFFLINE_INST_OPT, 1568 ONLINE_INST_OPT, IGNORE_IPOLICY_OPT, RUNTIME_MEM_OPT, 1569 NOCONFLICTSCHECK_OPT, NEW_PRIMARY_OPT, HOTPLUG_OPT, 1570 HOTPLUG_IF_POSSIBLE_OPT], 1571 "<instance>", "Alters the parameters of an instance"), 1572 "shutdown": ( 1573 GenericManyOps("shutdown", _ShutdownInstance), [ArgInstance()], 1574 [FORCE_OPT, m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt, 1575 m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt, 1576 m_inst_tags_opt, m_inst_opt, m_force_multi, TIMEOUT_OPT] + SUBMIT_OPTS 1577 + [DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT, NO_REMEMBER_OPT], 1578 "<instance>", "Stops an instance"), 1579 "startup": ( 1580 GenericManyOps("startup", _StartupInstance), [ArgInstance()], 1581 [FORCE_OPT, m_force_multi, m_node_opt, m_pri_node_opt, m_sec_node_opt, 1582 m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt, 1583 m_inst_tags_opt, m_clust_opt, m_inst_opt] + SUBMIT_OPTS + 1584 [HVOPTS_OPT, 1585 BACKEND_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT, 1586 NO_REMEMBER_OPT, STARTUP_PAUSED_OPT], 1587 "<instance>", "Starts an instance"), 1588 "reboot": ( 1589 GenericManyOps("reboot", _RebootInstance), [ArgInstance()], 1590 [m_force_multi, REBOOT_TYPE_OPT, IGNORE_SECONDARIES_OPT, m_node_opt, 1591 m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt] + SUBMIT_OPTS + 1592 [m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt, 1593 m_inst_tags_opt, SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT], 1594 "<instance>", "Reboots an instance"), 1595 "activate-disks": ( 1596 ActivateDisks, ARGS_ONE_INSTANCE, 1597 SUBMIT_OPTS + [IGNORE_SIZE_OPT, PRIORITY_OPT, WFSYNC_OPT], 1598 "<instance>", "Activate an instance's disks"), 1599 "deactivate-disks": ( 1600 DeactivateDisks, ARGS_ONE_INSTANCE, 1601 [FORCE_OPT] + SUBMIT_OPTS + [DRY_RUN_OPT, PRIORITY_OPT], 1602 "[-f] <instance>", 
"Deactivate an instance's disks"), 1603 "recreate-disks": ( 1604 RecreateDisks, ARGS_ONE_INSTANCE, 1605 SUBMIT_OPTS + 1606 [DISK_OPT, NODE_PLACEMENT_OPT, DRY_RUN_OPT, PRIORITY_OPT, 1607 IALLOCATOR_OPT], 1608 "<instance>", "Recreate an instance's disks"), 1609 "grow-disk": ( 1610 GrowDisk, 1611 [ArgInstance(min=1, max=1), ArgUnknown(min=1, max=1), 1612 ArgUnknown(min=1, max=1)], 1613 SUBMIT_OPTS + [NWSYNC_OPT, DRY_RUN_OPT, PRIORITY_OPT, ABSOLUTE_OPT], 1614 "<instance> <disk> <size>", "Grow an instance's disk"), 1615 "change-group": ( 1616 ChangeGroup, ARGS_ONE_INSTANCE, 1617 [TO_GROUP_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT, PRIORITY_OPT] 1618 + SUBMIT_OPTS, 1619 "[-I <iallocator>] [--to <group>]", "Change group of instance"), 1620 "list-tags": ( 1621 ListTags, ARGS_ONE_INSTANCE, [], 1622 "<instance_name>", "List the tags of the given instance"), 1623 "add-tags": ( 1624 AddTags, [ArgInstance(min=1, max=1), ArgUnknown()], 1625 [TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS, 1626 "<instance_name> tag...", "Add tags to the given instance"), 1627 "remove-tags": ( 1628 RemoveTags, [ArgInstance(min=1, max=1), ArgUnknown()], 1629 [TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS, 1630 "<instance_name> tag...", "Remove tags from given instance"), 1631 } 1632 1633 #: dictionary with aliases for commands 1634 aliases = { 1635 "start": "startup", 1636 "stop": "shutdown", 1637 "show": "info", 1638 } 1639 1640
def Main():
  return GenericMain(commands, aliases=aliases,
                     override={"tag_type": constants.TAG_INSTANCE},
                     env_override=_ENV_OVERRIDE)