"""Instance related commands"""


import copy
import itertools
import simplejson
import logging

from ganeti.cli import *
from ganeti import opcodes
from ganeti import constants
from ganeti import compat
from ganeti import utils
from ganeti import errors
from ganeti import netutils
from ganeti import ssh
from ganeti import objects
from ganeti import ht


_EXPAND_CLUSTER = "cluster"
_EXPAND_NODES_BOTH = "nodes"
_EXPAND_NODES_PRI = "nodes-pri"
_EXPAND_NODES_SEC = "nodes-sec"
_EXPAND_NODES_BOTH_BY_TAGS = "nodes-by-tags"
_EXPAND_NODES_PRI_BY_TAGS = "nodes-pri-by-tags"
_EXPAND_NODES_SEC_BY_TAGS = "nodes-sec-by-tags"
_EXPAND_INSTANCES = "instances"
_EXPAND_INSTANCES_BY_TAGS = "instances-by-tags"

_EXPAND_NODES_TAGS_MODES = compat.UniqueFrozenset([
  _EXPAND_NODES_BOTH_BY_TAGS,
  _EXPAND_NODES_PRI_BY_TAGS,
  _EXPAND_NODES_SEC_BY_TAGS,
  ])


_LIST_DEF_FIELDS = [
  "name", "hypervisor", "os", "pnode", "status", "oper_ram",
  ]

_MISSING = object()
_ENV_OVERRIDE = compat.UniqueFrozenset(["list"])

_INST_DATA_VAL = ht.TListOf(ht.TDict)


def _ExpandMultiNames(mode, names, client=None):
  """Expand the given names using the passed mode.

  For _EXPAND_CLUSTER, all instances will be returned. For
  _EXPAND_NODES_PRI/SEC, all instances having those nodes as
  primary/secondary will be returned. For _EXPAND_NODES_BOTH, all
  instances having those nodes as either primary or secondary will be
  returned. For _EXPAND_INSTANCES, the given instances will be
  returned.

  @param mode: one of L{_EXPAND_CLUSTER}, L{_EXPAND_NODES_BOTH},
      L{_EXPAND_NODES_PRI}, L{_EXPAND_NODES_SEC} or
      L{_EXPAND_INSTANCES}
  @param names: a list of names; for cluster, it must be empty,
      and for node and instance it must be a list of valid item
      names (short names are valid as usual, e.g. node1 instead of
      node1.example.com)
  @rtype: list
  @return: the list of names after the expansion
  @raise errors.ProgrammerError: for unknown selection type
  @raise errors.OpPrereqError: for invalid input parameters

  """
  if client is None:
    client = GetClient(query=True)
  if mode == _EXPAND_CLUSTER:
    if names:
      raise errors.OpPrereqError("Cluster filter mode takes no arguments",
                                 errors.ECODE_INVAL)
    idata = client.QueryInstances([], ["name"], False)
    inames = [row[0] for row in idata]

  elif (mode in _EXPAND_NODES_TAGS_MODES or
        mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_PRI, _EXPAND_NODES_SEC)):
    if mode in _EXPAND_NODES_TAGS_MODES:
      if not names:
        raise errors.OpPrereqError("No node tags passed", errors.ECODE_INVAL)
      ndata = client.QueryNodes([], ["name", "pinst_list",
                                     "sinst_list", "tags"], False)
      ndata = [row for row in ndata if set(row[3]).intersection(names)]
    else:
      if not names:
        raise errors.OpPrereqError("No node names passed", errors.ECODE_INVAL)
      ndata = client.QueryNodes(names, ["name", "pinst_list", "sinst_list"],
                                False)

    ipri = [row[1] for row in ndata]
    pri_names = list(itertools.chain(*ipri))
    isec = [row[2] for row in ndata]
    sec_names = list(itertools.chain(*isec))
    if mode in (_EXPAND_NODES_BOTH, _EXPAND_NODES_BOTH_BY_TAGS):
      inames = pri_names + sec_names
    elif mode in (_EXPAND_NODES_PRI, _EXPAND_NODES_PRI_BY_TAGS):
      inames = pri_names
    elif mode in (_EXPAND_NODES_SEC, _EXPAND_NODES_SEC_BY_TAGS):
      inames = sec_names
    else:
      raise errors.ProgrammerError("Unhandled node expansion mode")
  elif mode == _EXPAND_INSTANCES:
    if not names:
      raise errors.OpPrereqError("No instance names passed",
                                 errors.ECODE_INVAL)
    idata = client.QueryInstances(names, ["name"], False)
    inames = [row[0] for row in idata]
  elif mode == _EXPAND_INSTANCES_BY_TAGS:
    if not names:
      raise errors.OpPrereqError("No instance tags passed",
                                 errors.ECODE_INVAL)
    idata = client.QueryInstances([], ["name", "tags"], False)
    inames = [row[0] for row in idata if set(row[1]).intersection(names)]
  else:
    raise errors.OpPrereqError("Unknown mode '%s'" % mode, errors.ECODE_INVAL)

  return inames
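
# For example, _ExpandMultiNames(_EXPAND_NODES_PRI, ["node1"]) returns the
# names of all instances whose primary node is node1, while
# _ExpandMultiNames(_EXPAND_CLUSTER, []) returns every instance in the cluster.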


def _EnsureInstancesExist(client, names):
  """Check for and ensure the given instance names exist.

  This function will raise an OpPrereqError in case they don't
  exist. Otherwise it will return cleanly.

  @type client: L{ganeti.luxi.Client}
  @param client: the client to use for the query
  @type names: list
  @param names: the list of instance names to query
  @raise errors.OpPrereqError: in case any instance is missing

  """
  result = client.QueryInstances(names, ["name"], False)
  for orig_name, row in zip(names, result):
    if row[0] is None:
      raise errors.OpPrereqError("Instance '%s' does not exist" % orig_name,
                                 errors.ECODE_NOENT)


def GenericManyOps(operation, fn):
  """Generic multi-instance operations.

  This will return a wrapper that processes the options and arguments
  given, and uses the passed function to build the opcode needed for
  the specific operation. Thus all the generic loop/confirmation code
  is abstracted into this function.

  """
  def realfn(opts, args):
    if opts.multi_mode is None:
      opts.multi_mode = _EXPAND_INSTANCES
    cl = GetClient()
    qcl = GetClient(query=True)
    inames = _ExpandMultiNames(opts.multi_mode, args, client=qcl)
    if not inames:
      if opts.multi_mode == _EXPAND_CLUSTER:
        ToStdout("Cluster is empty, no instances to shutdown")
        return 0
      raise errors.OpPrereqError("Selection filter does not match"
                                 " any instances", errors.ECODE_INVAL)
    multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(inames) > 1
    if not (opts.force_multi or not multi_on
            or ConfirmOperation(inames, "instances", operation)):
      return 1
    jex = JobExecutor(verbose=multi_on, cl=cl, opts=opts)
    for name in inames:
      op = fn(name, opts)
      jex.QueueJob(name, op)
    results = jex.WaitOrShow(not opts.submit_only)
    rcode = compat.all(row[0] for row in results)
    return int(not rcode)
  return realfn
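
# The "startup", "shutdown" and "reboot" entries of the command table below
# are built with this wrapper, e.g. GenericManyOps("shutdown", _ShutdownInstance).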


def ListInstances(opts, args):
  """List instances and their properties.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should be an empty list
  @rtype: int
  @return: the desired exit code

  """
  selected_fields = ParseFields(opts.output, _LIST_DEF_FIELDS)

  fmtoverride = dict.fromkeys(["tags", "disk.sizes", "nic.macs", "nic.ips",
                               "nic.modes", "nic.links", "nic.bridges",
                               "nic.networks",
                               "snodes", "snodes.group", "snodes.group.uuid"],
                              (lambda value: ",".join(str(item)
                                                      for item in value),
                               False))

  cl = GetClient(query=True)

  return GenericList(constants.QR_INSTANCE, selected_fields, args, opts.units,
                     opts.separator, not opts.no_headers,
                     format_override=fmtoverride, verbose=opts.verbose,
                     force_filter=opts.force_filter, cl=cl)


def ListInstanceFields(opts, args):
  """List instance fields.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: fields to list, or empty for all
  @rtype: int
  @return: the desired exit code

  """
  return GenericListFields(constants.QR_INSTANCE, args, opts.separator,
                           not opts.no_headers)


def AddInstance(opts, args):
  """Add an instance to the cluster.

  This is just a wrapper over C{GenericInstanceCreate}.

  """
  return GenericInstanceCreate(constants.INSTANCE_CREATE, opts, args)


def BatchCreate(opts, args):
  """Create instances using a definition file.

  This function reads a json file with L{opcodes.OpInstanceCreate}
  serialisations.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain one element, the json filename
  @rtype: int
  @return: the desired exit code

  """
  (json_filename,) = args
  cl = GetClient()

  try:
    instance_data = simplejson.loads(utils.ReadFile(json_filename))
  except Exception, err:
    ToStderr("Can't parse the instance definition file: %s" % str(err))
    return 1

  if not _INST_DATA_VAL(instance_data):
    ToStderr("The instance definition file is not %s" % _INST_DATA_VAL)
    return 1

  instances = []
  possible_params = set(opcodes.OpInstanceCreate.GetAllSlots())
  for (idx, inst) in enumerate(instance_data):
    unknown = set(inst.keys()) - possible_params

    if unknown:
      raise errors.OpPrereqError("Unknown fields in definition %s: %s" %
                                 (idx, utils.CommaJoin(unknown)),
                                 errors.ECODE_INVAL)

    op = opcodes.OpInstanceCreate(**inst)
    op.Validate(False)
    instances.append(op)

  op = opcodes.OpInstanceMultiAlloc(iallocator=opts.iallocator,
                                    instances=instances)
  result = SubmitOrSend(op, opts, cl=cl)
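
  # OpInstanceMultiAlloc returns a list of (status, job id) pairs under
  # constants.JOB_IDS_KEY; feed them to a JobExecutor so each instance
  # creation can be waited for and reported on individually.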
  jex = JobExecutor(cl=cl, opts=opts)

  for (status, job_id) in result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  results = jex.GetResults()
  bad_cnt = len([row for row in results if not row[0]])
  if bad_cnt == 0:
    ToStdout("All instances created successfully.")
    rcode = constants.EXIT_SUCCESS
  else:
    ToStdout("There were %s errors during the creation.", bad_cnt)
    rcode = constants.EXIT_FAILURE

  return rcode


def ReinstallInstance(opts, args):
  """Reinstall an instance.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the name of the
      instance to be reinstalled
  @rtype: int
  @return: the desired exit code

  """
  if opts.multi_mode is None:
    opts.multi_mode = _EXPAND_INSTANCES

  inames = _ExpandMultiNames(opts.multi_mode, args)
  if not inames:
    raise errors.OpPrereqError("Selection filter does not match any instances",
                               errors.ECODE_INVAL)

  if opts.select_os is True:
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    result = SubmitOpCode(op, opts=opts)

    if not result:
      ToStdout("Can't get the OS list")
      return 1

    ToStdout("Available OS templates:")
    number = 0
    choices = []
    for (name, variants) in result:
      for entry in CalculateOSNames(name, variants):
        ToStdout("%3s: %s", number, entry)
        choices.append(("%s" % number, entry, entry))
        number += 1

    choices.append(("x", "exit", "Exit gnt-instance reinstall"))
    selected = AskUser("Enter OS template number (or x to abort):",
                       choices)

    if selected == "exit":
      ToStderr("User aborted reinstall, exiting")
      return 1

    os_name = selected
    os_msg = "change the OS to '%s'" % selected
  else:
    os_name = opts.os
    if opts.os is not None:
      os_msg = "change the OS to '%s'" % os_name
    else:
      os_msg = "keep the same OS"

  multi_on = opts.multi_mode != _EXPAND_INSTANCES or len(inames) > 1
  if multi_on:
    warn_msg = ("Note: this will remove *all* data for the"
                " below instances! It will %s.\n" % os_msg)
    if not (opts.force_multi or
            ConfirmOperation(inames, "instances", "reinstall", extra=warn_msg)):
      return 1
  else:
    if not (opts.force or opts.force_multi):
      usertext = ("This will reinstall the instance '%s' (and %s) which"
                  " removes all data. Continue?") % (inames[0], os_msg)
      if not AskUser(usertext):
        return 1

  jex = JobExecutor(verbose=multi_on, opts=opts)
  for instance_name in inames:
    op = opcodes.OpInstanceReinstall(instance_name=instance_name,
                                     os_type=os_name,
                                     force_variant=opts.force_variant,
                                     osparams=opts.osparams)
    jex.QueueJob(instance_name, op)

  results = jex.WaitOrShow(not opts.submit_only)

  if compat.all(map(compat.fst, results)):
    return constants.EXIT_SUCCESS
  else:
    return constants.EXIT_FAILURE


def RemoveInstance(opts, args):
  """Remove an instance.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the name of
      the instance to be removed
  @rtype: int
  @return: the desired exit code

  """
  instance_name = args[0]
  force = opts.force
  cl = GetClient()
  qcl = GetClient(query=True)

  if not force:
    _EnsureInstancesExist(qcl, [instance_name])

    usertext = ("This will remove the volumes of the instance %s"
                " (including mirrors), thus removing all the data"
                " of the instance. Continue?") % instance_name
    if not AskUser(usertext):
      return 1

  op = opcodes.OpInstanceRemove(instance_name=instance_name,
                                ignore_failures=opts.ignore_failures,
                                shutdown_timeout=opts.shutdown_timeout)
  SubmitOrSend(op, opts, cl=cl)
  return 0


def RenameInstance(opts, args):
  """Rename an instance.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain two elements, the old and the
      new instance names
  @rtype: int
  @return: the desired exit code

  """
  if not opts.name_check:
    if not AskUser("As you disabled the check of the DNS entry, please verify"
                   " that '%s' is a FQDN. Continue?" % args[1]):
      return 1

  op = opcodes.OpInstanceRename(instance_name=args[0],
                                new_name=args[1],
                                ip_check=opts.ip_check,
                                name_check=opts.name_check)
  result = SubmitOrSend(op, opts)

  if result:
    ToStdout("Instance '%s' renamed to '%s'", args[0], result)

  return 0


def ActivateDisks(opts, args):
  """Activate an instance's disks.

  This serves two purposes:
    - it allows (as long as the instance is not running)
      mounting the disks and modifying them from the node
    - it repairs inactive secondary drbds

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  instance_name = args[0]
  op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
                                       ignore_size=opts.ignore_size,
                                       wait_for_sync=opts.wait_for_sync)
  disks_info = SubmitOrSend(op, opts)
  for host, iname, nname in disks_info:
    ToStdout("%s:%s:%s", host, iname, nname)
  return 0


def DeactivateDisks(opts, args):
  """Deactivate an instance's disks.

  This function takes the instance name, looks for its primary node
  and then tries to shut down its block devices on that node.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  instance_name = args[0]
  op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name,
                                         force=opts.force)
  SubmitOrSend(op, opts)
  return 0


def RecreateDisks(opts, args):
  """Recreate an instance's disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  instance_name = args[0]

  disks = []

  if opts.disks:
    for didx, ddict in opts.disks:
      didx = int(didx)

      if not ht.TDict(ddict):
        msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
        raise errors.OpPrereqError(msg, errors.ECODE_INVAL)

      if constants.IDISK_SIZE in ddict:
        try:
          ddict[constants.IDISK_SIZE] = \
            utils.ParseUnit(ddict[constants.IDISK_SIZE])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)

      if constants.IDISK_SPINDLES in ddict:
        try:
          ddict[constants.IDISK_SPINDLES] = \
            int(ddict[constants.IDISK_SPINDLES])
        except ValueError, err:
          raise errors.OpPrereqError("Invalid spindles for disk %d: %s" %
                                     (didx, err), errors.ECODE_INVAL)

      disks.append((didx, ddict))
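
  # New nodes can be given explicitly (-n pnode[:snode]) or chosen by an
  # iallocator, but not both; with neither, no node change is requested.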
  if opts.node:
    if opts.iallocator:
      msg = "At most one of either --nodes or --iallocator can be passed"
      raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
    pnode, snode = SplitNodeOption(opts.node)
    nodes = [pnode]
    if snode is not None:
      nodes.append(snode)
  else:
    nodes = []

  op = opcodes.OpInstanceRecreateDisks(instance_name=instance_name,
                                       disks=disks, nodes=nodes,
                                       iallocator=opts.iallocator)
  SubmitOrSend(op, opts)

  return 0


def GrowDisk(opts, args):
  """Grow an instance's disks.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain three elements, the target instance name,
      the target disk id, and the target growth
  @rtype: int
  @return: the desired exit code

  """
  instance = args[0]
  disk = args[1]
  try:
    disk = int(disk)
  except (TypeError, ValueError), err:
    raise errors.OpPrereqError("Invalid disk index: %s" % str(err),
                               errors.ECODE_INVAL)
  try:
    amount = utils.ParseUnit(args[2])
  except errors.UnitParseError:
    raise errors.OpPrereqError("Can't parse the given amount '%s'" % args[2],
                               errors.ECODE_INVAL)
  op = opcodes.OpInstanceGrowDisk(instance_name=instance,
                                  disk=disk, amount=amount,
                                  wait_for_sync=opts.wait_for_sync,
                                  absolute=opts.absolute)
  SubmitOrSend(op, opts)
  return 0


def _StartupInstance(name, opts):
  """Startup instances.

  This returns the opcode to start an instance, and its decorator will
  wrap this into a loop starting all desired instances.

  @param name: the name of the instance to act on
  @param opts: the command line options selected by the user
  @return: the opcode needed for the operation

  """
  op = opcodes.OpInstanceStartup(instance_name=name,
                                 force=opts.force,
                                 ignore_offline_nodes=opts.ignore_offline,
                                 no_remember=opts.no_remember,
                                 startup_paused=opts.startup_paused)
  # do not add these parameters to the opcode unless they were given
  if opts.hvparams:
    op.hvparams = opts.hvparams
  if opts.beparams:
    op.beparams = opts.beparams
  return op


def _RebootInstance(name, opts):
  """Reboot instance(s).

  This returns the opcode to reboot an instance, and its decorator
  will wrap this into a loop rebooting all desired instances.

  @param name: the name of the instance to act on
  @param opts: the command line options selected by the user
  @return: the opcode needed for the operation

  """
  return opcodes.OpInstanceReboot(instance_name=name,
                                  reboot_type=opts.reboot_type,
                                  ignore_secondaries=opts.ignore_secondaries,
                                  shutdown_timeout=opts.shutdown_timeout)


def _ShutdownInstance(name, opts):
  """Shutdown an instance.

  This returns the opcode to shutdown an instance, and its decorator
  will wrap this into a loop shutting down all desired instances.

  @param name: the name of the instance to act on
  @param opts: the command line options selected by the user
  @return: the opcode needed for the operation

  """
  return opcodes.OpInstanceShutdown(instance_name=name,
                                    force=opts.force,
                                    timeout=opts.timeout,
                                    ignore_offline_nodes=opts.ignore_offline,
                                    no_remember=opts.no_remember)


def ReplaceDisks(opts, args):
  """Replace the disks of an instance.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  new_2ndary = opts.dst_node
  iallocator = opts.iallocator
  if opts.disks is None:
    disks = []
  else:
    try:
      disks = [int(i) for i in opts.disks.split(",")]
    except (TypeError, ValueError), err:
      raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
                                 errors.ECODE_INVAL)
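  # Exactly one of -p (replace on primary), -s (replace on secondary),
  # -a (automatic), -n (new secondary node) or -I (iallocator) must be
  # given; it selects the replacement mode used below.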
  cnt = [opts.on_primary, opts.on_secondary, opts.auto,
         new_2ndary is not None, iallocator is not None].count(True)
  if cnt != 1:
    raise errors.OpPrereqError("One and only one of the -p, -s, -a, -n and -I"
                               " options must be passed", errors.ECODE_INVAL)
  elif opts.on_primary:
    mode = constants.REPLACE_DISK_PRI
  elif opts.on_secondary:
    mode = constants.REPLACE_DISK_SEC
  elif opts.auto:
    mode = constants.REPLACE_DISK_AUTO
    if disks:
      raise errors.OpPrereqError("Cannot specify disks when using automatic"
                                 " mode", errors.ECODE_INVAL)
  elif new_2ndary is not None or iallocator is not None:
    mode = constants.REPLACE_DISK_CHG

  op = opcodes.OpInstanceReplaceDisks(instance_name=args[0], disks=disks,
                                      remote_node=new_2ndary, mode=mode,
                                      iallocator=iallocator,
                                      early_release=opts.early_release,
                                      ignore_ipolicy=opts.ignore_ipolicy)
  SubmitOrSend(op, opts)
  return 0


def FailoverInstance(opts, args):
  """Failover an instance.

  The failover is done by shutting it down on its present node and
  starting it on the secondary.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  instance_name = args[0]
  force = opts.force
  iallocator = opts.iallocator
  target_node = opts.dst_node

  if iallocator and target_node:
    raise errors.OpPrereqError("Specify either an iallocator (-I), or a target"
                               " node (-n) but not both", errors.ECODE_INVAL)

  if not force:
    _EnsureInstancesExist(cl, [instance_name])

    usertext = ("Failover will happen to image %s."
                " This requires a shutdown of the instance. Continue?" %
                (instance_name,))
    if not AskUser(usertext):
      return 1

  op = opcodes.OpInstanceFailover(instance_name=instance_name,
                                  ignore_consistency=opts.ignore_consistency,
                                  shutdown_timeout=opts.shutdown_timeout,
                                  iallocator=iallocator,
                                  target_node=target_node,
                                  ignore_ipolicy=opts.ignore_ipolicy)
  SubmitOrSend(op, opts, cl=cl)
  return 0


def MigrateInstance(opts, args):
  """Migrate an instance.

  The migrate is done without shutdown.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  instance_name = args[0]
  force = opts.force
  iallocator = opts.iallocator
  target_node = opts.dst_node

  if iallocator and target_node:
    raise errors.OpPrereqError("Specify either an iallocator (-I), or a target"
                               " node (-n) but not both", errors.ECODE_INVAL)

  if not force:
    _EnsureInstancesExist(cl, [instance_name])

    if opts.cleanup:
      usertext = ("Instance %s will be recovered from a failed migration."
                  " Note that the migration procedure (including cleanup)" %
                  (instance_name,))
    else:
      usertext = ("Instance %s will be migrated. Note that migration" %
                  (instance_name,))
    usertext += (" might impact the instance if anything goes wrong"
                 " (e.g. due to bugs in the hypervisor). Continue?")
    if not AskUser(usertext):
      return 1
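
  # --non-live and an explicit --migration-mode are mutually exclusive;
  # --non-live forces the non-live mode, otherwise the requested mode (or
  # None, in which case the default is used) is passed on.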
  if not opts.live and opts.migration_mode is not None:
    raise errors.OpPrereqError("Only one of the --non-live and "
                               "--migration-mode options can be passed",
                               errors.ECODE_INVAL)
  if not opts.live:
    mode = constants.HT_MIGRATION_NONLIVE
  else:
    mode = opts.migration_mode

  op = opcodes.OpInstanceMigrate(instance_name=instance_name, mode=mode,
                                 cleanup=opts.cleanup, iallocator=iallocator,
                                 target_node=target_node,
                                 allow_failover=opts.allow_failover,
                                 allow_runtime_changes=opts.allow_runtime_chgs,
                                 ignore_ipolicy=opts.ignore_ipolicy)
  SubmitOrSend(op, cl=cl, opts=opts)
  return 0


def MoveInstance(opts, args):
  """Move an instance.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  cl = GetClient()
  instance_name = args[0]
  force = opts.force

  if not force:
    usertext = ("Instance %s will be moved."
                " This requires a shutdown of the instance. Continue?" %
                (instance_name,))
    if not AskUser(usertext):
      return 1

  op = opcodes.OpInstanceMove(instance_name=instance_name,
                              target_node=opts.node,
                              compress=opts.compress,
                              shutdown_timeout=opts.shutdown_timeout,
                              ignore_consistency=opts.ignore_consistency,
                              ignore_ipolicy=opts.ignore_ipolicy)
  SubmitOrSend(op, opts, cl=cl)
  return 0


def ConnectToInstanceConsole(opts, args):
  """Connect to the console of an instance.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  instance_name = args[0]

  cl = GetClient()
  qcl = GetClient(query=True)
  try:
    cluster_name = cl.QueryConfigValues(["cluster_name"])[0]
    idata = \
      qcl.QueryInstances([instance_name], ["console", "oper_state"], False)
    if not idata:
      raise errors.OpPrereqError("Instance '%s' does not exist" % instance_name,
                                 errors.ECODE_NOENT)
  finally:
    # Ensure the client connections are closed while external commands are run
    cl.Close()
    qcl.Close()

  del cl
  del qcl

  ((console_data, oper_state), ) = idata
  if not console_data:
    if oper_state:
      # Instance is running
      raise errors.OpExecError("Console information for instance %s is"
                               " unavailable" % instance_name)
    else:
      raise errors.OpExecError("Instance %s is not running, can't get console" %
                               instance_name)

  return _DoConsole(objects.InstanceConsole.FromDict(console_data),
                    opts.show_command, cluster_name)


def _DoConsole(console, show_command, cluster_name, feedback_fn=ToStdout,
               _runcmd_fn=utils.RunCmd):
  """Acts based on the result of L{opcodes.OpInstanceConsole}.

  @type console: L{objects.InstanceConsole}
  @param console: Console object
  @type show_command: bool
  @param show_command: Whether to just display commands
  @type cluster_name: string
  @param cluster_name: Cluster name as retrieved from master daemon

  """
  console.Validate()

  if console.kind == constants.CONS_MESSAGE:
    feedback_fn(console.message)
  elif console.kind == constants.CONS_VNC:
    feedback_fn("Instance %s has VNC listening on %s:%s (display %s),"
                " URL <vnc://%s:%s/>",
                console.instance, console.host, console.port,
                console.display, console.host, console.port)
  elif console.kind == constants.CONS_SPICE:
    feedback_fn("Instance %s has SPICE listening on %s:%s", console.instance,
                console.host, console.port)
  elif console.kind == constants.CONS_SSH:
    # The console command may already be a single string or a list of
    # arguments that needs quoting before being handed to ssh
    if isinstance(console.command, basestring):
      cmd = console.command
    else:
      cmd = utils.ShellQuoteArgs(console.command)

    srun = ssh.SshRunner(cluster_name=cluster_name)
    ssh_cmd = srun.BuildCmd(console.host, console.user, cmd,
                            port=console.port,
                            batch=True, quiet=False, tty=True)

    if show_command:
      feedback_fn(utils.ShellQuoteArgs(ssh_cmd))
    else:
      result = _runcmd_fn(ssh_cmd, interactive=True)
      if result.failed:
        logging.error("Console command \"%s\" failed with reason '%s' and"
                      " output %r", result.cmd, result.fail_reason,
                      result.output)
        raise errors.OpExecError("Connection to console of instance %s failed,"
                                 " please check cluster configuration" %
                                 console.instance)
  else:
    raise errors.GenericError("Unknown console type '%s'" % console.kind)

  return constants.EXIT_SUCCESS


def _FormatBlockDevInfo(idx, top_level, dev, roman):
  if top_level:
    if dev["iv_name"] is not None:
      txt = dev["iv_name"]
    else:
      txt = "disk %s" % compat.TryToRoman(idx, convert=roman)
  else:
    txt = "child %s" % compat.TryToRoman(idx, convert=roman)
  if isinstance(dev["size"], int):
    nice_size = utils.FormatUnit(dev["size"], "h")
  else:
    nice_size = str(dev["size"])
  data = [(txt, "%s, size %s" % (dev["dev_type"], nice_size))]
  if top_level:
    if dev["spindles"] is not None:
      data.append(("spindles", dev["spindles"]))
    data.append(("access mode", dev["mode"]))
  if dev["logical_id"] is not None:
    try:
      l_id = _FormatDiskDetails(dev["dev_type"], dev, roman)
    except ValueError:
      l_id = [str(dev["logical_id"])]
    if len(l_id) == 1:
      data.append(("logical_id", l_id[0]))
    else:
      data.extend(l_id)

  if dev["pstatus"]:
    data.append(("on primary", helper(dev["dev_type"], dev["pstatus"])))

  if dev["sstatus"]:
    data.append(("on secondary", helper(dev["dev_type"], dev["sstatus"])))

  data.append(("name", dev["name"]))
  data.append(("UUID", dev["uuid"]))

  if dev["children"]:
    data.append(("child devices", [
      _FormatBlockDevInfo(c_idx, False, child, roman)
      for c_idx, child in enumerate(dev["children"])
      ]))
  return data


def _GetVncConsoleInfo(instance):
  """Helper function for L{_FormatInstanceInfo()}"""
  vnc_bind_address = instance["hv_actual"].get(constants.HV_VNC_BIND_ADDRESS,
                                               None)
  if vnc_bind_address:
    port = instance["network_port"]
    display = int(port) - constants.VNC_BASE_PORT
    if display > 0 and vnc_bind_address == constants.IP4_ADDRESS_ANY:
      vnc_console_port = "%s:%s (display %s)" % (instance["pnode"],
                                                 port,
                                                 display)
    elif display > 0 and netutils.IP4Address.IsValid(vnc_bind_address):
      vnc_console_port = ("%s:%s (node %s) (display %s)" %
                          (vnc_bind_address, port,
                           instance["pnode"], display))
    else:
      vnc_console_port = "%s:%s" % (instance["pnode"],
                                    vnc_bind_address)
    ret = "vnc to %s" % vnc_console_port
  else:
    ret = None
  return ret


def ShowInstanceConfig(opts, args):
  """Compute instance run-time status.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: either an empty list, and then we query all
      instances, or should contain a list of instance names
  @rtype: int
  @return: the desired exit code

  """
  if not args and not opts.show_all:
    ToStderr("No instance selected."
             " Please pass in --all if you want to query all instances.\n"
             "Note that this can take a long time on a big cluster.")
    return 1
  elif args and opts.show_all:
    ToStderr("Cannot use --all if you specify instance names.")
    return 1

  retcode = 0
  op = opcodes.OpInstanceQueryData(instances=args, static=opts.static,
                                   use_locking=not opts.static)
  result = SubmitOpCode(op, opts=opts)
  if not result:
    ToStdout("No instances.")
    return 1

  PrintGenericInfo([
    _FormatInstanceInfo(instance, opts.roman_integers)
    for instance in result.values()
    ])
  return retcode


def _ConvertNicDiskModifications(mods):
  """Converts NIC/disk modifications from CLI to opcode.

  When L{opcodes.OpInstanceSetParams} was changed to support adding/removing
  disks at arbitrary indices, its parameter format changed. This function
  converts legacy requests (e.g. "--net add" or "--disk add:size=4G") to the
  newer format and adds support for new-style requests (e.g. "--net 4:add").

  @type mods: list of tuples
  @param mods: Modifications as given by command line parser
  @rtype: list of tuples
  @return: Modifications as understood by L{opcodes.OpInstanceSetParams}

  """
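  # For example (values illustrative), the legacy form ("add", {"size": "4G"})
  # becomes (constants.DDM_ADD, -1, {"size": "4G"}), while the new-style form
  # ("2", {"remove": None}) becomes (constants.DDM_REMOVE, "2", {}).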
  result = []

  for (identifier, params) in mods:
    if identifier == constants.DDM_ADD:
      # Add as last item (legacy interface)
      action = constants.DDM_ADD
      identifier = -1
    elif identifier == constants.DDM_REMOVE:
      # Remove last item (legacy interface)
      action = constants.DDM_REMOVE
      identifier = -1
    else:
      # Modifications and adding/removing at arbitrary indices
      add = params.pop(constants.DDM_ADD, _MISSING)
      remove = params.pop(constants.DDM_REMOVE, _MISSING)
      modify = params.pop(constants.DDM_MODIFY, _MISSING)

      if modify is _MISSING:
        if not (add is _MISSING or remove is _MISSING):
          raise errors.OpPrereqError("Cannot add and remove at the same time",
                                     errors.ECODE_INVAL)
        elif add is not _MISSING:
          action = constants.DDM_ADD
        elif remove is not _MISSING:
          action = constants.DDM_REMOVE
        else:
          action = constants.DDM_MODIFY

      elif add is _MISSING and remove is _MISSING:
        action = constants.DDM_MODIFY
      else:
        raise errors.OpPrereqError("Cannot modify and add/remove at the"
                                   " same time", errors.ECODE_INVAL)

    assert not (constants.DDMS_VALUES_WITH_MODIFY & set(params.keys()))

    if action == constants.DDM_REMOVE and params:
      raise errors.OpPrereqError("Not accepting parameters on removal",
                                 errors.ECODE_INVAL)

    result.append((action, identifier, params))

  return result


def SetInstanceParams(opts, args):
  """Modifies an instance.

  All parameters take effect only at the next restart of the instance.

  @param opts: the command line options selected by the user
  @type args: list
  @param args: should contain only one element, the instance name
  @rtype: int
  @return: the desired exit code

  """
  if not (opts.nics or opts.disks or opts.disk_template or
          opts.hvparams or opts.beparams or opts.os or opts.osparams or
          opts.offline_inst or opts.online_inst or opts.runtime_mem or
          opts.new_primary_node):
    ToStderr("Please give at least one of the parameters.")
    return 1
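
  # The literal string "default" (any case) is accepted for backend and
  # hypervisor parameters and translated to the special "use the default"
  # value before the dictionaries are type-checked.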
  for param in opts.beparams:
    if isinstance(opts.beparams[param], basestring):
      if opts.beparams[param].lower() == "default":
        opts.beparams[param] = constants.VALUE_DEFAULT

  utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT,
                      allowed_values=[constants.VALUE_DEFAULT])

  for param in opts.hvparams:
    if isinstance(opts.hvparams[param], basestring):
      if opts.hvparams[param].lower() == "default":
        opts.hvparams[param] = constants.VALUE_DEFAULT

  utils.ForceDictType(opts.hvparams, constants.HVS_PARAMETER_TYPES,
                      allowed_values=[constants.VALUE_DEFAULT])
  FixHvParams(opts.hvparams)

  nics = _ConvertNicDiskModifications(opts.nics)
  for action, _, __ in nics:
    if action == constants.DDM_MODIFY and opts.hotplug and not opts.force:
      usertext = ("You are about to hot-modify a NIC. This will be done"
                  " by removing the existing NIC and then adding a new one."
                  " Network connection might be lost. Continue?")
      if not AskUser(usertext):
        return 1

  disks = _ParseDiskSizes(_ConvertNicDiskModifications(opts.disks))

  if (opts.disk_template and
      opts.disk_template in constants.DTS_INT_MIRROR and
      not opts.node):
    ToStderr("Changing the disk template to a mirrored one requires"
             " specifying a secondary node")
    return 1

  if opts.offline_inst:
    offline = True
  elif opts.online_inst:
    offline = False
  else:
    offline = None

  op = opcodes.OpInstanceSetParams(instance_name=args[0],
                                   nics=nics,
                                   disks=disks,
                                   hotplug=opts.hotplug,
                                   hotplug_if_possible=opts.hotplug_if_possible,
                                   disk_template=opts.disk_template,
                                   remote_node=opts.node,
                                   pnode=opts.new_primary_node,
                                   hvparams=opts.hvparams,
                                   beparams=opts.beparams,
                                   runtime_mem=opts.runtime_mem,
                                   os_name=opts.os,
                                   osparams=opts.osparams,
                                   force_variant=opts.force_variant,
                                   force=opts.force,
                                   wait_for_sync=opts.wait_for_sync,
                                   offline=offline,
                                   conflicts_check=opts.conflicts_check,
                                   ignore_ipolicy=opts.ignore_ipolicy)

  result = SubmitOrSend(op, opts)

  if result:
    ToStdout("Modified instance %s", args[0])
    for param, data in result:
      ToStdout(" - %-5s -> %s", param, data)
    ToStdout("Please don't forget that most parameters take effect"
             " only at the next (re)start of the instance initiated by"
             " ganeti; restarting from within the instance will"
             " not be enough.")
    if opts.hvparams:
      ToStdout("Note that changing hypervisor parameters without performing a"
               " restart might lead to a crash while performing a live"
               " migration. This will be addressed in future Ganeti versions.")
  return 0


def ChangeGroup(opts, args):
  """Moves an instance to another group.

  """
  (instance_name, ) = args

  cl = GetClient()

  op = opcodes.OpInstanceChangeGroup(instance_name=instance_name,
                                     iallocator=opts.iallocator,
                                     target_groups=opts.to,
                                     early_release=opts.early_release)
  result = SubmitOrSend(op, opts, cl=cl)

  # Keep track of submitted jobs
  jex = JobExecutor(cl=cl, opts=opts)

  for (status, job_id) in result[constants.JOB_IDS_KEY]:
    jex.AddJobId(None, status, job_id)

  results = jex.GetResults()
  bad_cnt = len([row for row in results if not row[0]])
  if bad_cnt == 0:
    ToStdout("Instance '%s' changed group successfully.", instance_name)
    rcode = constants.EXIT_SUCCESS
  else:
    ToStdout("There were %s errors while changing group of instance '%s'.",
             bad_cnt, instance_name)
    rcode = constants.EXIT_FAILURE

  return rcode
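

# Multi-instance selection options shared by the reinstall, startup,
# shutdown and reboot commands below.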
m_force_multi = cli_option("--force-multiple", dest="force_multi",
                           help="Do not ask for confirmation when more than"
                           " one instance is affected",
                           action="store_true", default=False)

m_pri_node_opt = cli_option("--primary", dest="multi_mode",
                            help="Filter by nodes (primary only)",
                            const=_EXPAND_NODES_PRI, action="store_const")

m_sec_node_opt = cli_option("--secondary", dest="multi_mode",
                            help="Filter by nodes (secondary only)",
                            const=_EXPAND_NODES_SEC, action="store_const")

m_node_opt = cli_option("--node", dest="multi_mode",
                        help="Filter by nodes (primary and secondary)",
                        const=_EXPAND_NODES_BOTH, action="store_const")

m_clust_opt = cli_option("--all", dest="multi_mode",
                         help="Select all instances in the cluster",
                         const=_EXPAND_CLUSTER, action="store_const")

m_inst_opt = cli_option("--instance", dest="multi_mode",
                        help="Filter by instance name [default]",
                        const=_EXPAND_INSTANCES, action="store_const")

m_node_tags_opt = cli_option("--node-tags", dest="multi_mode",
                             help="Filter by node tag",
                             const=_EXPAND_NODES_BOTH_BY_TAGS,
                             action="store_const")

m_pri_node_tags_opt = cli_option("--pri-node-tags", dest="multi_mode",
                                 help="Filter by primary node tag",
                                 const=_EXPAND_NODES_PRI_BY_TAGS,
                                 action="store_const")

m_sec_node_tags_opt = cli_option("--sec-node-tags", dest="multi_mode",
                                 help="Filter by secondary node tag",
                                 const=_EXPAND_NODES_SEC_BY_TAGS,
                                 action="store_const")

m_inst_tags_opt = cli_option("--tags", dest="multi_mode",
                             help="Filter by instance tag",
                             const=_EXPAND_INSTANCES_BY_TAGS,
                             action="store_const")


add_opts = [
  NOSTART_OPT,
  OS_OPT,
  FORCE_VARIANT_OPT,
  NO_INSTALL_OPT,
  IGNORE_IPOLICY_OPT,
  ]

commands = {
  "add": (
    AddInstance, [ArgHost(min=1, max=1)], COMMON_CREATE_OPTS + add_opts,
    "[...] -t disk-type -n node[:secondary-node] -o os-type <name>",
    "Creates and adds a new instance to the cluster"),
  "batch-create": (
    BatchCreate, [ArgFile(min=1, max=1)],
    [DRY_RUN_OPT, PRIORITY_OPT, IALLOCATOR_OPT] + SUBMIT_OPTS,
    "<instances.json>",
    "Create a bunch of instances based on specs in the file."),
  "console": (
    ConnectToInstanceConsole, ARGS_ONE_INSTANCE,
    [SHOWCMD_OPT, PRIORITY_OPT],
    "[--show-cmd] <instance>", "Opens a console on the specified instance"),
  "failover": (
    FailoverInstance, ARGS_ONE_INSTANCE,
    [FORCE_OPT, IGNORE_CONSIST_OPT] + SUBMIT_OPTS +
    [SHUTDOWN_TIMEOUT_OPT,
     DRY_RUN_OPT, PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT,
     IGNORE_IPOLICY_OPT, CLEANUP_OPT],
    "[-f] <instance>", "Stops the instance, changes its primary node and"
    " (if it was originally running) starts it on the new node"
    " (the secondary for mirrored instances or any node"
    " for shared storage)."),
  "migrate": (
    MigrateInstance, ARGS_ONE_INSTANCE,
    [FORCE_OPT, NONLIVE_OPT, MIGRATION_MODE_OPT, CLEANUP_OPT, DRY_RUN_OPT,
     PRIORITY_OPT, DST_NODE_OPT, IALLOCATOR_OPT, ALLOW_FAILOVER_OPT,
     IGNORE_IPOLICY_OPT, NORUNTIME_CHGS_OPT] + SUBMIT_OPTS,
    "[-f] <instance>", "Migrate instance to its secondary node"
    " (only for mirrored instances)"),
  "move": (
    MoveInstance, ARGS_ONE_INSTANCE,
    [FORCE_OPT] + SUBMIT_OPTS +
    [SINGLE_NODE_OPT, COMPRESS_OPT,
     SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_CONSIST_OPT,
     IGNORE_IPOLICY_OPT],
    "[-f] <instance>", "Move instance to an arbitrary node"
    " (only for instances of type file and lv)"),
  "info": (
    ShowInstanceConfig, ARGS_MANY_INSTANCES,
    [STATIC_OPT, ALL_OPT, ROMAN_OPT, PRIORITY_OPT],
    "[-s] {--all | <instance>...}",
    "Show information on the specified instance(s)"),
  "list": (
    ListInstances, ARGS_MANY_INSTANCES,
    [NOHDR_OPT, SEP_OPT, USEUNITS_OPT, FIELDS_OPT, VERBOSE_OPT,
     FORCE_FILTER_OPT],
    "[<instance>...]",
    "Lists the instances and their status. The available fields can be shown"
    " using the \"list-fields\" command (see the man page for details)."
    " The default field list is (in order): %s." %
    utils.CommaJoin(_LIST_DEF_FIELDS),
    ),
  "list-fields": (
    ListInstanceFields, [ArgUnknown()],
    [NOHDR_OPT, SEP_OPT],
    "[fields...]",
    "Lists all available fields for instances"),
  "reinstall": (
    ReinstallInstance, [ArgInstance()],
    [FORCE_OPT, OS_OPT, FORCE_VARIANT_OPT, m_force_multi, m_node_opt,
     m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt, m_node_tags_opt,
     m_pri_node_tags_opt, m_sec_node_tags_opt, m_inst_tags_opt, SELECT_OS_OPT]
    + SUBMIT_OPTS + [DRY_RUN_OPT, PRIORITY_OPT, OSPARAMS_OPT],
    "[-f] <instance>", "Reinstall a stopped instance"),
  "remove": (
    RemoveInstance, ARGS_ONE_INSTANCE,
    [FORCE_OPT, SHUTDOWN_TIMEOUT_OPT, IGNORE_FAILURES_OPT] + SUBMIT_OPTS
    + [DRY_RUN_OPT, PRIORITY_OPT],
    "[-f] <instance>", "Shuts down the instance and removes it"),
  "rename": (
    RenameInstance,
    [ArgInstance(min=1, max=1), ArgHost(min=1, max=1)],
    [NOIPCHECK_OPT, NONAMECHECK_OPT] + SUBMIT_OPTS
    + [DRY_RUN_OPT, PRIORITY_OPT],
    "<instance> <new_name>", "Rename the instance"),
  "replace-disks": (
    ReplaceDisks, ARGS_ONE_INSTANCE,
    [AUTO_REPLACE_OPT, DISKIDX_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT,
     NEW_SECONDARY_OPT, ON_PRIMARY_OPT, ON_SECONDARY_OPT] + SUBMIT_OPTS
    + [DRY_RUN_OPT, PRIORITY_OPT, IGNORE_IPOLICY_OPT],
    "[-s|-p|-a|-n NODE|-I NAME] <instance>",
    "Replaces disks for the instance"),
  "modify": (
    SetInstanceParams, ARGS_ONE_INSTANCE,
    [BACKEND_OPT, DISK_OPT, FORCE_OPT, HVOPTS_OPT, NET_OPT] + SUBMIT_OPTS +
    [DISK_TEMPLATE_OPT, SINGLE_NODE_OPT, OS_OPT, FORCE_VARIANT_OPT,
     OSPARAMS_OPT, DRY_RUN_OPT, PRIORITY_OPT, NWSYNC_OPT, OFFLINE_INST_OPT,
     ONLINE_INST_OPT, IGNORE_IPOLICY_OPT, RUNTIME_MEM_OPT,
     NOCONFLICTSCHECK_OPT, NEW_PRIMARY_OPT, HOTPLUG_OPT,
     HOTPLUG_IF_POSSIBLE_OPT],
    "<instance>", "Alters the parameters of an instance"),
  "shutdown": (
    GenericManyOps("shutdown", _ShutdownInstance), [ArgInstance()],
    [FORCE_OPT, m_node_opt, m_pri_node_opt, m_sec_node_opt, m_clust_opt,
     m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
     m_inst_tags_opt, m_inst_opt, m_force_multi, TIMEOUT_OPT] + SUBMIT_OPTS
    + [DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT, NO_REMEMBER_OPT],
    "<instance>", "Stops an instance"),
  "startup": (
    GenericManyOps("startup", _StartupInstance), [ArgInstance()],
    [FORCE_OPT, m_force_multi, m_node_opt, m_pri_node_opt, m_sec_node_opt,
     m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
     m_inst_tags_opt, m_clust_opt, m_inst_opt] + SUBMIT_OPTS +
    [HVOPTS_OPT,
     BACKEND_OPT, DRY_RUN_OPT, PRIORITY_OPT, IGNORE_OFFLINE_OPT,
     NO_REMEMBER_OPT, STARTUP_PAUSED_OPT],
    "<instance>", "Starts an instance"),
  "reboot": (
    GenericManyOps("reboot", _RebootInstance), [ArgInstance()],
    [m_force_multi, REBOOT_TYPE_OPT, IGNORE_SECONDARIES_OPT, m_node_opt,
     m_pri_node_opt, m_sec_node_opt, m_clust_opt, m_inst_opt] + SUBMIT_OPTS +
    [m_node_tags_opt, m_pri_node_tags_opt, m_sec_node_tags_opt,
     m_inst_tags_opt, SHUTDOWN_TIMEOUT_OPT, DRY_RUN_OPT, PRIORITY_OPT],
    "<instance>", "Reboots an instance"),
  "activate-disks": (
    ActivateDisks, ARGS_ONE_INSTANCE,
    SUBMIT_OPTS + [IGNORE_SIZE_OPT, PRIORITY_OPT, WFSYNC_OPT],
    "<instance>", "Activate an instance's disks"),
  "deactivate-disks": (
    DeactivateDisks, ARGS_ONE_INSTANCE,
    [FORCE_OPT] + SUBMIT_OPTS + [DRY_RUN_OPT, PRIORITY_OPT],
    "[-f] <instance>", "Deactivate an instance's disks"),
  "recreate-disks": (
    RecreateDisks, ARGS_ONE_INSTANCE,
    SUBMIT_OPTS +
    [DISK_OPT, NODE_PLACEMENT_OPT, DRY_RUN_OPT, PRIORITY_OPT,
     IALLOCATOR_OPT],
    "<instance>", "Recreate an instance's disks"),
  "grow-disk": (
    GrowDisk,
    [ArgInstance(min=1, max=1), ArgUnknown(min=1, max=1),
     ArgUnknown(min=1, max=1)],
    SUBMIT_OPTS + [NWSYNC_OPT, DRY_RUN_OPT, PRIORITY_OPT, ABSOLUTE_OPT],
    "<instance> <disk> <size>", "Grow an instance's disk"),
  "change-group": (
    ChangeGroup, ARGS_ONE_INSTANCE,
    [TO_GROUP_OPT, IALLOCATOR_OPT, EARLY_RELEASE_OPT, PRIORITY_OPT]
    + SUBMIT_OPTS,
    "[-I <iallocator>] [--to <group>]", "Change group of instance"),
  "list-tags": (
    ListTags, ARGS_ONE_INSTANCE, [],
    "<instance_name>", "List the tags of the given instance"),
  "add-tags": (
    AddTags, [ArgInstance(min=1, max=1), ArgUnknown()],
    [TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
    "<instance_name> tag...", "Add tags to the given instance"),
  "remove-tags": (
    RemoveTags, [ArgInstance(min=1, max=1), ArgUnknown()],
    [TAG_SRC_OPT, PRIORITY_OPT] + SUBMIT_OPTS,
    "<instance_name> tag...", "Remove tags from given instance"),
  }


aliases = {
  "start": "startup",
  "stop": "shutdown",
  "show": "info",
  }


def Main():
  return GenericMain(commands, aliases=aliases,
                     override={"tag_type": constants.TAG_INSTANCE},
                     env_override=_ENV_OVERRIDE)