22 """Module dealing with command line parsing"""
23
24
25 import sys
26 import textwrap
27 import os.path
28 import time
29 import logging
30 import errno
31 from cStringIO import StringIO
32
33 from ganeti import utils
34 from ganeti import errors
35 from ganeti import constants
36 from ganeti import opcodes
37 from ganeti import luxi
38 from ganeti import ssconf
39 from ganeti import rpc
40 from ganeti import ssh
41 from ganeti import compat
42 from ganeti import netutils
43 from ganeti import qlang
44
45 from optparse import (OptionParser, TitledHelpFormatter,
46 Option, OptionValueError)
47
48
49 __all__ = [
50
51 "ADD_UIDS_OPT",
52 "ALLOCATABLE_OPT",
53 "ALLOC_POLICY_OPT",
54 "ALL_OPT",
55 "AUTO_PROMOTE_OPT",
56 "AUTO_REPLACE_OPT",
57 "BACKEND_OPT",
58 "BLK_OS_OPT",
59 "CAPAB_MASTER_OPT",
60 "CAPAB_VM_OPT",
61 "CLEANUP_OPT",
62 "CLUSTER_DOMAIN_SECRET_OPT",
63 "CONFIRM_OPT",
64 "CP_SIZE_OPT",
65 "DEBUG_OPT",
66 "DEBUG_SIMERR_OPT",
67 "DISKIDX_OPT",
68 "DISK_OPT",
69 "DISK_TEMPLATE_OPT",
70 "DRAINED_OPT",
71 "DRY_RUN_OPT",
72 "DRBD_HELPER_OPT",
73 "EARLY_RELEASE_OPT",
74 "ENABLED_HV_OPT",
75 "ERROR_CODES_OPT",
76 "FIELDS_OPT",
77 "FILESTORE_DIR_OPT",
78 "FILESTORE_DRIVER_OPT",
79 "FORCE_OPT",
80 "FORCE_VARIANT_OPT",
81 "GLOBAL_FILEDIR_OPT",
82 "HID_OS_OPT",
83 "HVLIST_OPT",
84 "HVOPTS_OPT",
85 "HYPERVISOR_OPT",
86 "IALLOCATOR_OPT",
87 "DEFAULT_IALLOCATOR_OPT",
88 "IDENTIFY_DEFAULTS_OPT",
89 "IGNORE_CONSIST_OPT",
90 "IGNORE_FAILURES_OPT",
91 "IGNORE_OFFLINE_OPT",
92 "IGNORE_REMOVE_FAILURES_OPT",
93 "IGNORE_SECONDARIES_OPT",
94 "IGNORE_SIZE_OPT",
95 "INTERVAL_OPT",
96 "MAC_PREFIX_OPT",
97 "MAINTAIN_NODE_HEALTH_OPT",
98 "MASTER_NETDEV_OPT",
99 "MC_OPT",
100 "MIGRATION_MODE_OPT",
101 "NET_OPT",
102 "NEW_CLUSTER_CERT_OPT",
103 "NEW_CLUSTER_DOMAIN_SECRET_OPT",
104 "NEW_CONFD_HMAC_KEY_OPT",
105 "NEW_RAPI_CERT_OPT",
106 "NEW_SECONDARY_OPT",
107 "NIC_PARAMS_OPT",
108 "NODE_FORCE_JOIN_OPT",
109 "NODE_LIST_OPT",
110 "NODE_PLACEMENT_OPT",
111 "NODEGROUP_OPT",
112 "NODE_PARAMS_OPT",
113 "NODE_POWERED_OPT",
114 "NODRBD_STORAGE_OPT",
115 "NOHDR_OPT",
116 "NOIPCHECK_OPT",
117 "NO_INSTALL_OPT",
118 "NONAMECHECK_OPT",
119 "NOLVM_STORAGE_OPT",
120 "NOMODIFY_ETCHOSTS_OPT",
121 "NOMODIFY_SSH_SETUP_OPT",
122 "NONICS_OPT",
123 "NONLIVE_OPT",
124 "NONPLUS1_OPT",
125 "NOSHUTDOWN_OPT",
126 "NOSTART_OPT",
127 "NOSSH_KEYCHECK_OPT",
128 "NOVOTING_OPT",
129 "NO_REMEMBER_OPT",
130 "NWSYNC_OPT",
131 "ON_PRIMARY_OPT",
132 "ON_SECONDARY_OPT",
133 "OFFLINE_OPT",
134 "OSPARAMS_OPT",
135 "OS_OPT",
136 "OS_SIZE_OPT",
137 "PREALLOC_WIPE_DISKS_OPT",
138 "PRIMARY_IP_VERSION_OPT",
139 "PRIORITY_OPT",
140 "RAPI_CERT_OPT",
141 "READD_OPT",
142 "REBOOT_TYPE_OPT",
143 "REMOVE_INSTANCE_OPT",
144 "REMOVE_UIDS_OPT",
145 "RESERVED_LVS_OPT",
146 "ROMAN_OPT",
147 "SECONDARY_IP_OPT",
148 "SELECT_OS_OPT",
149 "SEP_OPT",
150 "SHOWCMD_OPT",
151 "SHUTDOWN_TIMEOUT_OPT",
152 "SINGLE_NODE_OPT",
153 "SRC_DIR_OPT",
154 "SRC_NODE_OPT",
155 "SUBMIT_OPT",
156 "STATIC_OPT",
157 "SYNC_OPT",
158 "TAG_SRC_OPT",
159 "TIMEOUT_OPT",
160 "UIDPOOL_OPT",
161 "USEUNITS_OPT",
162 "USE_REPL_NET_OPT",
163 "VERBOSE_OPT",
164 "VG_NAME_OPT",
165 "YES_DOIT_OPT",
166
167 "GenericMain",
168 "GenericInstanceCreate",
169 "GenericList",
170 "GenericListFields",
171 "GetClient",
172 "GetOnlineNodes",
173 "JobExecutor",
174 "JobSubmittedException",
175 "ParseTimespec",
176 "RunWhileClusterStopped",
177 "SubmitOpCode",
178 "SubmitOrSend",
179 "UsesRPC",
180
181 "ToStderr", "ToStdout",
182 "FormatError",
183 "FormatQueryResult",
184 "FormatParameterDict",
185 "GenerateTable",
186 "AskUser",
187 "FormatTimestamp",
188 "FormatLogMessage",
189
190 "ListTags",
191 "AddTags",
192 "RemoveTags",
193
194 "ARGS_MANY_INSTANCES",
195 "ARGS_MANY_NODES",
196 "ARGS_MANY_GROUPS",
197 "ARGS_NONE",
198 "ARGS_ONE_INSTANCE",
199 "ARGS_ONE_NODE",
200 "ARGS_ONE_GROUP",
201 "ARGS_ONE_OS",
202 "ArgChoice",
203 "ArgCommand",
204 "ArgFile",
205 "ArgGroup",
206 "ArgHost",
207 "ArgInstance",
208 "ArgJobId",
209 "ArgNode",
210 "ArgOs",
211 "ArgSuggest",
212 "ArgUnknown",
213 "OPT_COMPL_INST_ADD_NODES",
214 "OPT_COMPL_MANY_NODES",
215 "OPT_COMPL_ONE_IALLOCATOR",
216 "OPT_COMPL_ONE_INSTANCE",
217 "OPT_COMPL_ONE_NODE",
218 "OPT_COMPL_ONE_NODEGROUP",
219 "OPT_COMPL_ONE_OS",
220 "cli_option",
221 "SplitNodeOption",
222 "CalculateOSNames",
223 "ParseFields",
224 "COMMON_CREATE_OPTS",
225 ]
226
227 NO_PREFIX = "no_"
228 UN_PREFIX = "-"
229
230
231 _PRIORITY_NAMES = [
232 ("low", constants.OP_PRIO_LOW),
233 ("normal", constants.OP_PRIO_NORMAL),
234 ("high", constants.OP_PRIO_HIGH),
235 ]
236
237
238
239
240 _PRIONAME_TO_VALUE = dict(_PRIORITY_NAMES)
241
242
243 (QR_NORMAL,
244 QR_UNKNOWN,
245 QR_INCOMPLETE) = range(3)
246
247
248 -class _Argument:
249 - def __init__(self, min=0, max=None):
250 self.min = min
251 self.max = max
252
253 - def __repr__(self):
254 return ("<%s min=%s max=%s>" %
255 (self.__class__.__name__, self.min, self.max))
256
257
258 -class ArgSuggest(_Argument):
259 """Suggesting argument.
260
261 Value can be any of the ones passed to the constructor.
262
263 """
264
265 - def __init__(self, min=0, max=None, choices=None):
266 _Argument.__init__(self, min=min, max=max)
267 self.choices = choices
268
269 - def __repr__(self):
270 return ("<%s min=%s max=%s choices=%r>" %
271 (self.__class__.__name__, self.min, self.max, self.choices))
272
273
274 -class ArgChoice(ArgSuggest):
275 """Choice argument.
276
277 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
278 but value must be one of the choices.
279
280 """
281
282
283 -class ArgUnknown(_Argument):
284 """Unknown argument to program (e.g. determined at runtime).
285
286 """
287
288
289 -class ArgInstance(_Argument):
290 """Instances argument.
291
292 """
293
294
295 -class ArgNode(_Argument):
296 """Node argument.
297
298 """
299
300
301 -class ArgGroup(_Argument):
302 """Node group argument.
303
304 """
305
306
307 -class ArgJobId(_Argument):
308 """Job ID argument.
309
310 """
311
312
313 -class ArgFile(_Argument):
314 """File path argument.
315
316 """
317
318
319 -class ArgCommand(_Argument):
320 """Command argument.
321
322 """
323
324
325 -class ArgHost(_Argument):
326 """Host argument.
327
328 """
329
330
331 -class ArgOs(_Argument):
332 """OS argument.
333
334 """
335
336
337 ARGS_NONE = []
338 ARGS_MANY_INSTANCES = [ArgInstance()]
339 ARGS_MANY_NODES = [ArgNode()]
340 ARGS_MANY_GROUPS = [ArgGroup()]
341 ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
342 ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
343
344 ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
345 ARGS_ONE_OS = [ArgOs(min=1, max=1)]
346
347
367
368
396
397
414
415
431
432
448
449
450 -def check_unit(option, opt, value):
451 """OptParser's custom converter for units.
452
453 """
454 try:
455 return utils.ParseUnit(value)
456 except errors.UnitParseError, err:
457 raise OptionValueError("option %s: %s" % (opt, err))
458
459
460 -def _SplitKeyVal(opt, data):
461 """Convert a KeyVal string into a dict.
462
463 This function will convert a key=val[,...] string into a dict. Empty
464 values will be converted specially: keys which have the prefix 'no_'
465 will have the value=False and the prefix stripped, the others will
466 have value=True.
467
468 @type opt: string
469 @param opt: a string holding the option name for which we process the
470 data, used in building error messages
471 @type data: string
472 @param data: a string of the format key=val,key=val,...
473 @rtype: dict
474 @return: {key=val, key=val}
475 @raises errors.ParameterError: if there are duplicate keys
476
477 """
478 kv_dict = {}
479 if data:
480 for elem in utils.UnescapeAndSplit(data, sep=","):
481 if "=" in elem:
482 key, val = elem.split("=", 1)
483 else:
484 if elem.startswith(NO_PREFIX):
485 key, val = elem[len(NO_PREFIX):], False
486 elif elem.startswith(UN_PREFIX):
487 key, val = elem[len(UN_PREFIX):], None
488 else:
489 key, val = elem, True
490 if key in kv_dict:
491 raise errors.ParameterError("Duplicate key '%s' in option %s" %
492 (key, opt))
493 kv_dict[key] = val
494 return kv_dict
495
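# Illustrative sketch of what _SplitKeyVal produces; the option name and the
# parameter names below are made up for the example:
#   _SplitKeyVal("--backend-parameters", "memory=512,no_acpi,-kernel_path")
#   => {'memory': '512', 'acpi': False, 'kernel_path': None}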
496
497 -def check_ident_key_val(option, opt, value):
498 """Custom parser for ident:key=val,key=val options.
499
500 This will store the parsed values as a tuple (ident, {key: val}). As such,
501 multiple uses of this option via action=append is possible.
502
503 """
504 if ":" not in value:
505 ident, rest = value, ''
506 else:
507 ident, rest = value.split(":", 1)
508
509 if ident.startswith(NO_PREFIX):
510 if rest:
511 msg = "Cannot pass options when removing parameter groups: %s" % value
512 raise errors.ParameterError(msg)
513 retval = (ident[len(NO_PREFIX):], False)
514 elif ident.startswith(UN_PREFIX):
515 if rest:
516 msg = "Cannot pass options when removing parameter groups: %s" % value
517 raise errors.ParameterError(msg)
518 retval = (ident[len(UN_PREFIX):], None)
519 else:
520 kv_dict = _SplitKeyVal(opt, rest)
521 retval = (ident, kv_dict)
522 return retval
523
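# Illustrative sketch of check_ident_key_val (values are made up); it backs the
# "identkeyval" option type and turns "ident:key=val,..." into a tuple:
#   check_ident_key_val(None, "--net", "0:ip=192.0.2.10,mode=bridged")
#   => ("0", {'ip': '192.0.2.10', 'mode': 'bridged'})
# while a "no_"-prefixed ident such as "no_link" yields ("link", False).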
524
525 -def check_key_val(option, opt, value):
526 """Custom parser for key=val,key=val options.
527
528 This will store the parsed values as a dict {key: val}.
529
530 """
531 return _SplitKeyVal(opt, value)
532
533
534 -def check_bool(option, opt, value):
535 """Custom parser for yes/no options.
536
537 This will store the parsed value as either True or False.
538
539 """
540 value = value.lower()
541 if value == constants.VALUE_FALSE or value == "no":
542 return False
543 elif value == constants.VALUE_TRUE or value == "yes":
544 return True
545 else:
546 raise errors.ParameterError("Invalid boolean value '%s'" % value)
547
548
549
550
551 (OPT_COMPL_MANY_NODES,
552 OPT_COMPL_ONE_NODE,
553 OPT_COMPL_ONE_INSTANCE,
554 OPT_COMPL_ONE_OS,
555 OPT_COMPL_ONE_IALLOCATOR,
556 OPT_COMPL_INST_ADD_NODES,
557 OPT_COMPL_ONE_NODEGROUP) = range(100, 107)
558
559 OPT_COMPL_ALL = frozenset([
560 OPT_COMPL_MANY_NODES,
561 OPT_COMPL_ONE_NODE,
562 OPT_COMPL_ONE_INSTANCE,
563 OPT_COMPL_ONE_OS,
564 OPT_COMPL_ONE_IALLOCATOR,
565 OPT_COMPL_INST_ADD_NODES,
566 OPT_COMPL_ONE_NODEGROUP,
567 ])
568
569
588
589
590
591 cli_option = CliOption
592
593
594 _YORNO = "yes|no"
595
596 DEBUG_OPT = cli_option("-d", "--debug", default=0, action="count",
597 help="Increase debugging level")
598
599 NOHDR_OPT = cli_option("--no-headers", default=False,
600 action="store_true", dest="no_headers",
601 help="Don't display column headers")
602
603 SEP_OPT = cli_option("--separator", default=None,
604 action="store", dest="separator",
605 help=("Separator between output fields"
606 " (defaults to one space)"))
607
608 USEUNITS_OPT = cli_option("--units", default=None,
609 dest="units", choices=('h', 'm', 'g', 't'),
610 help="Specify units for output (one of h/m/g/t)")
611
612 FIELDS_OPT = cli_option("-o", "--output", dest="output", action="store",
613 type="string", metavar="FIELDS",
614 help="Comma separated list of output fields")
615
616 FORCE_OPT = cli_option("-f", "--force", dest="force", action="store_true",
617 default=False, help="Force the operation")
618
619 CONFIRM_OPT = cli_option("--yes", dest="confirm", action="store_true",
620 default=False, help="Do not require confirmation")
621
622 IGNORE_OFFLINE_OPT = cli_option("--ignore-offline", dest="ignore_offline",
623 action="store_true", default=False,
624 help=("Ignore offline nodes and do as much"
625 " as possible"))
626
627 TAG_SRC_OPT = cli_option("--from", dest="tags_source",
628 default=None, help="File with tag names")
629
630 SUBMIT_OPT = cli_option("--submit", dest="submit_only",
631 default=False, action="store_true",
632 help=("Submit the job and return the job ID, but"
633 " don't wait for the job to finish"))
634
635 SYNC_OPT = cli_option("--sync", dest="do_locking",
636 default=False, action="store_true",
637 help=("Grab locks while doing the queries"
638 " in order to ensure more consistent results"))
639
640 DRY_RUN_OPT = cli_option("--dry-run", default=False,
641 action="store_true",
642 help=("Do not execute the operation, just run the"
643 " check steps and verify if it could be"
644 " executed"))
645
646 VERBOSE_OPT = cli_option("-v", "--verbose", default=False,
647 action="store_true",
648 help="Increase the verbosity of the operation")
649
650 DEBUG_SIMERR_OPT = cli_option("--debug-simulate-errors", default=False,
651 action="store_true", dest="simulate_errors",
652 help="Debugging option that makes the operation"
653 " treat most runtime checks as failed")
654
655 NWSYNC_OPT = cli_option("--no-wait-for-sync", dest="wait_for_sync",
656 default=True, action="store_false",
657 help="Don't wait for sync (DANGEROUS!)")
658
659 DISK_TEMPLATE_OPT = cli_option("-t", "--disk-template", dest="disk_template",
660 help="Custom disk setup (diskless, file,"
661 " plain or drbd)",
662 default=None, metavar="TEMPL",
663 choices=list(constants.DISK_TEMPLATES))
664
665 NONICS_OPT = cli_option("--no-nics", default=False, action="store_true",
666 help="Do not create any network cards for"
667 " the instance")
668
669 FILESTORE_DIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
670 help="Relative path under default cluster-wide"
671 " file storage dir to store file-based disks",
672 default=None, metavar="<DIR>")
673
674 FILESTORE_DRIVER_OPT = cli_option("--file-driver", dest="file_driver",
675 help="Driver to use for image files",
676 default="loop", metavar="<DRIVER>",
677 choices=list(constants.FILE_DRIVER))
678
679 IALLOCATOR_OPT = cli_option("-I", "--iallocator", metavar="<NAME>",
680 help="Select nodes for the instance automatically"
681 " using the <NAME> iallocator plugin",
682 default=None, type="string",
683 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
684
685 DEFAULT_IALLOCATOR_OPT = cli_option("-I", "--default-iallocator",
686 metavar="<NAME>",
687 help="Set the default instance allocator plugin",
688 default=None, type="string",
689 completion_suggest=OPT_COMPL_ONE_IALLOCATOR)
690
691 OS_OPT = cli_option("-o", "--os-type", dest="os", help="What OS to run",
692 metavar="<os>",
693 completion_suggest=OPT_COMPL_ONE_OS)
694
695 OSPARAMS_OPT = cli_option("-O", "--os-parameters", dest="osparams",
696 type="keyval", default={},
697 help="OS parameters")
698
699 FORCE_VARIANT_OPT = cli_option("--force-variant", dest="force_variant",
700 action="store_true", default=False,
701 help="Force an unknown variant")
702
703 NO_INSTALL_OPT = cli_option("--no-install", dest="no_install",
704 action="store_true", default=False,
705 help="Do not install the OS (will"
706 " enable no-start)")
707
708 BACKEND_OPT = cli_option("-B", "--backend-parameters", dest="beparams",
709 type="keyval", default={},
710 help="Backend parameters")
711
712 HVOPTS_OPT = cli_option("-H", "--hypervisor-parameters", type="keyval",
713 default={}, dest="hvparams",
714 help="Hypervisor parameters")
715
716 HYPERVISOR_OPT = cli_option("-H", "--hypervisor-parameters", dest="hypervisor",
717 help="Hypervisor and hypervisor options, in the"
718 " format hypervisor:option=value,option=value,...",
719 default=None, type="identkeyval")
720
721 HVLIST_OPT = cli_option("-H", "--hypervisor-parameters", dest="hvparams",
722 help="Hypervisor and hypervisor options, in the"
723 " format hypervisor:option=value,option=value,...",
724 default=[], action="append", type="identkeyval")
725
726 NOIPCHECK_OPT = cli_option("--no-ip-check", dest="ip_check", default=True,
727 action="store_false",
728 help="Don't check that the instance's IP"
729 " is alive")
730
731 NONAMECHECK_OPT = cli_option("--no-name-check", dest="name_check",
732 default=True, action="store_false",
733 help="Don't check that the instance's name"
734 " is resolvable")
735
736 NET_OPT = cli_option("--net",
737 help="NIC parameters", default=[],
738 dest="nics", action="append", type="identkeyval")
739
740 DISK_OPT = cli_option("--disk", help="Disk parameters", default=[],
741 dest="disks", action="append", type="identkeyval")
742
743 DISKIDX_OPT = cli_option("--disks", dest="disks", default=None,
744 help="Comma-separated list of disk"
745 " indices to act on (e.g. 0,2) (optional,"
746 " defaults to all disks)")
747
748 OS_SIZE_OPT = cli_option("-s", "--os-size", dest="sd_size",
749 help="Enforces a single-disk configuration using the"
750 " given disk size, in MiB unless a suffix is used",
751 default=None, type="unit", metavar="<size>")
752
753 IGNORE_CONSIST_OPT = cli_option("--ignore-consistency",
754 dest="ignore_consistency",
755 action="store_true", default=False,
756 help="Ignore the consistency of the disks on"
757 " the secondary")
758
759 NONLIVE_OPT = cli_option("--non-live", dest="live",
760 default=True, action="store_false",
761 help="Do a non-live migration (this usually means"
762 " freeze the instance, save the state, transfer and"
763 " only then resume running on the secondary node)")
764
765 MIGRATION_MODE_OPT = cli_option("--migration-mode", dest="migration_mode",
766 default=None,
767 choices=list(constants.HT_MIGRATION_MODES),
768 help="Override default migration mode (choose"
769 " either live or non-live)")
770
771 NODE_PLACEMENT_OPT = cli_option("-n", "--node", dest="node",
772 help="Target node and optional secondary node",
773 metavar="<pnode>[:<snode>]",
774 completion_suggest=OPT_COMPL_INST_ADD_NODES)
775
776 NODE_LIST_OPT = cli_option("-n", "--node", dest="nodes", default=[],
777 action="append", metavar="<node>",
778 help="Use only this node (can be used multiple"
779 " times, if not given defaults to all nodes)",
780 completion_suggest=OPT_COMPL_ONE_NODE)
781
782 NODEGROUP_OPT = cli_option("-g", "--node-group",
783 dest="nodegroup",
784 help="Node group (name or uuid)",
785 metavar="<nodegroup>",
786 default=None, type="string",
787 completion_suggest=OPT_COMPL_ONE_NODEGROUP)
788
789 SINGLE_NODE_OPT = cli_option("-n", "--node", dest="node", help="Target node",
790 metavar="<node>",
791 completion_suggest=OPT_COMPL_ONE_NODE)
792
793 NOSTART_OPT = cli_option("--no-start", dest="start", default=True,
794 action="store_false",
795 help="Don't start the instance after creation")
796
797 SHOWCMD_OPT = cli_option("--show-cmd", dest="show_command",
798 action="store_true", default=False,
799 help="Show command instead of executing it")
800
801 CLEANUP_OPT = cli_option("--cleanup", dest="cleanup",
802 default=False, action="store_true",
803 help="Instead of performing the migration, try to"
804 " recover from a failed cleanup. This is safe"
805 " to run even if the instance is healthy, but it"
806 " will create extra replication traffic and"
807 " briefly disrupt the replication (like during the"
808 " migration)")
809
810 STATIC_OPT = cli_option("-s", "--static", dest="static",
811 action="store_true", default=False,
812 help="Only show configuration data, not runtime data")
813
814 ALL_OPT = cli_option("--all", dest="show_all",
815 default=False, action="store_true",
816 help="Show info on all instances on the cluster."
817 " This can take a long time to run, use wisely")
818
819 SELECT_OS_OPT = cli_option("--select-os", dest="select_os",
820 action="store_true", default=False,
821 help="Interactive OS reinstall, lists available"
822 " OS templates for selection")
823
824 IGNORE_FAILURES_OPT = cli_option("--ignore-failures", dest="ignore_failures",
825 action="store_true", default=False,
826 help="Remove the instance from the cluster"
827 " configuration even if there are failures"
828 " during the removal process")
829
830 IGNORE_REMOVE_FAILURES_OPT = cli_option("--ignore-remove-failures",
831 dest="ignore_remove_failures",
832 action="store_true", default=False,
833 help="Remove the instance from the"
834 " cluster configuration even if there"
835 " are failures during the removal"
836 " process")
837
838 REMOVE_INSTANCE_OPT = cli_option("--remove-instance", dest="remove_instance",
839 action="store_true", default=False,
840 help="Remove the instance from the cluster")
841
842 NEW_SECONDARY_OPT = cli_option("-n", "--new-secondary", dest="dst_node",
843 help="Specifies the new secondary node",
844 metavar="NODE", default=None,
845 completion_suggest=OPT_COMPL_ONE_NODE)
846
847 ON_PRIMARY_OPT = cli_option("-p", "--on-primary", dest="on_primary",
848 default=False, action="store_true",
849 help="Replace the disk(s) on the primary"
850 " node (only for the drbd template)")
851
852 ON_SECONDARY_OPT = cli_option("-s", "--on-secondary", dest="on_secondary",
853 default=False, action="store_true",
854 help="Replace the disk(s) on the secondary"
855 " node (only for the drbd template)")
856
857 AUTO_PROMOTE_OPT = cli_option("--auto-promote", dest="auto_promote",
858 default=False, action="store_true",
859 help="Lock all nodes and auto-promote as needed"
860 " to MC status")
861
862 AUTO_REPLACE_OPT = cli_option("-a", "--auto", dest="auto",
863 default=False, action="store_true",
864 help="Automatically replace faulty disks"
865 " (only for the drbd template)")
866
867 IGNORE_SIZE_OPT = cli_option("--ignore-size", dest="ignore_size",
868 default=False, action="store_true",
869 help="Ignore current recorded size"
870 " (useful for forcing activation when"
871 " the recorded size is wrong)")
872
873 SRC_NODE_OPT = cli_option("--src-node", dest="src_node", help="Source node",
874 metavar="<node>",
875 completion_suggest=OPT_COMPL_ONE_NODE)
876
877 SRC_DIR_OPT = cli_option("--src-dir", dest="src_dir", help="Source directory",
878 metavar="<dir>")
879
880 SECONDARY_IP_OPT = cli_option("-s", "--secondary-ip", dest="secondary_ip",
881 help="Specify the secondary ip for the node",
882 metavar="ADDRESS", default=None)
883
884 READD_OPT = cli_option("--readd", dest="readd",
885 default=False, action="store_true",
886 help="Readd old node after replacing it")
887
888 NOSSH_KEYCHECK_OPT = cli_option("--no-ssh-key-check", dest="ssh_key_check",
889 default=True, action="store_false",
890 help="Disable SSH key fingerprint checking")
891
892 NODE_FORCE_JOIN_OPT = cli_option("--force-join", dest="force_join",
893 default=False, action="store_true",
894 help="Force the joining of a node")
895
896 MC_OPT = cli_option("-C", "--master-candidate", dest="master_candidate",
897 type="bool", default=None, metavar=_YORNO,
898 help="Set the master_candidate flag on the node")
899
900 OFFLINE_OPT = cli_option("-O", "--offline", dest="offline", metavar=_YORNO,
901 type="bool", default=None,
902 help=("Set the offline flag on the node"
903 " (cluster does not communicate with offline"
904 " nodes)"))
905
906 DRAINED_OPT = cli_option("-D", "--drained", dest="drained", metavar=_YORNO,
907 type="bool", default=None,
908 help=("Set the drained flag on the node"
909 " (excluded from allocation operations)"))
910
911 CAPAB_MASTER_OPT = cli_option("--master-capable", dest="master_capable",
912 type="bool", default=None, metavar=_YORNO,
913 help="Set the master_capable flag on the node")
914
915 CAPAB_VM_OPT = cli_option("--vm-capable", dest="vm_capable",
916 type="bool", default=None, metavar=_YORNO,
917 help="Set the vm_capable flag on the node")
918
919 ALLOCATABLE_OPT = cli_option("--allocatable", dest="allocatable",
920 type="bool", default=None, metavar=_YORNO,
921 help="Set the allocatable flag on a volume")
922
923 NOLVM_STORAGE_OPT = cli_option("--no-lvm-storage", dest="lvm_storage",
924 help="Disable support for lvm based instances"
925 " (cluster-wide)",
926 action="store_false", default=True)
927
928 ENABLED_HV_OPT = cli_option("--enabled-hypervisors",
929 dest="enabled_hypervisors",
930 help="Comma-separated list of hypervisors",
931 type="string", default=None)
932
933 NIC_PARAMS_OPT = cli_option("-N", "--nic-parameters", dest="nicparams",
934 type="keyval", default={},
935 help="NIC parameters")
936
937 CP_SIZE_OPT = cli_option("-C", "--candidate-pool-size", default=None,
938 dest="candidate_pool_size", type="int",
939 help="Set the candidate pool size")
940
941 VG_NAME_OPT = cli_option("--vg-name", dest="vg_name",
942 help=("Enables LVM and specifies the volume group"
943 " name (cluster-wide) for disk allocation"
944 " [%s]" % constants.DEFAULT_VG),
945 metavar="VG", default=None)
946
947 YES_DOIT_OPT = cli_option("--yes-do-it", dest="yes_do_it",
948 help="Destroy cluster", action="store_true")
949
950 NOVOTING_OPT = cli_option("--no-voting", dest="no_voting",
951 help="Skip node agreement check (dangerous)",
952 action="store_true", default=False)
953
954 MAC_PREFIX_OPT = cli_option("-m", "--mac-prefix", dest="mac_prefix",
955 help="Specify the MAC prefix for the instance MAC"
956 " addresses, in the format XX:XX:XX",
957 metavar="PREFIX",
958 default=None)
959
960 MASTER_NETDEV_OPT = cli_option("--master-netdev", dest="master_netdev",
961 help="Specify the node interface (cluster-wide)"
962 " on which the master IP address will be added"
963 " (cluster init default: %s)" %
964 constants.DEFAULT_BRIDGE,
965 metavar="NETDEV",
966 default=None)
967
968 GLOBAL_FILEDIR_OPT = cli_option("--file-storage-dir", dest="file_storage_dir",
969 help="Specify the default directory (cluster-"
970 "wide) for storing the file-based disks [%s]" %
971 constants.DEFAULT_FILE_STORAGE_DIR,
972 metavar="DIR",
973 default=constants.DEFAULT_FILE_STORAGE_DIR)
974
975 NOMODIFY_ETCHOSTS_OPT = cli_option("--no-etc-hosts", dest="modify_etc_hosts",
976 help="Don't modify /etc/hosts",
977 action="store_false", default=True)
978
979 NOMODIFY_SSH_SETUP_OPT = cli_option("--no-ssh-init", dest="modify_ssh_setup",
980 help="Don't initialize SSH keys",
981 action="store_false", default=True)
982
983 ERROR_CODES_OPT = cli_option("--error-codes", dest="error_codes",
984 help="Enable parseable error messages",
985 action="store_true", default=False)
986
987 NONPLUS1_OPT = cli_option("--no-nplus1-mem", dest="skip_nplusone_mem",
988 help="Skip N+1 memory redundancy tests",
989 action="store_true", default=False)
990
991 REBOOT_TYPE_OPT = cli_option("-t", "--type", dest="reboot_type",
992 help="Type of reboot: soft/hard/full",
993 default=constants.INSTANCE_REBOOT_HARD,
994 metavar="<REBOOT>",
995 choices=list(constants.REBOOT_TYPES))
996
997 IGNORE_SECONDARIES_OPT = cli_option("--ignore-secondaries",
998 dest="ignore_secondaries",
999 default=False, action="store_true",
1000 help="Ignore errors from secondaries")
1001
1002 NOSHUTDOWN_OPT = cli_option("--noshutdown", dest="shutdown",
1003 action="store_false", default=True,
1004 help="Don't shutdown the instance (unsafe)")
1005
1006 TIMEOUT_OPT = cli_option("--timeout", dest="timeout", type="int",
1007 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1008 help="Maximum time to wait")
1009
1010 SHUTDOWN_TIMEOUT_OPT = cli_option("--shutdown-timeout",
1011 dest="shutdown_timeout", type="int",
1012 default=constants.DEFAULT_SHUTDOWN_TIMEOUT,
1013 help="Maximum time to wait for instance shutdown")
1014
1015 INTERVAL_OPT = cli_option("--interval", dest="interval", type="int",
1016 default=None,
1017 help=("Number of seconds between repetitions of the"
1018 " command"))
1019
1020 EARLY_RELEASE_OPT = cli_option("--early-release",
1021 dest="early_release", default=False,
1022 action="store_true",
1023 help="Release the locks on the secondary"
1024 " node(s) early")
1025
1026 NEW_CLUSTER_CERT_OPT = cli_option("--new-cluster-certificate",
1027 dest="new_cluster_cert",
1028 default=False, action="store_true",
1029 help="Generate a new cluster certificate")
1030
1031 RAPI_CERT_OPT = cli_option("--rapi-certificate", dest="rapi_cert",
1032 default=None,
1033 help="File containing new RAPI certificate")
1034
1035 NEW_RAPI_CERT_OPT = cli_option("--new-rapi-certificate", dest="new_rapi_cert",
1036 default=None, action="store_true",
1037 help=("Generate a new self-signed RAPI"
1038 " certificate"))
1039
1040 NEW_CONFD_HMAC_KEY_OPT = cli_option("--new-confd-hmac-key",
1041 dest="new_confd_hmac_key",
1042 default=False, action="store_true",
1043 help=("Create a new HMAC key for %s" %
1044 constants.CONFD))
1045
1046 CLUSTER_DOMAIN_SECRET_OPT = cli_option("--cluster-domain-secret",
1047 dest="cluster_domain_secret",
1048 default=None,
1049 help=("Load new cluster domain"
1050 " secret from file"))
1051
1052 NEW_CLUSTER_DOMAIN_SECRET_OPT = cli_option("--new-cluster-domain-secret",
1053 dest="new_cluster_domain_secret",
1054 default=False, action="store_true",
1055 help=("Create a new cluster domain"
1056 " secret"))
1057
1058 USE_REPL_NET_OPT = cli_option("--use-replication-network",
1059 dest="use_replication_network",
1060 help="Whether to use the replication network"
1061 " for talking to the nodes",
1062 action="store_true", default=False)
1063
1064 MAINTAIN_NODE_HEALTH_OPT = \
1065 cli_option("--maintain-node-health", dest="maintain_node_health",
1066 metavar=_YORNO, default=None, type="bool",
1067 help="Configure the cluster to automatically maintain node"
1068 " health, by shutting down unknown instances, shutting down"
1069 " unknown DRBD devices, etc.")
1070
1071 IDENTIFY_DEFAULTS_OPT = \
1072 cli_option("--identify-defaults", dest="identify_defaults",
1073 default=False, action="store_true",
1074 help="Identify which saved instance parameters are equal to"
1075 " the current cluster defaults and set them as such, instead"
1076 " of marking them as overridden")
1077
1078 UIDPOOL_OPT = cli_option("--uid-pool", default=None,
1079 action="store", dest="uid_pool",
1080 help=("A list of user-ids or user-id"
1081 " ranges separated by commas"))
1082
1083 ADD_UIDS_OPT = cli_option("--add-uids", default=None,
1084 action="store", dest="add_uids",
1085 help=("A list of user-ids or user-id"
1086 " ranges separated by commas, to be"
1087 " added to the user-id pool"))
1088
1089 REMOVE_UIDS_OPT = cli_option("--remove-uids", default=None,
1090 action="store", dest="remove_uids",
1091 help=("A list of user-ids or user-id"
1092 " ranges separated by commas, to be"
1093 " removed from the user-id pool"))
1094
1095 RESERVED_LVS_OPT = cli_option("--reserved-lvs", default=None,
1096 action="store", dest="reserved_lvs",
1097 help=("A comma-separated list of reserved"
1098 " logical volume names that will be"
1099 " ignored by cluster verify"))
1100
1101 ROMAN_OPT = cli_option("--roman",
1102 dest="roman_integers", default=False,
1103 action="store_true",
1104 help="Use Roman numerals for positive integers")
1105
1106 DRBD_HELPER_OPT = cli_option("--drbd-usermode-helper", dest="drbd_helper",
1107 action="store", default=None,
1108 help="Specifies usermode helper for DRBD")
1109
1110 NODRBD_STORAGE_OPT = cli_option("--no-drbd-storage", dest="drbd_storage",
1111 action="store_false", default=True,
1112 help="Disable support for DRBD")
1113
1114 PRIMARY_IP_VERSION_OPT = \
1115 cli_option("--primary-ip-version", default=constants.IP4_VERSION,
1116 action="store", dest="primary_ip_version",
1117 metavar="%d|%d" % (constants.IP4_VERSION,
1118 constants.IP6_VERSION),
1119 help="Cluster-wide IP version for primary IP")
1120
1121 PRIORITY_OPT = cli_option("--priority", default=None, dest="priority",
1122 metavar="|".join(name for name, _ in _PRIORITY_NAMES),
1123 choices=_PRIONAME_TO_VALUE.keys(),
1124 help="Priority for opcode processing")
1125
1126 HID_OS_OPT = cli_option("--hidden", dest="hidden",
1127 type="bool", default=None, metavar=_YORNO,
1128 help="Sets the hidden flag on the OS")
1129
1130 BLK_OS_OPT = cli_option("--blacklisted", dest="blacklisted",
1131 type="bool", default=None, metavar=_YORNO,
1132 help="Sets the blacklisted flag on the OS")
1133
1134 PREALLOC_WIPE_DISKS_OPT = cli_option("--prealloc-wipe-disks", default=None,
1135 type="bool", metavar=_YORNO,
1136 dest="prealloc_wipe_disks",
1137 help=("Wipe disks prior to instance"
1138 " creation"))
1139
1140 NODE_PARAMS_OPT = cli_option("--node-parameters", dest="ndparams",
1141 type="keyval", default=None,
1142 help="Node parameters")
1143
1144 ALLOC_POLICY_OPT = cli_option("--alloc-policy", dest="alloc_policy",
1145 action="store", metavar="POLICY", default=None,
1146 help="Allocation policy for the node group")
1147
1148 NODE_POWERED_OPT = cli_option("--node-powered", default=None,
1149 type="bool", metavar=_YORNO,
1150 dest="node_powered",
1151 help="Specify if the SoR for node is powered")
1152
1153 NO_REMEMBER_OPT = cli_option("--no-remember",
1154 dest="no_remember",
1155 action="store_true", default=False,
1156 help="Perform but do not record the change"
1157 " in the configuration")
1158
1159
1160
1161 COMMON_OPTS = [DEBUG_OPT]
1162
1163
1164
1165 COMMON_CREATE_OPTS = [
1166 BACKEND_OPT,
1167 DISK_OPT,
1168 DISK_TEMPLATE_OPT,
1169 FILESTORE_DIR_OPT,
1170 FILESTORE_DRIVER_OPT,
1171 HYPERVISOR_OPT,
1172 IALLOCATOR_OPT,
1173 NET_OPT,
1174 NODE_PLACEMENT_OPT,
1175 NOIPCHECK_OPT,
1176 NONAMECHECK_OPT,
1177 NONICS_OPT,
1178 NWSYNC_OPT,
1179 OSPARAMS_OPT,
1180 OS_SIZE_OPT,
1181 SUBMIT_OPT,
1182 DRY_RUN_OPT,
1183 PRIORITY_OPT,
1184 ]
1185
1186
1187 -def _ParseArgs(argv, commands, aliases):
1188 """Parser for the command line arguments.
1189
1190 This function parses the arguments and returns the function which
1191 must be executed together with its (modified) arguments.
1192
1193 @param argv: the command line
1194 @param commands: dictionary with special contents, see the design
1195 doc for cmdline handling
1196 @param aliases: dictionary with command aliases {'alias': 'target', ...}
1197
1198 """
1199 if len(argv) == 0:
1200 binary = "<command>"
1201 else:
1202 binary = argv[0].split("/")[-1]
1203
1204 if len(argv) > 1 and argv[1] == "--version":
1205 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1206 constants.RELEASE_VERSION)
1207
1208
1209 sys.exit(0)
1210
1211 if len(argv) < 2 or not (argv[1] in commands or
1212 argv[1] in aliases):
1213
1214 sortedcmds = commands.keys()
1215 sortedcmds.sort()
1216
1217 ToStdout("Usage: %s {command} [options...] [argument...]", binary)
1218 ToStdout("%s <command> --help to see details, or man %s", binary, binary)
1219 ToStdout("")
1220
1221
1222 mlen = max([len(" %s" % cmd) for cmd in commands])
1223 mlen = min(60, mlen)
1224
1225
1226 ToStdout("Commands:")
1227 for cmd in sortedcmds:
1228 cmdstr = " %s" % (cmd,)
1229 help_text = commands[cmd][4]
1230 help_lines = textwrap.wrap(help_text, 79 - 3 - mlen)
1231 ToStdout("%-*s - %s", mlen, cmdstr, help_lines.pop(0))
1232 for line in help_lines:
1233 ToStdout("%-*s %s", mlen, "", line)
1234
1235 ToStdout("")
1236
1237 return None, None, None
1238
1239
1240 cmd = argv.pop(1)
1241 if cmd in aliases:
1242 if cmd in commands:
1243 raise errors.ProgrammerError("Alias '%s' overrides an existing"
1244 " command" % cmd)
1245
1246 if aliases[cmd] not in commands:
1247 raise errors.ProgrammerError("Alias '%s' maps to non-existing"
1248 " command '%s'" % (cmd, aliases[cmd]))
1249
1250 cmd = aliases[cmd]
1251
1252 func, args_def, parser_opts, usage, description = commands[cmd]
1253 parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
1254 description=description,
1255 formatter=TitledHelpFormatter(),
1256 usage="%%prog %s %s" % (cmd, usage))
1257 parser.disable_interspersed_args()
1258 options, args = parser.parse_args()
1259
1260 if not _CheckArguments(cmd, args_def, args):
1261 return None, None, None
1262
1263 return func, options, args
1264
1265
1266 -def _CheckArguments(cmd, args_def, args):
1267 """Verifies the arguments using the argument definition.
1268
1269 Algorithm:
1270
1271 1. Abort with error if values specified by user but none expected.
1272
1273 1. For each argument in definition
1274
1275 1. Keep running count of minimum number of values (min_count)
1276 1. Keep running count of maximum number of values (max_count)
1277 1. If it has an unlimited number of values
1278
1279 1. Abort with error if it's not the last argument in the definition
1280
1281 1. If last argument has limited number of values
1282
1283 1. Abort with error if number of values doesn't match or is too large
1284
1285 1. Abort with error if user didn't pass enough values (min_count)
1286
1287 """
1288 if args and not args_def:
1289 ToStderr("Error: Command %s expects no arguments", cmd)
1290 return False
1291
1292 min_count = None
1293 max_count = None
1294 check_max = None
1295
1296 last_idx = len(args_def) - 1
1297
1298 for idx, arg in enumerate(args_def):
1299 if min_count is None:
1300 min_count = arg.min
1301 elif arg.min is not None:
1302 min_count += arg.min
1303
1304 if max_count is None:
1305 max_count = arg.max
1306 elif arg.max is not None:
1307 max_count += arg.max
1308
1309 if idx == last_idx:
1310 check_max = (arg.max is not None)
1311
1312 elif arg.max is None:
1313 raise errors.ProgrammerError("Only the last argument can have max=None")
1314
1315 if check_max:
1316
1317 if (min_count is not None and max_count is not None and
1318 min_count == max_count and len(args) != min_count):
1319 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
1320 return False
1321
1322
1323 if max_count is not None and len(args) > max_count:
1324 ToStderr("Error: Command %s expects only %d argument(s)",
1325 cmd, max_count)
1326 return False
1327
1328
1329 if min_count is not None and len(args) < min_count:
1330 ToStderr("Error: Command %s expects at least %d argument(s)",
1331 cmd, min_count)
1332 return False
1333
1334 return True
1335
1336
1337 -def SplitNodeOption(value):
1338 """Splits the value of a --node option.
1339
1340 """
1341 if value and ':' in value:
1342 return value.split(':', 1)
1343 else:
1344 return (value, None)
1345
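# Illustrative behaviour of SplitNodeOption (node names are made up):
#   SplitNodeOption("node1.example.com:node2.example.com")
#   => ["node1.example.com", "node2.example.com"]
#   SplitNodeOption("node1.example.com")  => ("node1.example.com", None)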
1346
1347 -def CalculateOSNames(os_name, os_variants):
1348 """Calculates all the names an OS can be called, according to its variants.
1349
1350 @type os_name: string
1351 @param os_name: base name of the os
1352 @type os_variants: list or None
1353 @param os_variants: list of supported variants
1354 @rtype: list
1355 @return: list of valid names
1356
1357 """
1358 if os_variants:
1359 return ['%s+%s' % (os_name, v) for v in os_variants]
1360 else:
1361 return [os_name]
1362
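# Illustrative behaviour of CalculateOSNames (OS name and variants made up):
#   CalculateOSNames("debootstrap", ["default", "minimal"])
#   => ['debootstrap+default', 'debootstrap+minimal']
#   CalculateOSNames("debootstrap", None)  => ['debootstrap']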
1363
1364 -def ParseFields(selected, default):
1365 """Parses the values of "--field"-like options.
1366
1367 @type selected: string or None
1368 @param selected: User-selected options
1369 @type default: list
1370 @param default: Default fields
1371
1372 """
1373 if selected is None:
1374 return default
1375
1376 if selected.startswith("+"):
1377 return default + selected[1:].split(",")
1378
1379 return selected.split(",")
1380
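# Illustrative behaviour of ParseFields (field names are made up):
#   ParseFields(None, ["name", "status"])   => ["name", "status"]
#   ParseFields("+oper_ram", ["name"])      => ["name", "oper_ram"]
#   ParseFields("name,pnode", ["name"])     => ["name", "pnode"]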
1381
1382 UsesRPC = rpc.RunWithRPC
1383
1384
1385 -def AskUser(text, choices=None):
1386 """Ask the user a question.
1387
1388 @param text: the question to ask
1389
1390 @param choices: list with elements tuples (input_char, return_value,
1391 description); if not given, it will default to: [('y', True,
1392 'Perform the operation'), ('n', False, 'Do not do the operation')];
1393 note that the '?' char is reserved for help
1394
1395 @return: one of the return values from the choices list; if input is
1396 not possible (i.e. not running with a tty), we return the last
1397 entry from the list
1398
1399 """
1400 if choices is None:
1401 choices = [('y', True, 'Perform the operation'),
1402 ('n', False, 'Do not perform the operation')]
1403 if not choices or not isinstance(choices, list):
1404 raise errors.ProgrammerError("Invalid choices argument to AskUser")
1405 for entry in choices:
1406 if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == '?':
1407 raise errors.ProgrammerError("Invalid choices element to AskUser")
1408
1409 answer = choices[-1][1]
1410 new_text = []
1411 for line in text.splitlines():
1412 new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
1413 text = "\n".join(new_text)
1414 try:
1415 f = file("/dev/tty", "a+")
1416 except IOError:
1417 return answer
1418 try:
1419 chars = [entry[0] for entry in choices]
1420 chars[-1] = "[%s]" % chars[-1]
1421 chars.append('?')
1422 maps = dict([(entry[0], entry[1]) for entry in choices])
1423 while True:
1424 f.write(text)
1425 f.write('\n')
1426 f.write("/".join(chars))
1427 f.write(": ")
1428 line = f.readline(2).strip().lower()
1429 if line in maps:
1430 answer = maps[line]
1431 break
1432 elif line == '?':
1433 for entry in choices:
1434 f.write(" %s - %s\n" % (entry[0], entry[2]))
1435 f.write("\n")
1436 continue
1437 finally:
1438 f.close()
1439 return answer
1440
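# Illustrative use of AskUser (the question text and the extra choice are made
# up); without a tty, the last choice in the list is returned as the default:
#   if AskUser("Continue with the operation?"):
#     pass  # user answered 'y'
#   answer = AskUser("Destroy 3 instances?",
#                    choices=[("y", True, "Yes"), ("n", False, "No"),
#                             ("a", "all", "Yes for all remaining")])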
1441
1442 -class JobSubmittedException(Exception):
1443 """Job was submitted, client should exit.
1444
1445 This exception has one argument, the ID of the job that was
1446 submitted. The handler should print this ID.
1447
1448 This is not an error, just a structured way to exit from clients.
1449
1450 """
1451
1452
1453 -def SendJob(ops, cl=None):
1454 """Function to submit an opcode without waiting for the results.
1455
1456 @type ops: list
1457 @param ops: list of opcodes
1458 @type cl: luxi.Client
1459 @param cl: the luxi client to use for communicating with the master;
1460 if None, a new client will be created
1461
1462 """
1463 if cl is None:
1464 cl = GetClient()
1465
1466 job_id = cl.SubmitJob(ops)
1467
1468 return job_id
1469
1470
1471 -def GenericPollJob(job_id, cbs, report_cbs):
1472 """Generic job-polling function.
1473
1474 @type job_id: number
1475 @param job_id: Job ID
1476 @type cbs: Instance of L{JobPollCbBase}
1477 @param cbs: Data callbacks
1478 @type report_cbs: Instance of L{JobPollReportCbBase}
1479 @param report_cbs: Reporting callbacks
1480
1481 """
1482 prev_job_info = None
1483 prev_logmsg_serial = None
1484
1485 status = None
1486
1487 while True:
1488 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
1489 prev_logmsg_serial)
1490 if not result:
1491
1492 raise errors.JobLost("Job with id %s lost" % job_id)
1493
1494 if result == constants.JOB_NOTCHANGED:
1495 report_cbs.ReportNotChanged(job_id, status)
1496
1497
1498 continue
1499
1500
1501 (job_info, log_entries) = result
1502 (status, ) = job_info
1503
1504 if log_entries:
1505 for log_entry in log_entries:
1506 (serial, timestamp, log_type, message) = log_entry
1507 report_cbs.ReportLogMessage(job_id, serial, timestamp,
1508 log_type, message)
1509 prev_logmsg_serial = max(prev_logmsg_serial, serial)
1510
1511
1512 elif status in (constants.JOB_STATUS_SUCCESS,
1513 constants.JOB_STATUS_ERROR,
1514 constants.JOB_STATUS_CANCELING,
1515 constants.JOB_STATUS_CANCELED):
1516 break
1517
1518 prev_job_info = job_info
1519
1520 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
1521 if not jobs:
1522 raise errors.JobLost("Job with id %s lost" % job_id)
1523
1524 status, opstatus, result = jobs[0]
1525
1526 if status == constants.JOB_STATUS_SUCCESS:
1527 return result
1528
1529 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
1530 raise errors.OpExecError("Job was canceled")
1531
1532 has_ok = False
1533 for idx, (status, msg) in enumerate(zip(opstatus, result)):
1534 if status == constants.OP_STATUS_SUCCESS:
1535 has_ok = True
1536 elif status == constants.OP_STATUS_ERROR:
1537 errors.MaybeRaise(msg)
1538
1539 if has_ok:
1540 raise errors.OpExecError("partial failure (opcode %d): %s" %
1541 (idx, msg))
1542
1543 raise errors.OpExecError(str(msg))
1544
1545
1546 raise errors.OpExecError(result)
1547
1548
1549 -class JobPollCbBase:
1550 """Base class for L{GenericPollJob} callbacks.
1551
1552 """
1553 - def __init__(self):
1554 """Initializes this class.
1555
1556 """
1557
1558 - def WaitForJobChangeOnce(self, job_id, fields,
1559 prev_job_info, prev_log_serial):
1560 """Waits for changes on a job.
1561
1562 """
1563 raise NotImplementedError()
1564
1565 - def QueryJobs(self, job_ids, fields):
1566 """Returns the selected fields for the selected job IDs.
1567
1568 @type job_ids: list of numbers
1569 @param job_ids: Job IDs
1570 @type fields: list of strings
1571 @param fields: Fields
1572
1573 """
1574 raise NotImplementedError()
1575
1576
1577 -class JobPollReportCbBase:
1578 """Base class for L{GenericPollJob} reporting callbacks.
1579
1580 """
1581 - def __init__(self):
1582 """Initializes this class.
1583
1584 """
1585
1586 - def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1587 """Handles a log message.
1588
1589 """
1590 raise NotImplementedError()
1591
1592 - def ReportNotChanged(self, job_id, status):
1593 """Called if a job hasn't changed in a while.
1594
1595 @type job_id: number
1596 @param job_id: Job ID
1597 @type status: string or None
1598 @param status: Job status if available
1599
1600 """
1601 raise NotImplementedError()
1602
1603
1604 -class _LuxiJobPollCb(JobPollCbBase):
1605 - def __init__(self, cl):
1606 """Initializes this class.
1607
1608 """
1609 JobPollCbBase.__init__(self)
1610 self.cl = cl
1611
1612 - def WaitForJobChangeOnce(self, job_id, fields,
1613 prev_job_info, prev_log_serial):
1614 """Waits for changes on a job.
1615
1616 """
1617 return self.cl.WaitForJobChangeOnce(job_id, fields,
1618 prev_job_info, prev_log_serial)
1619
1620 - def QueryJobs(self, job_ids, fields):
1621 """Returns the selected fields for the selected job IDs.
1622
1623 """
1624 return self.cl.QueryJobs(job_ids, fields)
1625
1626
1627 -class FeedbackFnJobPollReportCb(JobPollReportCbBase):
1628 - def __init__(self, feedback_fn):
1629 """Initializes this class.
1630
1631 """
1632 JobPollReportCbBase.__init__(self)
1633
1634 self.feedback_fn = feedback_fn
1635
1636 assert callable(feedback_fn)
1637
1638 - def ReportLogMessage(self, job_id, serial, timestamp, log_type, log_msg):
1639 """Handles a log message.
1640
1641 """
1642 self.feedback_fn((timestamp, log_type, log_msg))
1643
1644 - def ReportNotChanged(self, job_id, status):
1645 """Called if a job hasn't changed in a while.
1646
1647 """
1648
1649
1650
1651 -class StdioJobPollReportCb(JobPollReportCbBase):
1652 - def __init__(self):
1653 """Initializes this class.
1654
1655 """
1656 JobPollReportCbBase.__init__(self)
1657
1658 self.notified_queued = False
1659 self.notified_waitlock = False
1660
1667
1668 - def ReportNotChanged(self, job_id, status):
1669 """Called if a job hasn't changed in a while.
1670
1671 """
1672 if status is None:
1673 return
1674
1675 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
1676 ToStderr("Job %s is waiting in queue", job_id)
1677 self.notified_queued = True
1678
1679 elif status == constants.JOB_STATUS_WAITLOCK and not self.notified_waitlock:
1680 ToStderr("Job %s is trying to acquire all necessary locks", job_id)
1681 self.notified_waitlock = True
1682
1683
1692
1693
1694 -def PollJob(job_id, cl=None, feedback_fn=None, reporter=None):
1695 """Function to poll for the result of a job.
1696
1697 @type job_id: job identifier
1698 @param job_id: the job to poll for results
1699 @type cl: luxi.Client
1700 @param cl: the luxi client to use for communicating with the master;
1701 if None, a new client will be created
1702
1703 """
1704 if cl is None:
1705 cl = GetClient()
1706
1707 if reporter is None:
1708 if feedback_fn:
1709 reporter = FeedbackFnJobPollReportCb(feedback_fn)
1710 else:
1711 reporter = StdioJobPollReportCb()
1712 elif feedback_fn:
1713 raise errors.ProgrammerError("Can't specify reporter and feedback function")
1714
1715 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter)
1716
1717
1718 -def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
1719 """Legacy function to submit an opcode.
1720
1721 This is just a simple wrapper over the construction of the processor
1722 instance. It should be extended to better handle feedback and
1723 interaction functions.
1724
1725 """
1726 if cl is None:
1727 cl = GetClient()
1728
1729 SetGenericOpcodeOpts([op], opts)
1730
1731 job_id = SendJob([op], cl=cl)
1732
1733 op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
1734 reporter=reporter)
1735
1736 return op_results[0]
1737
1738
1739 -def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
1740 """Wrapper around SubmitOpCode or SendJob.
1741
1742 This function will decide, based on the 'opts' parameter, whether to
1743 submit and wait for the result of the opcode (and return it), or
1744 whether to just send the job and print its identifier. It is used in
1745 order to simplify the implementation of the '--submit' option.
1746
1747 It will also process the opcodes if we're sending them via SendJob
1748 (otherwise SubmitOpCode does it).
1749
1750 """
1751 if opts and opts.submit_only:
1752 job = [op]
1753 SetGenericOpcodeOpts(job, opts)
1754 job_id = SendJob(job, cl=cl)
1755 raise JobSubmittedException(job_id)
1756 else:
1757 return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
1758
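# Sketch of the intended control flow (illustrative): a gnt-* command builds an
# opcode "op" and calls SubmitOrSend(op, opts).  Without --submit this behaves
# like SubmitOpCode and returns the opcode result; with --submit it raises
# JobSubmittedException(job_id), which GenericMain catches and formats via
# FormatError instead of waiting for the job to finish.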
1759
1760 -def SetGenericOpcodeOpts(opcode_list, options):
1761 """Processor for generic options.
1762
1763 This function updates the given opcodes based on generic command
1764 line options (like debug, dry-run, etc.).
1765
1766 @param opcode_list: list of opcodes
1767 @param options: command line options or None
1768 @return: None (in-place modification)
1769
1770 """
1771 if not options:
1772 return
1773 for op in opcode_list:
1774 op.debug_level = options.debug
1775 if hasattr(options, "dry_run"):
1776 op.dry_run = options.dry_run
1777 if getattr(options, "priority", None) is not None:
1778 op.priority = _PRIONAME_TO_VALUE[options.priority]
1779
1780
1802
1803
1887
1888
1889 -def GenericMain(commands, override=None, aliases=None):
1890 """Generic main function for all the gnt-* commands.
1891
1892 Arguments:
1893 - commands: a dictionary with a special structure, see the design doc
1894 for command line handling.
1895 - override: if not None, we expect a dictionary with keys that will
1896 override command line options; this can be used to pass
1897 options from the scripts to generic functions
1898 - aliases: dictionary with command aliases {'alias': 'target', ...}
1899
1900 """
1901
1902 if sys.argv:
1903 binary = os.path.basename(sys.argv[0]) or sys.argv[0]
1904 if len(sys.argv) >= 2:
1905 binary += " " + sys.argv[1]
1906 old_cmdline = " ".join(sys.argv[2:])
1907 else:
1908 old_cmdline = ""
1909 else:
1910 binary = "<unknown program>"
1911 old_cmdline = ""
1912
1913 if aliases is None:
1914 aliases = {}
1915
1916 try:
1917 func, options, args = _ParseArgs(sys.argv, commands, aliases)
1918 except errors.ParameterError, err:
1919 result, err_msg = FormatError(err)
1920 ToStderr(err_msg)
1921 return 1
1922
1923 if func is None:
1924 return 1
1925
1926 if override is not None:
1927 for key, val in override.iteritems():
1928 setattr(options, key, val)
1929
1930 utils.SetupLogging(constants.LOG_COMMANDS, binary, debug=options.debug,
1931 stderr_logging=True)
1932
1933 if old_cmdline:
1934 logging.info("run with arguments '%s'", old_cmdline)
1935 else:
1936 logging.info("run with no arguments")
1937
1938 try:
1939 result = func(options, args)
1940 except (errors.GenericError, luxi.ProtocolError,
1941 JobSubmittedException), err:
1942 result, err_msg = FormatError(err)
1943 logging.exception("Error during command processing")
1944 ToStderr(err_msg)
1945 except KeyboardInterrupt:
1946 result = constants.EXIT_FAILURE
1947 ToStderr("Aborted. Note that if the operation created any jobs, they"
1948 " might have been submitted and"
1949 " will continue to run in the background.")
1950 except IOError, err:
1951 if err.errno == errno.EPIPE:
1952
1953 sys.exit(constants.EXIT_FAILURE)
1954 else:
1955 raise
1956
1957 return result
1958
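# A minimal sketch of the "commands" dictionary consumed by _ParseArgs and
# GenericMain; the command name, function and texts below are illustrative.
# Each value is (function, argument definition, options, usage, description):
#   commands = {
#     "list": (ListSomething, ARGS_MANY_INSTANCES, [NOHDR_OPT, SEP_OPT],
#              "[<instance>...]", "Show a list of instances"),
#     }
#   sys.exit(GenericMain(commands))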
1959
1960 -def ParseNicOption(optvalue):
1961 """Parses the value of the --net option(s).
1962
1963 """
1964 try:
1965 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
1966 except (TypeError, ValueError), err:
1967 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err))
1968
1969 nics = [{}] * nic_max
1970 for nidx, ndict in optvalue:
1971 nidx = int(nidx)
1972
1973 if not isinstance(ndict, dict):
1974 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
1975 " got %s" % (nidx, ndict))
1976
1977 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
1978
1979 nics[nidx] = ndict
1980
1981 return nics
1982
1983
1984 -def GenericInstanceCreate(mode, opts, args):
1985 """Add an instance to the cluster via either creation or import.
1986
1987 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
1988 @param opts: the command line options selected by the user
1989 @type args: list
1990 @param args: should contain only one element, the new instance name
1991 @rtype: int
1992 @return: the desired exit code
1993
1994 """
1995 instance = args[0]
1996
1997 (pnode, snode) = SplitNodeOption(opts.node)
1998
1999 hypervisor = None
2000 hvparams = {}
2001 if opts.hypervisor:
2002 hypervisor, hvparams = opts.hypervisor
2003
2004 if opts.nics:
2005 nics = ParseNicOption(opts.nics)
2006 elif opts.no_nics:
2007
2008 nics = []
2009 elif mode == constants.INSTANCE_CREATE:
2010
2011 nics = [{}]
2012 else:
2013
2014 nics = []
2015
2016 if opts.disk_template == constants.DT_DISKLESS:
2017 if opts.disks or opts.sd_size is not None:
2018 raise errors.OpPrereqError("Diskless instance but disk"
2019 " information passed")
2020 disks = []
2021 else:
2022 if (not opts.disks and not opts.sd_size
2023 and mode == constants.INSTANCE_CREATE):
2024 raise errors.OpPrereqError("No disk information specified")
2025 if opts.disks and opts.sd_size is not None:
2026 raise errors.OpPrereqError("Please use either the '--disk' or"
2027 " '-s' option")
2028 if opts.sd_size is not None:
2029 opts.disks = [(0, {"size": opts.sd_size})]
2030
2031 if opts.disks:
2032 try:
2033 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
2034 except ValueError, err:
2035 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err))
2036 disks = [{}] * disk_max
2037 else:
2038 disks = []
2039 for didx, ddict in opts.disks:
2040 didx = int(didx)
2041 if not isinstance(ddict, dict):
2042 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
2043 raise errors.OpPrereqError(msg)
2044 elif "size" in ddict:
2045 if "adopt" in ddict:
2046 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
2047 " (disk %d)" % didx)
2048 try:
2049 ddict["size"] = utils.ParseUnit(ddict["size"])
2050 except ValueError, err:
2051 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
2052 (didx, err))
2053 elif "adopt" in ddict:
2054 if mode == constants.INSTANCE_IMPORT:
2055 raise errors.OpPrereqError("Disk adoption not allowed for instance"
2056 " import")
2057 ddict["size"] = 0
2058 else:
2059 raise errors.OpPrereqError("Missing size or adoption source for"
2060 " disk %d" % didx)
2061 disks[didx] = ddict
2062
2063 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_TYPES)
2064 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
2065
2066 if mode == constants.INSTANCE_CREATE:
2067 start = opts.start
2068 os_type = opts.os
2069 force_variant = opts.force_variant
2070 src_node = None
2071 src_path = None
2072 no_install = opts.no_install
2073 identify_defaults = False
2074 elif mode == constants.INSTANCE_IMPORT:
2075 start = False
2076 os_type = None
2077 force_variant = False
2078 src_node = opts.src_node
2079 src_path = opts.src_dir
2080 no_install = None
2081 identify_defaults = opts.identify_defaults
2082 else:
2083 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
2084
2085 op = opcodes.OpInstanceCreate(instance_name=instance,
2086 disks=disks,
2087 disk_template=opts.disk_template,
2088 nics=nics,
2089 pnode=pnode, snode=snode,
2090 ip_check=opts.ip_check,
2091 name_check=opts.name_check,
2092 wait_for_sync=opts.wait_for_sync,
2093 file_storage_dir=opts.file_storage_dir,
2094 file_driver=opts.file_driver,
2095 iallocator=opts.iallocator,
2096 hypervisor=hypervisor,
2097 hvparams=hvparams,
2098 beparams=opts.beparams,
2099 osparams=opts.osparams,
2100 mode=mode,
2101 start=start,
2102 os_type=os_type,
2103 force_variant=force_variant,
2104 src_node=src_node,
2105 src_path=src_path,
2106 no_install=no_install,
2107 identify_defaults=identify_defaults)
2108
2109 SubmitOrSend(op, opts)
2110 return 0
2111
2112
2113 -class _RunWhileClusterStoppedHelper:
2114 """Helper class for L{RunWhileClusterStopped} to simplify state management
2115
2116 """
2117 - def __init__(self, feedback_fn, cluster_name, master_node, online_nodes):
2118 """Initializes this class.
2119
2120 @type feedback_fn: callable
2121 @param feedback_fn: Feedback function
2122 @type cluster_name: string
2123 @param cluster_name: Cluster name
2124 @type master_node: string
2125 @param master_node: Master node name
2126 @type online_nodes: list
2127 @param online_nodes: List of names of online nodes
2128
2129 """
2130 self.feedback_fn = feedback_fn
2131 self.cluster_name = cluster_name
2132 self.master_node = master_node
2133 self.online_nodes = online_nodes
2134
2135 self.ssh = ssh.SshRunner(self.cluster_name)
2136
2137 self.nonmaster_nodes = [name for name in online_nodes
2138 if name != master_node]
2139
2140 assert self.master_node not in self.nonmaster_nodes
2141
2142 - def _RunCmd(self, node_name, cmd):
2143 """Runs a command on the local or a remote machine.
2144
2145 @type node_name: string
2146 @param node_name: Machine name
2147 @type cmd: list
2148 @param cmd: Command
2149
2150 """
2151 if node_name is None or node_name == self.master_node:
2152
2153 result = utils.RunCmd(cmd)
2154 else:
2155 result = self.ssh.Run(node_name, "root", utils.ShellQuoteArgs(cmd))
2156
2157 if result.failed:
2158 errmsg = ["Failed to run command %s" % result.cmd]
2159 if node_name:
2160 errmsg.append("on node %s" % node_name)
2161 errmsg.append(": exitcode %s and error %s" %
2162 (result.exit_code, result.output))
2163 raise errors.OpExecError(" ".join(errmsg))
2164
2165 - def Call(self, fn, *args):
2166 """Call function while all daemons are stopped.
2167
2168 @type fn: callable
2169 @param fn: Function to be called
2170
2171 """
2172
2173 self.feedback_fn("Blocking watcher")
2174 watcher_block = utils.FileLock.Open(constants.WATCHER_STATEFILE)
2175 try:
2176
2177
2178 watcher_block.Exclusive(blocking=True)
2179
2180
2181
2182 self.feedback_fn("Stopping master daemons")
2183 self._RunCmd(None, [constants.DAEMON_UTIL, "stop-master"])
2184 try:
2185
2186 for node_name in self.online_nodes:
2187 self.feedback_fn("Stopping daemons on %s" % node_name)
2188 self._RunCmd(node_name, [constants.DAEMON_UTIL, "stop-all"])
2189
2190
2191 try:
2192 return fn(self, *args)
2193 except Exception, err:
2194 _, errmsg = FormatError(err)
2195 logging.exception("Caught exception")
2196 self.feedback_fn(errmsg)
2197 raise
2198 finally:
2199
2200 for node_name in self.nonmaster_nodes + [self.master_node]:
2201 self.feedback_fn("Starting daemons on %s" % node_name)
2202 self._RunCmd(node_name, [constants.DAEMON_UTIL, "start-all"])
2203 finally:
2204
2205 watcher_block.Close()
2206
2207
2208 -def RunWhileClusterStopped(feedback_fn, fn, *args):
2209 """Calls a function while all cluster daemons are stopped.
2210
2211 @type feedback_fn: callable
2212 @param feedback_fn: Feedback function
2213 @type fn: callable
2214 @param fn: Function to be called when daemons are stopped
2215
2216 """
2217 feedback_fn("Gathering cluster information")
2218
2219
2220 cl = GetClient()
2221
2222 (cluster_name, master_node) = \
2223 cl.QueryConfigValues(["cluster_name", "master_node"])
2224
2225 online_nodes = GetOnlineNodes([], cl=cl)
2226
2227
2228 del cl
2229
2230 assert master_node in online_nodes
2231
2232 return _RunWhileClusterStoppedHelper(feedback_fn, cluster_name, master_node,
2233 online_nodes).Call(fn, *args)
2234
2235
2236 -def GenerateTable(headers, fields, separator, data,
2237 numfields=None, unitfields=None,
2238 units=None):
2239 """Prints a table with headers and different fields.
2240
2241 @type headers: dict
2242 @param headers: dictionary mapping field names to headers for
2243 the table
2244 @type fields: list
2245 @param fields: the field names corresponding to each row in
2246 the data field
2247 @param separator: the separator to be used; if this is None,
2248 the default 'smart' algorithm is used which computes optimal
2249 field width, otherwise just the separator is used between
2250 each field
2251 @type data: list
2252 @param data: a list of lists, each sublist being one row to be output
2253 @type numfields: list
2254 @param numfields: a list with the fields that hold numeric
2255 values and thus should be right-aligned
2256 @type unitfields: list
2257 @param unitfields: a list with the fields that hold numeric
2258 values that should be formatted with the units field
2259 @type units: string or None
2260 @param units: the units we should use for formatting, or None for
2261 automatic choice (human-readable for non-separator usage, otherwise
2262 megabytes); this is a one-letter string
2263
2264 """
2265 if units is None:
2266 if separator:
2267 units = "m"
2268 else:
2269 units = "h"
2270
2271 if numfields is None:
2272 numfields = []
2273 if unitfields is None:
2274 unitfields = []
2275
2276 numfields = utils.FieldSet(*numfields)
2277 unitfields = utils.FieldSet(*unitfields)
2278
2279 format_fields = []
2280 for field in fields:
2281 if headers and field not in headers:
2282
2283
2284 # Fall back to using the field name itself as its header
2285 headers[field] = field
2286 if separator is not None:
2287 format_fields.append("%s")
2288 elif numfields.Matches(field):
2289 format_fields.append("%*s")
2290 else:
2291 format_fields.append("%-*s")
2292
2293 if separator is None:
2294 mlens = [0 for name in fields]
2295 format_str = ' '.join(format_fields)
2296 else:
2297 format_str = separator.replace("%", "%%").join(format_fields)
2298
2299 for row in data:
2300 if row is None:
2301 continue
2302 for idx, val in enumerate(row):
2303 if unitfields.Matches(fields[idx]):
2304 try:
2305 val = int(val)
2306 except (TypeError, ValueError):
2307 pass
2308 else:
2309 val = row[idx] = utils.FormatUnit(val, units)
2310 val = row[idx] = str(val)
2311 if separator is None:
2312 mlens[idx] = max(mlens[idx], len(val))
2313
2314 result = []
2315 if headers:
2316 args = []
2317 for idx, name in enumerate(fields):
2318 hdr = headers[name]
2319 if separator is None:
2320 mlens[idx] = max(mlens[idx], len(hdr))
2321 args.append(mlens[idx])
2322 args.append(hdr)
2323 result.append(format_str % tuple(args))
2324
2325 if separator is None:
2326 assert len(mlens) == len(fields)
2327
2328 if fields and not numfields.Matches(fields[-1]):
2329 mlens[-1] = 0
2330
2331 for line in data:
2332 args = []
2333 if line is None:
2334 line = ['-' for _ in fields]
2335 for idx in range(len(fields)):
2336 if separator is None:
2337 args.append(mlens[idx])
2338 args.append(line[idx])
2339 result.append(format_str % tuple(args))
2340
2341 return result
2342
2343
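# Illustrative usage sketch (not part of the original file): GenerateTable
# returns formatted lines rather than printing them; field names and data below
# are made up:
#
#   headers = {"name": "Node", "dfree": "DFree"}
#   data = [["node1.example.com", 1024], ["node2.example.com", 204800]]
#   for line in GenerateTable(headers, ["name", "dfree"], None, data,
#                             numfields=["dfree"], unitfields=["dfree"]):
#     ToStdout(line)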
2351
2352
2353
2354 _DEFAULT_FORMAT_QUERY = {
2355 constants.QFT_TEXT: (str, False),
2356 constants.QFT_BOOL: (_FormatBool, False),
2357 constants.QFT_NUMBER: (str, True),
2358 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
2359 constants.QFT_OTHER: (str, False),
2360 constants.QFT_UNKNOWN: (str, False),
2361 }
2362
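# Illustrative note (not part of the original file): each entry maps a field
# type to a (formatting function, right-align flag) pair. The per-field-name
# "format_override" dictionaries accepted by the listing functions below use
# the same shape, e.g. (field name chosen only for illustration):
#
#   {"oper_ram": (lambda value: utils.FormatUnit(value, "h"), True)}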
2363
2393
2394
2436
2437
2472
2473 columns = []
2474 for fdef in result.fields:
2475 assert fdef.title and fdef.name
2476 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
2477 columns.append(TableColumn(fdef.title,
2478 _QueryColumnFormatter(fn, _RecordStatus,
2479 verbose),
2480 align_right))
2481
2482 table = FormatTable(result.data, columns, header, separator)
2483
2484
2485 assert len(stats) == len(constants.RS_ALL)
2486 assert compat.all(count >= 0 for count in stats.values())
2487
2488
2489
2490 if (stats[constants.RS_UNKNOWN] or
2491 (not result.data and _GetUnknownFields(result.fields))):
2492 status = QR_UNKNOWN
2493 elif compat.any(count > 0 for key, count in stats.items()
2494 if key != constants.RS_NORMAL):
2495 status = QR_INCOMPLETE
2496 else:
2497 status = QR_NORMAL
2498
2499 return (status, table)
2500
2501
2503 """Returns list of unknown fields included in C{fdefs}.
2504
2505 @type fdefs: list of L{objects.QueryFieldDefinition}
2506
2507 """
2508 return [fdef for fdef in fdefs
2509 if fdef.kind == constants.QFT_UNKNOWN]
2510
2511
2513 """Prints a warning to stderr if a query included unknown fields.
2514
2515 @type fdefs: list of L{objects.QueryFieldDefinition}
2516
2517 """
2518 unknown = _GetUnknownFields(fdefs)
2519 if unknown:
2520 ToStderr("Warning: Queried for unknown fields %s",
2521 utils.CommaJoin(fdef.name for fdef in unknown))
2522 return True
2523
2524 return False
2525
2526
2527 def GenericList(resource, fields, names, unit, separator, header, cl=None,
2528 format_override=None, verbose=False):
2529 """Generic implementation for listing all items of a resource.
2530
2531 @param resource: One of L{constants.QR_OP_LUXI}
2532 @type fields: list of strings
2533 @param fields: List of fields to query for
2534 @type names: list of strings
2535 @param names: Names of items to query for
2536 @type unit: string or None
2537 @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
2538 None for automatic choice (human-readable for non-separator usage,
2539 otherwise megabytes); this is a one-letter string
2540 @type separator: string or None
2541 @param separator: String used to separate fields
2542 @type header: bool
2543 @param header: Whether to show header row
2544 @type format_override: dict
2545 @param format_override: Dictionary for overriding field formatting functions,
2546 indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
2547 @type verbose: boolean
2548 @param verbose: whether to use verbose field descriptions or not
2549
2550 """
2551 if cl is None:
2552 cl = GetClient()
2553
2554 if not names:
2555 names = None
2556
2557 response = cl.Query(resource, fields, qlang.MakeSimpleFilter("name", names))
2558
2559 found_unknown = _WarnUnknownFields(response.fields)
2560
2561 (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
2562 header=header,
2563 format_override=format_override,
2564 verbose=verbose)
2565
2566 for line in data:
2567 ToStdout(line)
2568
2569 assert ((found_unknown and status == QR_UNKNOWN) or
2570 (not found_unknown and status != QR_UNKNOWN))
2571
2572 if status == QR_UNKNOWN:
2573 return constants.EXIT_UNKNOWN_FIELD
2574
2575
2576 return constants.EXIT_SUCCESS
2577
2578
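# Illustrative usage sketch (not part of the original file): a typical
# "gnt-* list" command delegates to GenericList; the resource constant, field
# names and option attributes are assumed for the example:
#
#   def ListNodes(opts, args):
#     return GenericList(constants.QR_NODE, ["name", "dtotal", "dfree"], args,
#                        opts.units, opts.separator, not opts.no_headers)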
2580 """Generic implementation for listing fields for a resource.
2581
2582 @param resource: One of L{constants.QR_OP_LUXI}
2583 @type fields: list of strings
2584 @param fields: List of fields to query for
2585 @type separator: string or None
2586 @param separator: String used to separate fields
2587 @type header: bool
2588 @param header: Whether to show header row
2589
2590 """
2591 if cl is None:
2592 cl = GetClient()
2593
2594 if not fields:
2595 fields = None
2596
2597 response = cl.QueryFields(resource, fields)
2598
2599 found_unknown = _WarnUnknownFields(response.fields)
2600
2601 columns = [
2602 TableColumn("Name", str, False),
2603 TableColumn("Title", str, False),
2604
2605 ]
2606
2607 rows = [[fdef.name, fdef.title] for fdef in response.fields]
2608
2609 for line in FormatTable(rows, columns, header, separator):
2610 ToStdout(line)
2611
2612 if found_unknown:
2613 return constants.EXIT_UNKNOWN_FIELD
2614
2615 return constants.EXIT_SUCCESS
2616
2617
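# Illustrative usage sketch (not part of the original file): the matching
# "list-fields" command would call GenericListFields with the same resource
# constant (option attributes assumed):
#
#   def ListNodeFields(opts, args):
#     return GenericListFields(constants.QR_NODE, args, opts.separator,
#                              not opts.no_headers)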
2619 """Describes a column for L{FormatTable}.
2620
2621 """
2622 def __init__(self, title, fn, align_right):
2623 """Initializes this class.
2624
2625 @type title: string
2626 @param title: Column title
2627 @type fn: callable
2628 @param fn: Formatting function
2629 @type align_right: bool
2630 @param align_right: Whether to align values on the right-hand side
2631
2632 """
2633 self.title = title
2634 self.format = fn
2635 self.align_right = align_right
2636
2637
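# Illustrative usage sketch (not part of the original file): columns built this
# way are consumed by FormatTable together with a list of rows, e.g.:
#
#   columns = [
#     TableColumn("Name", str, False),
#     TableColumn("Size", lambda value: utils.FormatUnit(value, "h"), True),
#     ]
#   for line in FormatTable([["disk0", 1024]], columns, True, None):
#     ToStdout(line)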
2648
2649
2697
2698
2713
2714
2716 """Parse a time specification.
2717
2718 The following suffixes will be recognized:
2719
2720 - s: seconds
2721 - m: minutes
2722 - h: hours
2723 - d: days
2724 - w: weeks
2725
2726 Without any suffix, the value will be taken to be in seconds.
2727
2728 """
2729 value = str(value)
2730 if not value:
2731 raise errors.OpPrereqError("Empty time specification passed")
2732 suffix_map = {
2733 's': 1,
2734 'm': 60,
2735 'h': 3600,
2736 'd': 86400,
2737 'w': 604800,
2738 }
2739 if value[-1] not in suffix_map:
2740 try:
2741 value = int(value)
2742 except (TypeError, ValueError):
2743 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2744 else:
2745 multiplier = suffix_map[value[-1]]
2746 value = value[:-1]
2747 if not value:
2748 raise errors.OpPrereqError("Invalid time specification (only"
2749 " suffix passed)")
2750 try:
2751 value = int(value) * multiplier
2752 except (TypeError, ValueError):
2753 raise errors.OpPrereqError("Invalid time specification '%s'" % value)
2754 return value
2755
2756
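# Illustrative usage sketch (not part of the original file): every call below
# evaluates to a number of seconds:
#
#   ParseTimespec("30")   # -> 30
#   ParseTimespec("90s")  # -> 90
#   ParseTimespec("2h")   # -> 7200
#   ParseTimespec("1w")   # -> 604800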
2757 def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
2758 filter_master=False):
2759 """Returns the names of online nodes.
2760
2761 This function will also print a note on stderr with the names of
2762 the offline nodes that are being skipped.
2763
2764 @param nodes: if not empty, use only this subset of nodes (minus the
2765 offline ones)
2766 @param cl: if not None, luxi client to use
2767 @type nowarn: boolean
2768 @param nowarn: by default, this function will output a note with the
2769 offline nodes that are skipped; if this parameter is True the
2770 note is not displayed
2771 @type secondary_ips: boolean
2772 @param secondary_ips: if True, return the secondary IPs instead of the
2773 names, useful for doing network traffic over the replication interface
2774 (if any)
2775 @type filter_master: boolean
2776 @param filter_master: if True, do not return the master node in the list
2777 (useful in coordination with secondary_ips where we cannot check our
2778 node name against the list)
2779
2780 """
2781 if cl is None:
2782 cl = GetClient()
2783
2784 if secondary_ips:
2785 name_idx = 2
2786 else:
2787 name_idx = 0
2788
2789 if filter_master:
2790 master_node = cl.QueryConfigValues(["master_node"])[0]
2791 filter_fn = lambda x: x != master_node
2792 else:
2793 filter_fn = lambda _: True
2794
2795 result = cl.QueryNodes(names=nodes, fields=["name", "offline", "sip"],
2796 use_locking=False)
2797 offline = [row[0] for row in result if row[1]]
2798 if offline and not nowarn:
2799 ToStderr("Note: skipping offline node(s): %s" % utils.CommaJoin(offline))
2800 return [row[name_idx] for row in result if not row[1] and filter_fn(row[0])]
2801
2802
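# Illustrative usage sketch (not part of the original file): fetching the
# secondary IPs of all online nodes except the master, e.g. before pushing
# data over the replication network:
#
#   node_ips = GetOnlineNodes([], secondary_ips=True, filter_master=True)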
2804 """Write a message to a stream, bypassing the logging system
2805
2806 @type stream: file object
2807 @param stream: the file to which we should write
2808 @type txt: str
2809 @param txt: the message
2810
2811 """
2812 try:
2813 if args:
2814 args = tuple(args)
2815 stream.write(txt % args)
2816 else:
2817 stream.write(txt)
2818 stream.write('\n')
2819 stream.flush()
2820 except IOError, err:
2821 if err.errno == errno.EPIPE:
2822 # Our output stream went away (EPIPE); exit without a traceback
2823 sys.exit(constants.EXIT_FAILURE)
2824 else:
2825 raise
2826
2827
2829 """Write a message to stdout only, bypassing the logging system
2830
2831 This is just a wrapper over _ToStream.
2832
2833 @type txt: str
2834 @param txt: the message
2835
2836 """
2837 _ToStream(sys.stdout, txt, *args)
2838
2839
2841 """Write a message to stderr only, bypassing the logging system
2842
2843 This is just a wrapper over _ToStream.
2844
2845 @type txt: str
2846 @param txt: the message
2847
2848 """
2849 _ToStream(sys.stderr, txt, *args)
2850
2851
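# Illustrative usage sketch (not part of the original file): both helpers take
# printf-style arguments, which are only interpolated when present (variable
# names are placeholders):
#
#   ToStdout("Cluster %s has %d online nodes", cluster_name, len(online))
#   ToStderr("Warning: node %s is offline", node_name)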
2853 """Class which manages the submission and execution of multiple jobs.
2854
2855 Note that instances of this class should not be reused between
2856 GetResults() calls.
2857
2858 """
2859 def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
2860 self.queue = []
2861 if cl is None:
2862 cl = GetClient()
2863 self.cl = cl
2864 self.verbose = verbose
2865 self.jobs = []
2866 self.opts = opts
2867 self.feedback_fn = feedback_fn
2868
2870 """Record a job for later submit.
2871
2872 @type name: string
2873 @param name: a description of the job, will be used in WaitJobSet
2874 """
2875 SetGenericOpcodeOpts(ops, self.opts)
2876 self.queue.append((name, ops))
2877
2879 """Submit all pending jobs.
2880
2881 """
2882 if each:
2883 results = []
2884 for row in self.queue:
2885
2886 # SubmitJob raises an exception on failure, so success can be recorded here
2887 results.append([True, self.cl.SubmitJob(row[1])])
2888 else:
2889 results = self.cl.SubmitManyJobs([row[1] for row in self.queue])
2890 for (idx, ((status, data), (name, _))) in enumerate(zip(results,
2891 self.queue)):
2892 self.jobs.append((idx, status, data, name))
2893
2895 """Choose a non-waiting/queued job to poll next.
2896
2897 """
2898 assert self.jobs, "_ChooseJob called with empty job list"
2899
2900 result = self.cl.QueryJobs([i[2] for i in self.jobs], ["status"])
2901 assert result
2902
2903 for job_data, status in zip(self.jobs, result):
2904 if (isinstance(status, list) and status and
2905 status[0] in (constants.JOB_STATUS_QUEUED,
2906 constants.JOB_STATUS_WAITLOCK,
2907 constants.JOB_STATUS_CANCELING)):
2908 # Job is still queued or waiting; check it again later
2909 continue
2910
2911 self.jobs.remove(job_data)
2912 return job_data
2913
2914 # No job has finished yet, so just poll the first one in the queue
2915 return self.jobs.pop(0)
2916
2918 """Wait for and return the results of all jobs.
2919
2920 @rtype: list
2921 @return: list of tuples (success, job results), in the same order
2922 as the submitted jobs; if a job has failed, instead of the result
2923 there will be the error message
2924
2925 """
2926 if not self.jobs:
2927 self.SubmitPending()
2928 results = []
2929 if self.verbose:
2930 ok_jobs = [row[2] for row in self.jobs if row[1]]
2931 if ok_jobs:
2932 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
2933
2934 # Separate the successfully submitted jobs from failed submissions
2935 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
2936 for idx, _, jid, name in failures:
2937 ToStderr("Failed to submit job for %s: %s", name, jid)
2938 results.append((idx, False, jid))
2939
2940 while self.jobs:
2941 (idx, _, jid, name) = self._ChooseJob()
2942 ToStdout("Waiting for job %s for %s...", jid, name)
2943 try:
2944 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
2945 success = True
2946 except errors.JobLost, err:
2947 _, job_result = FormatError(err)
2948 ToStderr("Job %s for %s has been archived, cannot check its result",
2949 jid, name)
2950 success = False
2951 except (errors.GenericError, luxi.ProtocolError), err:
2952 _, job_result = FormatError(err)
2953 success = False
2954 # The error message is shown regardless of the verbose setting
2955 ToStderr("Job %s for %s has failed: %s", jid, name, job_result)
2956
2957 results.append((idx, success, job_result))
2958
2959 # Sort the results back into the original submission order
2960 results.sort()
2961 results = [i[1:] for i in results]
2962
2963 return results
2964
2966 """Wait for job results or only print the job IDs.
2967
2968 @type wait: boolean
2969 @param wait: whether to wait or not
2970
2971 """
2972 if wait:
2973 return self.GetResults()
2974 else:
2975 if not self.jobs:
2976 self.SubmitPending()
2977 for _, status, result, name in self.jobs:
2978 if status:
2979 ToStdout("%s: %s", result, name)
2980 else:
2981 ToStderr("Failure for %s: %s", name, result)
2982 return [row[1:3] for row in self.jobs]
2983
2984
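# Illustrative usage sketch (not part of the original file): submitting several
# jobs and waiting for all of them, as multi-instance commands do; the opcode
# and instance names are only examples:
#
#   jex = JobExecutor(opts=opts)
#   for instance_name in ["inst1.example.com", "inst2.example.com"]:
#     jex.QueueJob(instance_name,
#                  opcodes.OpStartupInstance(instance_name=instance_name))
#   results = jex.GetResults()
#   if not compat.all(success for (success, _) in results):
#     ToStderr("Not all jobs completed successfully")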
3001