1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31 """Module dealing with command line parsing"""
32
33
34 import sys
35 import textwrap
36 import os.path
37 import time
38 import logging
39 import errno
40 import itertools
41 import shlex
42 from cStringIO import StringIO
43
44 from ganeti import utils
45 from ganeti import errors
46 from ganeti import constants
47 from ganeti import opcodes
48 import ganeti.rpc.errors as rpcerr
49 import ganeti.rpc.node as rpc
50 from ganeti import ssh
51 from ganeti import compat
52 from ganeti import netutils
53 from ganeti import qlang
54 from ganeti import objects
55 from ganeti import pathutils
56 from ganeti import serializer
57 import ganeti.cli_opts
58
59 from ganeti.cli_opts import *
60
61 from ganeti.runtime import (GetClient)
62
63 from optparse import (OptionParser, TitledHelpFormatter)
64
65
# Public API of this module; extended with all option names exported by
# ganeti.cli_opts (imported above via "from ganeti.cli_opts import *").
__all__ = [
  # Generic functions for CLI programs
  "ConfirmOperation",
  "CreateIPolicyFromOpts",
  "GenericMain",
  "GenericInstanceCreate",
  "GenericList",
  "GenericListFields",
  "GetClient",
  "GetOnlineNodes",
  "GetNodesSshPorts",
  "GetNodeUUIDs",
  "JobExecutor",
  "JobSubmittedException",
  "ParseTimespec",
  "RunWhileClusterStopped",
  "RunWhileDaemonsStopped",
  "SubmitOpCode",
  "SubmitOpCodeToDrainedQueue",
  "SubmitOrSend",
  "UsesRPC",
  # Formatting functions
  "ToStderr", "ToStdout",
  "ToStdoutAndLoginfo",
  "FormatError",
  "FormatQueryResult",
  "FormatParamsDictInfo",
  "FormatPolicyInfo",
  "PrintIPolicyCommand",
  "PrintGenericInfo",
  "GenerateTable",
  "AskUser",
  "FormatTimestamp",
  "FormatLogMessage",
  # Tags functions
  "ListTags",
  "AddTags",
  "RemoveTags",
  # Argument definitions for command-line parsing
  "ARGS_MANY_INSTANCES",
  "ARGS_MANY_NODES",
  "ARGS_MANY_GROUPS",
  "ARGS_MANY_NETWORKS",
  "ARGS_MANY_FILTERS",
  "ARGS_NONE",
  "ARGS_ONE_INSTANCE",
  "ARGS_ONE_NODE",
  "ARGS_ONE_GROUP",
  "ARGS_ONE_OS",
  "ARGS_ONE_NETWORK",
  "ARGS_ONE_FILTER",
  "ArgChoice",
  "ArgCommand",
  "ArgFile",
  "ArgGroup",
  "ArgHost",
  "ArgInstance",
  "ArgJobId",
  "ArgNetwork",
  "ArgNode",
  "ArgOs",
  "ArgExtStorage",
  "ArgFilter",
  "ArgSuggest",
  "ArgUnknown",
  "FixHvParams",
  "SplitNodeOption",
  "CalculateOSNames",
  "ParseFields",
  ] + ganeti.cli_opts.__all__
136
137
# Query result states: fully known / contains unknown fields / incomplete
(QR_NORMAL,
 QR_UNKNOWN,
 QR_INCOMPLETE) = range(3)


#: Maximum batch size when choosing from a list interactively
_CHOOSE_BATCH = 25


# Value types for the min/max(/std) instance-policy specs, used to
# force-convert user-supplied values when building an ipolicy dict.
TISPECS_GROUP_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  }

TISPECS_CLUSTER_TYPES = {
  constants.ISPECS_MIN: constants.VTYPE_INT,
  constants.ISPECS_MAX: constants.VTYPE_INT,
  constants.ISPECS_STD: constants.VTYPE_INT,
  }


#: User-friendly names for query2 field types
_QFT_NAMES = {
  constants.QFT_UNKNOWN: "Unknown",
  constants.QFT_TEXT: "Text",
  constants.QFT_BOOL: "Boolean",
  constants.QFT_NUMBER: "Number",
  constants.QFT_NUMBER_FLOAT: "Floating-point number",
  constants.QFT_UNIT: "Storage size",
  constants.QFT_TIMESTAMP: "Timestamp",
  constants.QFT_OTHER: "Custom",
  }
173 self.min = min
174 self.max = max
175
177 return ("<%s min=%s max=%s>" %
178 (self.__class__.__name__, self.min, self.max))
179
182 """Suggesting argument.
183
184 Value can be any of the ones passed to the constructor.
185
186 """
187
188 - def __init__(self, min=0, max=None, choices=None):
191
193 return ("<%s min=%s max=%s choices=%r>" %
194 (self.__class__.__name__, self.min, self.max, self.choices))
195
198 """Choice argument.
199
200 Value can be any of the ones passed to the constructor. Like L{ArgSuggest},
201 but value must be one of the choices.
202
203 """
204
207 """Unknown argument to program (e.g. determined at runtime).
208
209 """
210
213 """Instances argument.
214
215 """
216
219 """Node argument.
220
221 """
222
225 """Network argument.
226
227 """
228
231 """Node group argument.
232
233 """
234
237 """Job ID argument.
238
239 """
240
243 """File path argument.
244
245 """
246
249 """Command argument.
250
251 """
252
255 """Host argument.
256
257 """
258
259
class ArgOs(_Argument):
  """OS argument.

  """
264
267 """ExtStorage argument.
268
269 """
270
273 """Filter UUID argument.
274
275 """
276
277
# Pre-built argument definitions for the common command signatures
ARGS_NONE = []
ARGS_MANY_INSTANCES = [ArgInstance()]
ARGS_MANY_NETWORKS = [ArgNetwork()]
ARGS_MANY_NODES = [ArgNode()]
ARGS_MANY_GROUPS = [ArgGroup()]
ARGS_MANY_FILTERS = [ArgFilter()]
ARGS_ONE_INSTANCE = [ArgInstance(min=1, max=1)]
ARGS_ONE_NETWORK = [ArgNetwork(min=1, max=1)]
ARGS_ONE_NODE = [ArgNode(min=1, max=1)]
ARGS_ONE_GROUP = [ArgGroup(min=1, max=1)]
ARGS_ONE_OS = [ArgOs(min=1, max=1)]
ARGS_ONE_FILTER = [ArgFilter(min=1, max=1)]
315
344
362
379
396
399 """Exception class for L{_ParseArgs}.
400
401 """
403 """Initializes instances of this class.
404
405 @type exit_error: bool
406 @param exit_error: Whether to report failure on exit
407
408 """
409 Exception.__init__(self)
410 self.exit_error = exit_error
411
414 """Exception class for L{_ParseArgs}.
415
416 """
417
418
def _ParseArgs(binary, argv, commands, aliases, env_override):
  """Parser for the command line arguments.

  This function parses the arguments and returns the function which
  must be executed together with its (modified) arguments.

  @param binary: Script name
  @param argv: Command line arguments
  @param commands: Dictionary containing command definitions
  @param aliases: dictionary with command aliases {"alias": "target", ...}
  @param env_override: list of env variables allowed for default args
  @raise _ShowUsage: If usage description should be shown
  @raise _ShowVersion: If version should be shown

  """
  assert not (env_override - set(commands))
  assert not (set(aliases.keys()) & set(commands.keys()))

  if len(argv) > 1:
    cmd = argv[1]
  else:
    # No option or command given
    raise _ShowUsage(exit_error=True)

  if cmd == "--version":
    raise _ShowVersion()
  elif cmd == "--help":
    raise _ShowUsage(exit_error=False)
  elif not (cmd in commands or cmd in aliases):
    raise _ShowUsage(exit_error=True)

  # Resolve aliases to their target command
  if cmd in aliases:
    if aliases[cmd] not in commands:
      raise errors.ProgrammerError("Alias '%s' maps to non-existing"
                                   " command '%s'" % (cmd, aliases[cmd]))

    cmd = aliases[cmd]

  if cmd in env_override:
    # Allow default arguments from e.g. GNT_INSTANCE_ADD-style variables
    args_env_name = ("%s_%s" % (binary.replace("-", "_"), cmd)).upper()
    env_args = os.environ.get(args_env_name)
    if env_args:
      argv = utils.InsertAtPos(argv, 2, shlex.split(env_args))

  func, args_def, parser_opts, usage, description = commands[cmd]
  parser = OptionParser(option_list=parser_opts + COMMON_OPTS,
                        description=description,
                        formatter=TitledHelpFormatter(),
                        usage="%%prog %s %s" % (cmd, usage))
  parser.disable_interspersed_args()
  options, args = parser.parse_args(args=argv[2:])

  if not _CheckArguments(cmd, args_def, args):
    return None, None, None

  return func, options, args
476
501
504 """Verifies the arguments using the argument definition.
505
506 Algorithm:
507
508 1. Abort with error if values specified by user but none expected.
509
510 1. For each argument in definition
511
512 1. Keep running count of minimum number of values (min_count)
513 1. Keep running count of maximum number of values (max_count)
514 1. If it has an unlimited number of values
515
516 1. Abort with error if it's not the last argument in the definition
517
518 1. If last argument has limited number of values
519
520 1. Abort with error if number of values doesn't match or is too large
521
522 1. Abort with error if user didn't pass enough values (min_count)
523
524 """
525 if args and not args_def:
526 ToStderr("Error: Command %s expects no arguments", cmd)
527 return False
528
529 min_count = None
530 max_count = None
531 check_max = None
532
533 last_idx = len(args_def) - 1
534
535 for idx, arg in enumerate(args_def):
536 if min_count is None:
537 min_count = arg.min
538 elif arg.min is not None:
539 min_count += arg.min
540
541 if max_count is None:
542 max_count = arg.max
543 elif arg.max is not None:
544 max_count += arg.max
545
546 if idx == last_idx:
547 check_max = (arg.max is not None)
548
549 elif arg.max is None:
550 raise errors.ProgrammerError("Only the last argument can have max=None")
551
552 if check_max:
553
554 if (min_count is not None and max_count is not None and
555 min_count == max_count and len(args) != min_count):
556 ToStderr("Error: Command %s expects %d argument(s)", cmd, min_count)
557 return False
558
559
560 if max_count is not None and len(args) > max_count:
561 ToStderr("Error: Command %s expects only %d argument(s)",
562 cmd, max_count)
563 return False
564
565
566 if min_count is not None and len(args) < min_count:
567 ToStderr("Error: Command %s expects at least %d argument(s)",
568 cmd, min_count)
569 return False
570
571 return True
572
575 """Splits the value of a --node option.
576
577 """
578 if value and ":" in value:
579 return value.split(":", 1)
580 else:
581 return (value, None)
582
585 """Calculates all the names an OS can be called, according to its variants.
586
587 @type os_name: string
588 @param os_name: base name of the os
589 @type os_variants: list or None
590 @param os_variants: list of supported variants
591 @rtype: list
592 @return: list of valid names
593
594 """
595 if os_variants:
596 return ["%s+%s" % (os_name, v) for v in os_variants]
597 else:
598 return [os_name]
599
602 """Parses the values of "--field"-like options.
603
604 @type selected: string or None
605 @param selected: User-selected options
606 @type default: list
607 @param default: Default fields
608
609 """
610 if selected is None:
611 return default
612
613 if selected.startswith("+"):
614 return default + selected[1:].split(",")
615
616 return selected.split(",")
617
618
619 UsesRPC = rpc.RunWithRPC
620
621
def AskUser(text, choices=None):
  """Ask the user a question.

  @param text: the question to ask

  @param choices: list with elements tuples (input_char, return_value,
      description); if not given, it will default to: [('y', True,
      'Perform the operation'), ('n', False, 'Do not do the operation')];
      note that the '?' char is reserved for help

  @return: one of the return values from the choices list; if input is
      not possible (i.e. not running with a tty), we return the last
      entry from the list

  """
  if choices is None:
    choices = [("y", True, "Perform the operation"),
               ("n", False, "Do not perform the operation")]
  if not choices or not isinstance(choices, list):
    raise errors.ProgrammerError("Invalid choices argument to AskUser")
  for entry in choices:
    if not isinstance(entry, tuple) or len(entry) < 3 or entry[0] == "?":
      raise errors.ProgrammerError("Invalid choices element to AskUser")

  # Default answer is the last choice; returned when no tty is available
  answer = choices[-1][1]
  new_text = []
  for line in text.splitlines():
    new_text.append(textwrap.fill(line, 70, replace_whitespace=False))
  text = "\n".join(new_text)
  try:
    f = file("/dev/tty", "a+")
  except IOError:
    # No controlling terminal, return the default answer
    return answer
  try:
    chars = [entry[0] for entry in choices]
    chars[-1] = "[%s]" % chars[-1]
    chars.append("?")
    maps = dict([(entry[0], entry[1]) for entry in choices])
    while True:
      f.write(text)
      f.write("\n")
      f.write("/".join(chars))
      f.write(": ")
      # Read at most two characters (answer char plus newline)
      line = f.readline(2).strip().lower()
      if line in maps:
        answer = maps[line]
        break
      elif line == "?":
        # Show the description of each choice, then ask again
        for entry in choices:
          f.write(" %s - %s\n" % (entry[0], entry[2]))
        f.write("\n")
        continue
  finally:
    f.close()
  return answer
677
680 """Job was submitted, client should exit.
681
682 This exception has one argument, the ID of the job that was
683 submitted. The handler should print this ID.
684
685 This is not an error, just a structured way to exit from clients.
686
687 """
688
691 """Function to submit an opcode without waiting for the results.
692
693 @type ops: list
694 @param ops: list of opcodes
695 @type cl: luxi.Client
696 @param cl: the luxi client to use for communicating with the master;
697 if None, a new client will be created
698
699 """
700 if cl is None:
701 cl = GetClient()
702
703 job_id = cl.SubmitJob(ops)
704
705 return job_id
706
710 """Generic job-polling function.
711
712 @type job_id: number
713 @param job_id: Job ID
714 @type cbs: Instance of L{JobPollCbBase}
715 @param cbs: Data callbacks
716 @type report_cbs: Instance of L{JobPollReportCbBase}
717 @param report_cbs: Reporting callbacks
718 @type cancel_fn: Function returning a boolean
719 @param cancel_fn: Function to check if we should cancel the running job
720 @type update_freq: int/long
721 @param update_freq: number of seconds between each WFJC reports
722 @return: the opresult of the job
723 @raise errors.JobLost: If job can't be found
724 @raise errors.JobCanceled: If job is canceled
725 @raise errors.OpExecError: If job didn't succeed
726
727 """
728 prev_job_info = None
729 prev_logmsg_serial = None
730
731 status = None
732 should_cancel = False
733
734 if update_freq <= 0:
735 raise errors.ParameterError("Update frequency must be a positive number")
736
737 while True:
738 if cancel_fn:
739 timer = 0
740 while timer < update_freq:
741 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
742 prev_logmsg_serial,
743 timeout=constants.CLI_WFJC_FREQUENCY)
744 should_cancel = cancel_fn()
745 if should_cancel or not result or result != constants.JOB_NOTCHANGED:
746 break
747 timer += constants.CLI_WFJC_FREQUENCY
748 else:
749 result = cbs.WaitForJobChangeOnce(job_id, ["status"], prev_job_info,
750 prev_logmsg_serial, timeout=update_freq)
751 if not result:
752
753 raise errors.JobLost("Job with id %s lost" % job_id)
754
755 if should_cancel:
756 logging.info("Job %s canceled because the client timed out.", job_id)
757 cbs.CancelJob(job_id)
758 raise errors.JobCanceled("Job was canceled")
759
760 if result == constants.JOB_NOTCHANGED:
761 report_cbs.ReportNotChanged(job_id, status)
762
763 continue
764
765
766 (job_info, log_entries) = result
767 (status, ) = job_info
768
769 if log_entries:
770 for log_entry in log_entries:
771 (serial, timestamp, log_type, message) = log_entry
772 report_cbs.ReportLogMessage(job_id, serial, timestamp,
773 log_type, message)
774 prev_logmsg_serial = max(prev_logmsg_serial, serial)
775
776
777 elif status in (constants.JOB_STATUS_SUCCESS,
778 constants.JOB_STATUS_ERROR,
779 constants.JOB_STATUS_CANCELING,
780 constants.JOB_STATUS_CANCELED):
781 break
782
783 prev_job_info = job_info
784
785 jobs = cbs.QueryJobs([job_id], ["status", "opstatus", "opresult"])
786 if not jobs:
787 raise errors.JobLost("Job with id %s lost" % job_id)
788
789 status, opstatus, result = jobs[0]
790
791 if status == constants.JOB_STATUS_SUCCESS:
792 return result
793
794 if status in (constants.JOB_STATUS_CANCELING, constants.JOB_STATUS_CANCELED):
795 raise errors.JobCanceled("Job was canceled")
796
797 has_ok = False
798 for idx, (status, msg) in enumerate(zip(opstatus, result)):
799 if status == constants.OP_STATUS_SUCCESS:
800 has_ok = True
801 elif status == constants.OP_STATUS_ERROR:
802 errors.MaybeRaise(msg)
803
804 if has_ok:
805 raise errors.OpExecError("partial failure (opcode %d): %s" %
806 (idx, msg))
807
808 raise errors.OpExecError(str(msg))
809
810
811 raise errors.OpExecError(result)
812
815 """Base class for L{GenericPollJob} callbacks.
816
817 """
819 """Initializes this class.
820
821 """
822
826 """Waits for changes on a job.
827
828 """
829 raise NotImplementedError()
830
832 """Returns the selected fields for the selected job IDs.
833
834 @type job_ids: list of numbers
835 @param job_ids: Job IDs
836 @type fields: list of strings
837 @param fields: Fields
838
839 """
840 raise NotImplementedError()
841
843 """Cancels a currently running job.
844
845 @type job_id: number
846 @param job_id: The ID of the Job we want to cancel
847
848 """
849 raise NotImplementedError()
850
853 """Base class for L{GenericPollJob} reporting callbacks.
854
855 """
857 """Initializes this class.
858
859 """
860
862 """Handles a log message.
863
864 """
865 raise NotImplementedError()
866
868 """Called for if a job hasn't changed in a while.
869
870 @type job_id: number
871 @param job_id: Job ID
872 @type status: string or None
873 @param status: Job status if available
874
875 """
876 raise NotImplementedError()
877
886
890 """Waits for changes on a job.
891
892 """
893 return self.cl.WaitForJobChangeOnce(job_id, fields,
894 prev_job_info, prev_log_serial,
895 timeout=timeout)
896
898 """Returns the selected fields for the selected job IDs.
899
900 """
901 return self.cl.QueryJobs(job_ids, fields)
902
904 """Cancels a currently running job.
905
906 """
907 return self.cl.CancelJob(job_id)
908
912 """Initializes this class.
913
914 """
915 JobPollReportCbBase.__init__(self)
916
917 self.feedback_fn = feedback_fn
918
919 assert callable(feedback_fn)
920
922 """Handles a log message.
923
924 """
925 self.feedback_fn((timestamp, log_type, log_msg))
926
928 """Called if a job hasn't changed in a while.
929
930 """
931
936 """Initializes this class.
937
938 """
939 JobPollReportCbBase.__init__(self)
940
941 self.notified_queued = False
942 self.notified_waitlock = False
943
950
952 """Called if a job hasn't changed in a while.
953
954 """
955 if status is None:
956 return
957
958 if status == constants.JOB_STATUS_QUEUED and not self.notified_queued:
959 ToStderr("Job %s is waiting in queue", job_id)
960 self.notified_queued = True
961
962 elif status == constants.JOB_STATUS_WAITING and not self.notified_waitlock:
963 ToStderr("Job %s is trying to acquire all necessary locks", job_id)
964 self.notified_waitlock = True
965
975
979 """Function to poll for the result of a job.
980
981 @type job_id: job identified
982 @param job_id: the job to poll for results
983 @type cl: luxi.Client
984 @param cl: the luxi client to use for communicating with the master;
985 if None, a new client will be created
986 @type cancel_fn: Function returning a boolean
987 @param cancel_fn: Function to check if we should cancel the running job
988 @type update_freq: int/long
989 @param update_freq: number of seconds between each WFJC report
990
991 """
992 if cl is None:
993 cl = GetClient()
994
995 if reporter is None:
996 if feedback_fn:
997 reporter = FeedbackFnJobPollReportCb(feedback_fn)
998 else:
999 reporter = StdioJobPollReportCb()
1000 elif feedback_fn:
1001 raise errors.ProgrammerError("Can't specify reporter and feedback function")
1002
1003 return GenericPollJob(job_id, _LuxiJobPollCb(cl), reporter,
1004 cancel_fn=cancel_fn, update_freq=update_freq)
1005
1006
def SubmitOpCode(op, cl=None, feedback_fn=None, opts=None, reporter=None):
  """Legacy function to submit an opcode.

  This is just a simple wrapper over the construction of the processor
  instance. It should be extended to better handle feedback and
  interaction functions.

  @param op: the opcode to submit
  @param cl: luxi client to use; if None, a new one is created
  @param feedback_fn: feedback function for job log messages
  @param opts: command line options, used to set generic opcode options
      and (if "print_jobid" is set) to print the submitted job ID
  @param reporter: polling report callbacks (mutually exclusive with
      feedback_fn, see L{PollJob})
  @return: the result of the first (and only) opcode of the job

  """
  if cl is None:
    cl = GetClient()

  SetGenericOpcodeOpts([op], opts)

  job_id = SendJob([op], cl=cl)
  if hasattr(opts, "print_jobid") and opts.print_jobid:
    ToStdout("%d" % job_id)

  op_results = PollJob(job_id, cl=cl, feedback_fn=feedback_fn,
                       reporter=reporter)

  return op_results[0]
1028
1031 """Forcefully insert a job in the queue, even if it is drained.
1032
1033 """
1034 cl = GetClient()
1035 job_id = cl.SubmitJobToDrainedQueue([op])
1036 op_results = PollJob(job_id, cl=cl)
1037 return op_results[0]
1038
1039
def SubmitOrSend(op, opts, cl=None, feedback_fn=None):
  """Wrapper around SubmitOpCode or SendJob.

  This function will decide, based on the 'opts' parameter, whether to
  submit and wait for the result of the opcode (and return it), or
  whether to just send the job and print its identifier. It is used in
  order to simplify the implementation of the '--submit' option.

  It will also process the opcodes if we're sending them via SendJob
  (otherwise SubmitOpCode does it).

  @raise JobSubmittedException: if the job was sent only (submit_only),
      carrying the submitted job ID

  """
  if opts and opts.submit_only:
    job = [op]
    SetGenericOpcodeOpts(job, opts)
    job_id = SendJob(job, cl=cl)
    if opts.print_jobid:
      ToStdout("%d" % job_id)
    raise JobSubmittedException(job_id)
  else:
    return SubmitOpCode(op, cl=cl, feedback_fn=feedback_fn, opts=opts)
1061
1064 """Builds the first part of the reason trail
1065
1066 Builds the initial part of the reason trail, adding the user provided reason
1067 (if it exists) and the name of the command starting the operation.
1068
1069 @param op: the opcode the reason trail will be added to
1070 @param opts: the command line options selected by the user
1071
1072 """
1073 assert len(sys.argv) >= 2
1074 trail = []
1075
1076 if opts.reason:
1077 trail.append((constants.OPCODE_REASON_SRC_USER,
1078 opts.reason,
1079 utils.EpochNano()))
1080
1081 binary = os.path.basename(sys.argv[0])
1082 source = "%s:%s" % (constants.OPCODE_REASON_SRC_CLIENT, binary)
1083 command = sys.argv[1]
1084 trail.append((source, command, utils.EpochNano()))
1085 op.reason = trail
1086
1089 """Processor for generic options.
1090
1091 This function updates the given opcodes based on generic command
1092 line options (like debug, dry-run, etc.).
1093
1094 @param opcode_list: list of opcodes
1095 @param options: command line options or None
1096 @return: None (in-place modification)
1097
1098 """
1099 if not options:
1100 return
1101 for op in opcode_list:
1102 op.debug_level = options.debug
1103 if hasattr(options, "dry_run"):
1104 op.dry_run = options.dry_run
1105 if getattr(options, "priority", None) is not None:
1106 op.priority = options.priority
1107 _InitReasonTrail(op, options)
1108
1202
1203
1204 -def GenericMain(commands, override=None, aliases=None,
1205 env_override=frozenset()):
1206 """Generic main function for all the gnt-* commands.
1207
1208 @param commands: a dictionary with a special structure, see the design doc
1209 for command line handling.
1210 @param override: if not None, we expect a dictionary with keys that will
1211 override command line options; this can be used to pass
1212 options from the scripts to generic functions
1213 @param aliases: dictionary with command aliases {'alias': 'target, ...}
1214 @param env_override: list of environment names which are allowed to submit
1215 default args for commands
1216
1217 """
1218
1219 if sys.argv:
1220 binary = os.path.basename(sys.argv[0])
1221 if not binary:
1222 binary = sys.argv[0]
1223
1224 if len(sys.argv) >= 2:
1225 logname = utils.ShellQuoteArgs([binary, sys.argv[1]])
1226 else:
1227 logname = binary
1228
1229 cmdline = utils.ShellQuoteArgs([binary] + sys.argv[1:])
1230 else:
1231 binary = "<unknown program>"
1232 cmdline = "<unknown>"
1233
1234 if aliases is None:
1235 aliases = {}
1236
1237 try:
1238 (func, options, args) = _ParseArgs(binary, sys.argv, commands, aliases,
1239 env_override)
1240 except _ShowVersion:
1241 ToStdout("%s (ganeti %s) %s", binary, constants.VCS_VERSION,
1242 constants.RELEASE_VERSION)
1243 return constants.EXIT_SUCCESS
1244 except _ShowUsage, err:
1245 for line in _FormatUsage(binary, commands):
1246 ToStdout(line)
1247
1248 if err.exit_error:
1249 return constants.EXIT_FAILURE
1250 else:
1251 return constants.EXIT_SUCCESS
1252 except errors.ParameterError, err:
1253 result, err_msg = FormatError(err)
1254 ToStderr(err_msg)
1255 return 1
1256
1257 if func is None:
1258 return 1
1259
1260 if override is not None:
1261 for key, val in override.iteritems():
1262 setattr(options, key, val)
1263
1264 utils.SetupLogging(pathutils.LOG_COMMANDS, logname, debug=options.debug,
1265 stderr_logging=True)
1266
1267 logging.debug("Command line: %s", cmdline)
1268
1269 try:
1270 result = func(options, args)
1271 except (errors.GenericError, rpcerr.ProtocolError,
1272 JobSubmittedException), err:
1273 result, err_msg = FormatError(err)
1274 logging.exception("Error during command processing")
1275 ToStderr(err_msg)
1276 except KeyboardInterrupt:
1277 result = constants.EXIT_FAILURE
1278 ToStderr("Aborted. Note that if the operation created any jobs, they"
1279 " might have been submitted and"
1280 " will continue to run in the background.")
1281 except IOError, err:
1282 if err.errno == errno.EPIPE:
1283
1284 sys.exit(constants.EXIT_FAILURE)
1285 else:
1286 raise
1287
1288 return result
1289
1292 """Parses the value of the --net option(s).
1293
1294 """
1295 try:
1296 nic_max = max(int(nidx[0]) + 1 for nidx in optvalue)
1297 except (TypeError, ValueError), err:
1298 raise errors.OpPrereqError("Invalid NIC index passed: %s" % str(err),
1299 errors.ECODE_INVAL)
1300
1301 nics = [{}] * nic_max
1302 for nidx, ndict in optvalue:
1303 nidx = int(nidx)
1304
1305 if not isinstance(ndict, dict):
1306 raise errors.OpPrereqError("Invalid nic/%d value: expected dict,"
1307 " got %s" % (nidx, ndict), errors.ECODE_INVAL)
1308
1309 utils.ForceDictType(ndict, constants.INIC_PARAMS_TYPES)
1310
1311 nics[nidx] = ndict
1312
1313 return nics
1314
1329
1332 """Add an instance to the cluster via either creation or import.
1333
1334 @param mode: constants.INSTANCE_CREATE or constants.INSTANCE_IMPORT
1335 @param opts: the command line options selected by the user
1336 @type args: list
1337 @param args: should contain only one element, the new instance name
1338 @rtype: int
1339 @return: the desired exit code
1340
1341 """
1342 instance = args[0]
1343 forthcoming = opts.ensure_value("forthcoming", False)
1344 commit = opts.ensure_value("commit", False)
1345
1346 if forthcoming and commit:
1347 raise errors.OpPrereqError("Creating an instance only forthcoming and"
1348 " commiting it are mutally exclusive",
1349 errors.ECODE_INVAL)
1350
1351 (pnode, snode) = SplitNodeOption(opts.node)
1352
1353 hypervisor = None
1354 hvparams = {}
1355 if opts.hypervisor:
1356 hypervisor, hvparams = opts.hypervisor
1357
1358 if opts.nics:
1359 nics = ParseNicOption(opts.nics)
1360 elif opts.no_nics:
1361
1362 nics = []
1363 elif mode == constants.INSTANCE_CREATE:
1364
1365 nics = [{}]
1366 else:
1367
1368 nics = []
1369
1370 if opts.disk_template == constants.DT_DISKLESS:
1371 if opts.disks or opts.sd_size is not None:
1372 raise errors.OpPrereqError("Diskless instance but disk"
1373 " information passed", errors.ECODE_INVAL)
1374 disks = []
1375 else:
1376 if (not opts.disks and not opts.sd_size
1377 and mode == constants.INSTANCE_CREATE):
1378 raise errors.OpPrereqError("No disk information specified",
1379 errors.ECODE_INVAL)
1380 if opts.disks and opts.sd_size is not None:
1381 raise errors.OpPrereqError("Please use either the '--disk' or"
1382 " '-s' option", errors.ECODE_INVAL)
1383 if opts.sd_size is not None:
1384 opts.disks = [(0, {constants.IDISK_SIZE: opts.sd_size})]
1385
1386 if opts.disks:
1387 try:
1388 disk_max = max(int(didx[0]) + 1 for didx in opts.disks)
1389 except ValueError, err:
1390 raise errors.OpPrereqError("Invalid disk index passed: %s" % str(err),
1391 errors.ECODE_INVAL)
1392 disks = [{}] * disk_max
1393 else:
1394 disks = []
1395 for didx, ddict in opts.disks:
1396 didx = int(didx)
1397 if not isinstance(ddict, dict):
1398 msg = "Invalid disk/%d value: expected dict, got %s" % (didx, ddict)
1399 raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
1400 elif constants.IDISK_SIZE in ddict:
1401 if constants.IDISK_ADOPT in ddict:
1402 raise errors.OpPrereqError("Only one of 'size' and 'adopt' allowed"
1403 " (disk %d)" % didx, errors.ECODE_INVAL)
1404 try:
1405 ddict[constants.IDISK_SIZE] = \
1406 utils.ParseUnit(ddict[constants.IDISK_SIZE])
1407 except ValueError, err:
1408 raise errors.OpPrereqError("Invalid disk size for disk %d: %s" %
1409 (didx, err), errors.ECODE_INVAL)
1410 elif constants.IDISK_ADOPT in ddict:
1411 if constants.IDISK_SPINDLES in ddict:
1412 raise errors.OpPrereqError("spindles is not a valid option when"
1413 " adopting a disk", errors.ECODE_INVAL)
1414 if mode == constants.INSTANCE_IMPORT:
1415 raise errors.OpPrereqError("Disk adoption not allowed for instance"
1416 " import", errors.ECODE_INVAL)
1417 ddict[constants.IDISK_SIZE] = 0
1418 else:
1419 raise errors.OpPrereqError("Missing size or adoption source for"
1420 " disk %d" % didx, errors.ECODE_INVAL)
1421 if constants.IDISK_SPINDLES in ddict:
1422 ddict[constants.IDISK_SPINDLES] = int(ddict[constants.IDISK_SPINDLES])
1423
1424 disks[didx] = ddict
1425
1426 if opts.tags is not None:
1427 tags = opts.tags.split(",")
1428 else:
1429 tags = []
1430
1431 utils.ForceDictType(opts.beparams, constants.BES_PARAMETER_COMPAT)
1432 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
1433 FixHvParams(hvparams)
1434
1435 osparams_private = opts.osparams_private or serializer.PrivateDict()
1436 osparams_secret = opts.osparams_secret or serializer.PrivateDict()
1437
1438 helper_startup_timeout = opts.helper_startup_timeout
1439 helper_shutdown_timeout = opts.helper_shutdown_timeout
1440
1441 if mode == constants.INSTANCE_CREATE:
1442 start = opts.start
1443 os_type = opts.os
1444 force_variant = opts.force_variant
1445 src_node = None
1446 src_path = None
1447 no_install = opts.no_install
1448 identify_defaults = False
1449 compress = constants.IEC_NONE
1450 if opts.instance_communication is None:
1451 instance_communication = False
1452 else:
1453 instance_communication = opts.instance_communication
1454 elif mode == constants.INSTANCE_IMPORT:
1455 if forthcoming:
1456 raise errors.OpPrereqError("forthcoming instances can only be created,"
1457 " not imported")
1458 start = False
1459 os_type = None
1460 force_variant = False
1461 src_node = opts.src_node
1462 src_path = opts.src_dir
1463 no_install = None
1464 identify_defaults = opts.identify_defaults
1465 compress = opts.compress
1466 instance_communication = False
1467 else:
1468 raise errors.ProgrammerError("Invalid creation mode %s" % mode)
1469
1470 op = opcodes.OpInstanceCreate(
1471 forthcoming=forthcoming,
1472 commit=commit,
1473 instance_name=instance,
1474 disks=disks,
1475 disk_template=opts.disk_template,
1476 group_name=opts.nodegroup,
1477 nics=nics,
1478 conflicts_check=opts.conflicts_check,
1479 pnode=pnode, snode=snode,
1480 ip_check=opts.ip_check,
1481 name_check=opts.name_check,
1482 wait_for_sync=opts.wait_for_sync,
1483 file_storage_dir=opts.file_storage_dir,
1484 file_driver=opts.file_driver,
1485 iallocator=opts.iallocator,
1486 hypervisor=hypervisor,
1487 hvparams=hvparams,
1488 beparams=opts.beparams,
1489 osparams=opts.osparams,
1490 osparams_private=osparams_private,
1491 osparams_secret=osparams_secret,
1492 mode=mode,
1493 opportunistic_locking=opts.opportunistic_locking,
1494 start=start,
1495 os_type=os_type,
1496 force_variant=force_variant,
1497 src_node=src_node,
1498 src_path=src_path,
1499 compress=compress,
1500 tags=tags,
1501 no_install=no_install,
1502 identify_defaults=identify_defaults,
1503 ignore_ipolicy=opts.ignore_ipolicy,
1504 instance_communication=instance_communication,
1505 helper_startup_timeout=helper_startup_timeout,
1506 helper_shutdown_timeout=helper_shutdown_timeout)
1507
1508 SubmitOrSend(op, opts)
1509 return 0
1510
1513 """Helper class for L{RunWhileDaemonsStopped} to simplify state management
1514
1515 """
  def __init__(self, feedback_fn, cluster_name, master_node,
               online_nodes, ssh_ports, exclude_daemons, debug,
               verbose):
    """Initializes this class.

    @type feedback_fn: callable
    @param feedback_fn: Feedback function
    @type cluster_name: string
    @param cluster_name: Cluster name
    @type master_node: string
    @param master_node: Master node name
    @type online_nodes: list
    @param online_nodes: List of names of online nodes
    @type ssh_ports: list
    @param ssh_ports: List of SSH ports of online nodes, in the same
      order as C{online_nodes}
    @type exclude_daemons: list of string
    @param exclude_daemons: list of daemons that will be restarted on master
                            after all others are shutdown
    @type debug: boolean
    @param debug: show debug output
    @type verbose: boolean
    @param verbose: show verbose output

    """
    self.feedback_fn = feedback_fn
    self.cluster_name = cluster_name
    self.master_node = master_node
    self.online_nodes = online_nodes
    # Map node name -> SSH port for remote command execution in _RunCmd
    self.ssh_ports = dict(zip(online_nodes, ssh_ports))

    self.ssh = ssh.SshRunner(self.cluster_name)

    # All online nodes except the master node
    self.nonmaster_nodes = [name for name in online_nodes
                            if name != master_node]

    self.exclude_daemons = exclude_daemons
    self.debug = debug
    self.verbose = verbose

    assert self.master_node not in self.nonmaster_nodes
1556
1557 - def _RunCmd(self, node_name, cmd):
1558 """Runs a command on the local or a remote machine.
1559
1560 @type node_name: string
1561 @param node_name: Machine name
1562 @type cmd: list
1563 @param cmd: Command
1564
1565 """
1566 if node_name is None or node_name == self.master_node:
1567
1568 result = utils.RunCmd(cmd)
1569 else:
1570 result = self.ssh.Run(node_name, constants.SSH_LOGIN_USER,
1571 utils.ShellQuoteArgs(cmd),
1572 port=self.ssh_ports[node_name])
1573
1574 if result.failed:
1575 errmsg = ["Failed to run command %s" % result.cmd]
1576 if node_name:
1577 errmsg.append("on node %s" % node_name)
1578 errmsg.append(": exitcode %s and error %s" %
1579 (result.exit_code, result.output))
1580 raise errors.OpExecError(" ".join(errmsg))
1581
  def Call(self, fn, *args):
    """Call function while all daemons are stopped.

    The shutdown/startup sequence is: block the watcher, stop the master
    daemons, stop daemons on all online nodes (master first, restarting
    any daemons in C{self.exclude_daemons} on the master), run C{fn},
    then restart everything in reverse (non-master nodes first).

    @type fn: callable
    @param fn: Function to be called; receives this helper instance as
      its first argument, followed by C{args}

    """
    # Make sure the watcher cannot restart daemons behind our back
    self.feedback_fn("Blocking watcher")
    watcher_block = utils.FileLock.Open(pathutils.WATCHER_LOCK_FILE)
    try:
      # Block watcher by acquiring an exclusive lock
      # TODO: Currently, this lock is held while the actual operation is
      # running; it would be nicer to only hold it while stopping daemons
      watcher_block.Exclusive(blocking=True)

      # Stop master daemons first, so that no new jobs can arrive
      self.feedback_fn("Stopping master daemons")
      self._RunCmd(None, [pathutils.DAEMON_UTIL, "stop-master"])
      try:
        # Stop daemons everywhere; the master node is handled first
        online_nodes = [self.master_node] + [n for n in self.online_nodes
                                             if n != self.master_node]
        for node_name in online_nodes:
          self.feedback_fn("Stopping daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop-all"])
          # On the master, immediately restart the daemons that were
          # explicitly excluded so they are available while 'fn' runs
          if node_name == self.master_node:
            for daemon in self.exclude_daemons:
              self.feedback_fn("Starting daemon '%s' on %s" % (daemon,
                                                               node_name))
              self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start", daemon])

        # All daemons are shut down now; run the actual payload
        try:
          return fn(self, *args)
        except Exception, err:
          # Report the error before re-raising so the user sees it even
          # while daemons are being restarted in the finally block below
          _, errmsg = FormatError(err)
          logging.exception("Caught exception")
          self.feedback_fn(errmsg)
          raise
      finally:
        # Start cluster daemons again on all nodes, master node last
        for node_name in self.nonmaster_nodes + [self.master_node]:
          # Stop the excluded daemons on the master (in reverse order of
          # their start above) before doing a clean "start-all"
          if node_name == self.master_node:
            self.exclude_daemons.reverse()
            for daemon in self.exclude_daemons:
              self.feedback_fn("Stopping daemon '%s' on %s" % (daemon,
                                                               node_name))
              self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "stop", daemon])
          self.feedback_fn("Starting daemons on %s" % node_name)
          self._RunCmd(node_name, [pathutils.DAEMON_UTIL, "start-all"])

    finally:
      # Resume the watcher
      watcher_block.Close()
1641
1644 """Calls a function while all cluster daemons are stopped.
1645
1646 @type feedback_fn: callable
1647 @param feedback_fn: Feedback function
1648 @type exclude_daemons: list of string
1649 @param exclude_daemons: list of daemons that stopped, but immediately
1650 restarted on the master to be available when calling
1651 'fn'. If None, all daemons will be stopped and none
1652 will be started before calling 'fn'.
1653 @type fn: callable
1654 @param fn: Function to be called when daemons are stopped
1655
1656 """
1657 feedback_fn("Gathering cluster information")
1658
1659
1660 cl = GetClient()
1661
1662 (cluster_name, master_node) = \
1663 cl.QueryConfigValues(["cluster_name", "master_node"])
1664
1665 online_nodes = GetOnlineNodes([], cl=cl)
1666 ssh_ports = GetNodesSshPorts(online_nodes, cl)
1667
1668
1669 del cl
1670
1671 assert master_node in online_nodes
1672 if exclude_daemons is None:
1673 exclude_daemons = []
1674
1675 debug = kwargs.get("debug", False)
1676 verbose = kwargs.get("verbose", False)
1677
1678 return _RunWhileDaemonsStoppedHelper(
1679 feedback_fn, cluster_name, master_node, online_nodes, ssh_ports,
1680 exclude_daemons, debug, verbose).Call(fn, *args)
1681
1684 """Calls a function while all cluster daemons are stopped.
1685
1686 @type feedback_fn: callable
1687 @param feedback_fn: Feedback function
1688 @type fn: callable
1689 @param fn: Function to be called when daemons are stopped
1690
1691 """
1692 RunWhileDaemonsStopped(feedback_fn, None, fn, *args)
1693
1694
def GenerateTable(headers, fields, separator, data,
                  numfields=None, unitfields=None,
                  units=None):
  """Prints a table with headers and different fields.

  Note: the rows in C{data} are modified in place; unit fields are
  converted via L{utils.FormatUnit} and every cell is stringified.

  @type headers: dict
  @param headers: dictionary mapping field names to headers for
      the table
  @type fields: list
  @param fields: the field names corresponding to each row in
      the data field
  @param separator: the separator to be used; if this is None,
      the default 'smart' algorithm is used which computes optimal
      field width, otherwise just the separator is used between
      each field
  @type data: list
  @param data: a list of lists, each sublist being one row to be output
  @type numfields: list
  @param numfields: a list with the fields that hold numeric
      values and thus should be right-aligned
  @type unitfields: list
  @param unitfields: a list with the fields that hold numeric
      values that should be formatted with the units field
  @type units: string or None
  @param units: the units we should use for formatting, or None for
      automatic choice (human-readable for non-separator usage, otherwise
      megabytes); this is a one-letter string
  @rtype: list of strings
  @return: the formatted table lines (header line first, if any)

  """
  if units is None:
    if separator:
      units = "m"
    else:
      units = "h"

  if numfields is None:
    numfields = []
  if unitfields is None:
    unitfields = []

  numfields = utils.FieldSet(*numfields)
  unitfields = utils.FieldSet(*unitfields)

  # Build one %-format specifier per column: plain "%s" when a separator
  # is used, otherwise width-padded ("%*s" right- or "%-*s" left-aligned)
  format_fields = []
  for field in fields:
    if headers and field not in headers:
      # TODO: handle unknown fields better (either raise an exception, or
      # deal more intelligently with variable fields)
      headers[field] = field
    if separator is not None:
      format_fields.append("%s")
    elif numfields.Matches(field):
      format_fields.append("%*s")
    else:
      format_fields.append("%-*s")

  if separator is None:
    # mlens tracks the maximum width seen per column
    mlens = [0 for name in fields]
    format_str = " ".join(format_fields)
  else:
    # Escape '%' in the separator so the final %-formatting is safe
    format_str = separator.replace("%", "%%").join(format_fields)

  # First pass: convert all cells to strings (in place) and record widths
  for row in data:
    if row is None:
      continue
    for idx, val in enumerate(row):
      if unitfields.Matches(fields[idx]):
        try:
          val = int(val)
        except (TypeError, ValueError):
          # Not a number, leave the value as-is
          pass
        else:
          val = row[idx] = utils.FormatUnit(val, units)
      val = row[idx] = str(val)
      if separator is None:
        mlens[idx] = max(mlens[idx], len(val))

  result = []
  if headers:
    args = []
    for idx, name in enumerate(fields):
      hdr = headers[name]
      if separator is None:
        # Headers also contribute to the column widths
        mlens[idx] = max(mlens[idx], len(hdr))
        args.append(mlens[idx])
      args.append(hdr)
    result.append(format_str % tuple(args))

  if separator is None:
    assert len(mlens) == len(fields)

    # Avoid trailing whitespace: the last column needs no padding unless
    # it is right-aligned (numeric)
    if fields and not numfields.Matches(fields[-1]):
      mlens[-1] = 0

  # Second pass: render the rows; None rows become all-dash placeholders
  for line in data:
    args = []
    if line is None:
      line = ["-" for _ in fields]
    for idx in range(len(fields)):
      if separator is None:
        args.append(mlens[idx])
      args.append(line[idx])
    result.append(format_str % tuple(args))

  return result
1801
1810
1811
1812
1813 _DEFAULT_FORMAT_QUERY = {
1814 constants.QFT_TEXT: (str, False),
1815 constants.QFT_BOOL: (_FormatBool, False),
1816 constants.QFT_NUMBER: (str, True),
1817 constants.QFT_NUMBER_FLOAT: (str, True),
1818 constants.QFT_TIMESTAMP: (utils.FormatTime, False),
1819 constants.QFT_OTHER: (str, False),
1820 constants.QFT_UNKNOWN: (str, False),
1821 }
1853
1890
1911
1947
1948 columns = []
1949 for fdef in result.fields:
1950 assert fdef.title and fdef.name
1951 (fn, align_right) = _GetColumnFormatter(fdef, format_override, unit)
1952 columns.append(TableColumn(fdef.title,
1953 _QueryColumnFormatter(fn, _RecordStatus,
1954 verbose),
1955 align_right))
1956
1957 table = FormatTable(result.data, columns, header, separator)
1958
1959
1960 assert len(stats) == len(constants.RS_ALL)
1961 assert compat.all(count >= 0 for count in stats.values())
1962
1963
1964
1965 if (stats[constants.RS_UNKNOWN] or
1966 (not result.data and _GetUnknownFields(result.fields))):
1967 status = QR_UNKNOWN
1968 elif compat.any(count > 0 for key, count in stats.items()
1969 if key != constants.RS_NORMAL):
1970 status = QR_INCOMPLETE
1971 else:
1972 status = QR_NORMAL
1973
1974 return (status, table)
1975
1978 """Returns list of unknown fields included in C{fdefs}.
1979
1980 @type fdefs: list of L{objects.QueryFieldDefinition}
1981
1982 """
1983 return [fdef for fdef in fdefs
1984 if fdef.kind == constants.QFT_UNKNOWN]
1985
1988 """Prints a warning to stderr if a query included unknown fields.
1989
1990 @type fdefs: list of L{objects.QueryFieldDefinition}
1991
1992 """
1993 unknown = _GetUnknownFields(fdefs)
1994 if unknown:
1995 ToStderr("Warning: Queried for unknown fields %s",
1996 utils.CommaJoin(fdef.name for fdef in unknown))
1997 return True
1998
1999 return False
2000
2001
def GenericList(resource, fields, names, unit, separator, header, cl=None,
                format_override=None, verbose=False, force_filter=False,
                namefield=None, qfilter=None, isnumeric=False):
  """Generic implementation for listing all items of a resource.

  Queries the given resource, formats the result as a table and writes
  it to stdout.

  @param resource: One of L{constants.QR_VIA_LUXI}
  @type fields: list of strings
  @param fields: List of fields to query for
  @type names: list of strings
  @param names: Names of items to query for
  @type unit: string or None
  @param unit: Unit used for formatting fields of type L{constants.QFT_UNIT} or
    None for automatic choice (human-readable for non-separator usage,
    otherwise megabytes); this is a one-letter string
  @type separator: string or None
  @param separator: String used to separate fields
  @type header: bool
  @param header: Whether to show header row
  @type force_filter: bool
  @param force_filter: Whether to always treat names as filter
  @type format_override: dict
  @param format_override: Dictionary for overriding field formatting functions,
    indexed by field name, contents like L{_DEFAULT_FORMAT_QUERY}
  @type verbose: boolean
  @param verbose: whether to use verbose field descriptions or not
  @type namefield: string
  @param namefield: Name of field to use for simple filters (see
    L{qlang.MakeFilter} for details)
  @type qfilter: list or None
  @param qfilter: Query filter (in addition to names)
  @param isnumeric: bool
  @param isnumeric: Whether the namefield's type is numeric, and therefore
    any simple filters built by namefield should use integer values to
    reflect that
  @return: L{constants.EXIT_UNKNOWN_FIELD} if unknown fields were queried,
    L{constants.EXIT_SUCCESS} otherwise

  """
  # An empty name list means "no name restriction"
  name_qfilter = qlang.MakeFilter(names or None, force_filter,
                                  namefield=namefield, isnumeric=isnumeric)

  # Combine the name-based filter with the explicitly given one, if any
  if name_qfilter is not None:
    if qfilter is None:
      qfilter = name_qfilter
    else:
      qfilter = [qlang.OP_AND, name_qfilter, qfilter]

  if cl is None:
    cl = GetClient()

  response = cl.Query(resource, fields, qfilter)

  found_unknown = _WarnUnknownFields(response.fields)

  (status, data) = FormatQueryResult(response, unit=unit, separator=separator,
                                     header=header,
                                     format_override=format_override,
                                     verbose=verbose)

  for output_line in data:
    ToStdout(output_line)

  # Unknown fields in the response and the QR_UNKNOWN status must agree
  assert ((found_unknown and status == QR_UNKNOWN) or
          (not found_unknown and status != QR_UNKNOWN))

  if status == QR_UNKNOWN:
    return constants.EXIT_UNKNOWN_FIELD

  # TODO: Should the list command fail if not all data could be collected?
  return constants.EXIT_SUCCESS
2072
2075 """Helper function for L{GenericListFields} to get query field description.
2076
2077 @type fdef: L{objects.QueryFieldDefinition}
2078 @rtype: list
2079
2080 """
2081 return [
2082 fdef.name,
2083 _QFT_NAMES.get(fdef.kind, fdef.kind),
2084 fdef.title,
2085 fdef.doc,
2086 ]
2087
2090 """Generic implementation for listing fields for a resource.
2091
2092 @param resource: One of L{constants.QR_VIA_LUXI}
2093 @type fields: list of strings
2094 @param fields: List of fields to query for
2095 @type separator: string or None
2096 @param separator: String used to separate fields
2097 @type header: bool
2098 @param header: Whether to show header row
2099
2100 """
2101 if cl is None:
2102 cl = GetClient()
2103
2104 if not fields:
2105 fields = None
2106
2107 response = cl.QueryFields(resource, fields)
2108
2109 found_unknown = _WarnUnknownFields(response.fields)
2110
2111 columns = [
2112 TableColumn("Name", str, False),
2113 TableColumn("Type", str, False),
2114 TableColumn("Title", str, False),
2115 TableColumn("Description", str, False),
2116 ]
2117
2118 rows = map(_FieldDescValues, response.fields)
2119
2120 for line in FormatTable(rows, columns, header, separator):
2121 ToStdout(line)
2122
2123 if found_unknown:
2124 return constants.EXIT_UNKNOWN_FIELD
2125
2126 return constants.EXIT_SUCCESS
2127
2130 """Describes a column for L{FormatTable}.
2131
2132 """
2133 - def __init__(self, title, fn, align_right):
2134 """Initializes this class.
2135
2136 @type title: string
2137 @param title: Column title
2138 @type fn: callable
2139 @param fn: Formatting function
2140 @type align_right: bool
2141 @param align_right: Whether to align values on the right-hand side
2142
2143 """
2144 self.title = title
2145 self.format = fn
2146 self.align_right = align_right
2147
2159
2208
2225
2228 """Parse a time specification.
2229
2230 The following suffixed will be recognized:
2231
2232 - s: seconds
2233 - m: minutes
2234 - h: hours
2235 - d: day
2236 - w: weeks
2237
2238 Without any suffix, the value will be taken to be in seconds.
2239
2240 """
2241 value = str(value)
2242 if not value:
2243 raise errors.OpPrereqError("Empty time specification passed",
2244 errors.ECODE_INVAL)
2245 suffix_map = {
2246 "s": 1,
2247 "m": 60,
2248 "h": 3600,
2249 "d": 86400,
2250 "w": 604800,
2251 }
2252 if value[-1] not in suffix_map:
2253 try:
2254 value = int(value)
2255 except (TypeError, ValueError):
2256 raise errors.OpPrereqError("Invalid time specification '%s'" % value,
2257 errors.ECODE_INVAL)
2258 else:
2259 multiplier = suffix_map[value[-1]]
2260 value = value[:-1]
2261 if not value:
2262 raise errors.OpPrereqError("Invalid time specification (only"
2263 " suffix passed)", errors.ECODE_INVAL)
2264 try:
2265 value = int(value) * multiplier
2266 except (TypeError, ValueError):
2267 raise errors.OpPrereqError("Invalid time specification '%s'" % value,
2268 errors.ECODE_INVAL)
2269 return value
2270
2271
def GetOnlineNodes(nodes, cl=None, nowarn=False, secondary_ips=False,
                   filter_master=False, nodegroup=None):
  """Returns the names (or secondary IPs) of all online nodes.

  Unless disabled via C{nowarn}, a note listing the skipped offline
  nodes is written to stderr.

  @param nodes: if not empty, restrict the query to this subset of nodes
    (minus the offline ones)
  @param cl: if not None, luxi client to use
  @type nowarn: boolean
  @param nowarn: by default, this function will output a note with the
    offline nodes that are skipped; if this parameter is True the
    note is not displayed
  @type secondary_ips: boolean
  @param secondary_ips: if True, return the secondary IPs instead of the
    names, useful for doing network traffic over the replication interface
    (if any)
  @type filter_master: boolean
  @param filter_master: if True, do not return the master node in the list
    (useful in coordination with secondary_ips where we cannot check our
    node name against the list)
  @type nodegroup: string
  @param nodegroup: If set, only return nodes in this node group

  """
  if cl is None:
    cl = GetClient()

  # Collect the individual filter conditions
  conditions = []

  if nodes:
    conditions.append(qlang.MakeSimpleFilter("name", nodes))

  if nodegroup is not None:
    # Match the group by either name or UUID
    conditions.append([qlang.OP_OR, [qlang.OP_EQUAL, "group", nodegroup],
                       [qlang.OP_EQUAL, "group.uuid", nodegroup]])

  if filter_master:
    conditions.append([qlang.OP_NOT, [qlang.OP_TRUE, "master"]])

  # Combine the conditions into a single query filter
  if not conditions:
    final_filter = None
  elif len(conditions) == 1:
    final_filter = conditions[0]
  else:
    final_filter = [qlang.OP_AND] + conditions

  result = cl.Query(constants.QR_NODE, ["name", "offline", "sip"], final_filter)

  # Each row cell is a (status, value) pair; index into the row directly
  def _IsOffline(row):
    return row[1][1]

  def _GetName(row):
    return row[0][1]

  def _GetSip(row):
    return row[2][1]

  (offline, online) = compat.partition(result.data, _IsOffline)

  if offline and not nowarn:
    ToStderr("Note: skipping offline node(s): %s" %
             utils.CommaJoin(map(_GetName, offline)))

  return map(_GetSip if secondary_ips else _GetName, online)
2348
2351 """Retrieves SSH ports of given nodes.
2352
2353 @param nodes: the names of nodes
2354 @type nodes: a list of strings
2355 @param cl: a client to use for the query
2356 @type cl: L{ganeti.luxi.Client}
2357 @return: the list of SSH ports corresponding to the nodes
2358 @rtype: a list of tuples
2359
2360 """
2361 return map(lambda t: t[0],
2362 cl.QueryNodes(names=nodes,
2363 fields=["ndp/ssh_port"],
2364 use_locking=False))
2365
2368 """Retrieves the UUIDs of given nodes.
2369
2370 @param nodes: the names of nodes
2371 @type nodes: a list of string
2372 @param cl: a client to use for the query
2373 @type cl: L{ganeti.luxi.Client}
2374 @return: the list of UUIDs corresponding to the nodes
2375 @rtype: a list of tuples
2376
2377 """
2378 return map(lambda t: t[0],
2379 cl.QueryNodes(names=nodes,
2380 fields=["uuid"],
2381 use_locking=False))
2382
2385 """Write a message to a stream, bypassing the logging system
2386
2387 @type stream: file object
2388 @param stream: the file to which we should write
2389 @type txt: str
2390 @param txt: the message
2391
2392 """
2393 try:
2394 if args:
2395 args = tuple(args)
2396 stream.write(txt % args)
2397 else:
2398 stream.write(txt)
2399 stream.write("\n")
2400 stream.flush()
2401 except IOError, err:
2402 if err.errno == errno.EPIPE:
2403
2404 sys.exit(constants.EXIT_FAILURE)
2405 else:
2406 raise
2407
2410 """Write a message to stdout only, bypassing the logging system
2411
2412 This is just a wrapper over _ToStream.
2413
2414 @type txt: str
2415 @param txt: the message
2416
2417 """
2418 _ToStream(sys.stdout, txt, *args)
2419
2422 """Write a message to stdout and additionally log it at INFO level"""
2423 ToStdout(txt, *args)
2424 logging.info(txt, *args)
2425
2428 """Write a message to stderr only, bypassing the logging system
2429
2430 This is just a wrapper over _ToStream.
2431
2432 @type txt: str
2433 @param txt: the message
2434
2435 """
2436 _ToStream(sys.stderr, txt, *args)
2437
2440 """Class which manages the submission and execution of multiple jobs.
2441
2442 Note that instances of this class should not be reused between
2443 GetResults() calls.
2444
2445 """
2446 - def __init__(self, cl=None, verbose=True, opts=None, feedback_fn=None):
2447 self.queue = []
2448 if cl is None:
2449 cl = GetClient()
2450 self.cl = cl
2451 self.verbose = verbose
2452 self.jobs = []
2453 self.opts = opts
2454 self.feedback_fn = feedback_fn
2455 self._counter = itertools.count()
2456
2457 @staticmethod
2459 """Helper function for formatting name.
2460
2461 """
2462 if name:
2463 return fmt % name
2464
2465 return ""
2466
2468 """Record a job for later submit.
2469
2470 @type name: string
2471 @param name: a description of the job, will be used in WaitJobSet
2472
2473 """
2474 SetGenericOpcodeOpts(ops, self.opts)
2475 self.queue.append((self._counter.next(), name, ops))
2476
2477 - def AddJobId(self, name, status, job_id):
2478 """Adds a job ID to the internal queue.
2479
2480 """
2481 self.jobs.append((self._counter.next(), status, job_id, name))
2482
2484 """Submit all pending jobs.
2485
2486 """
2487 if each:
2488 results = []
2489 for (_, _, ops) in self.queue:
2490
2491
2492 results.append([True, self.cl.SubmitJob(ops)[0]])
2493 else:
2494 results = self.cl.SubmitManyJobs([ops for (_, _, ops) in self.queue])
2495 for ((status, data), (idx, name, _)) in zip(results, self.queue):
2496 self.jobs.append((idx, status, data, name))
2497
2499 """Choose a non-waiting/queued job to poll next.
2500
2501 """
2502 assert self.jobs, "_ChooseJob called with empty job list"
2503
2504 result = self.cl.QueryJobs([i[2] for i in self.jobs[:_CHOOSE_BATCH]],
2505 ["status"])
2506 assert result
2507
2508 for job_data, status in zip(self.jobs, result):
2509 if (isinstance(status, list) and status and
2510 status[0] in (constants.JOB_STATUS_QUEUED,
2511 constants.JOB_STATUS_WAITING,
2512 constants.JOB_STATUS_CANCELING)):
2513
2514 continue
2515
2516 self.jobs.remove(job_data)
2517 return job_data
2518
2519
2520 return self.jobs.pop(0)
2521
2523 """Wait for and return the results of all jobs.
2524
2525 @rtype: list
2526 @return: list of tuples (success, job results), in the same order
2527 as the submitted jobs; if a job has failed, instead of the result
2528 there will be the error message
2529
2530 """
2531 if not self.jobs:
2532 self.SubmitPending()
2533 results = []
2534 if self.verbose:
2535 ok_jobs = [row[2] for row in self.jobs if row[1]]
2536 if ok_jobs:
2537 ToStdout("Submitted jobs %s", utils.CommaJoin(ok_jobs))
2538
2539
2540 self.jobs, failures = compat.partition(self.jobs, lambda x: x[1])
2541 for idx, _, jid, name in failures:
2542 ToStderr("Failed to submit job%s: %s", self._IfName(name, " for %s"), jid)
2543 results.append((idx, False, jid))
2544
2545 while self.jobs:
2546 (idx, _, jid, name) = self._ChooseJob()
2547 ToStdout("Waiting for job %s%s ...", jid, self._IfName(name, " for %s"))
2548 try:
2549 job_result = PollJob(jid, cl=self.cl, feedback_fn=self.feedback_fn)
2550 success = True
2551 except errors.JobLost, err:
2552 _, job_result = FormatError(err)
2553 ToStderr("Job %s%s has been archived, cannot check its result",
2554 jid, self._IfName(name, " for %s"))
2555 success = False
2556 except (errors.GenericError, rpcerr.ProtocolError), err:
2557 _, job_result = FormatError(err)
2558 success = False
2559
2560 ToStderr("Job %s%s has failed: %s",
2561 jid, self._IfName(name, " for %s"), job_result)
2562
2563 results.append((idx, success, job_result))
2564
2565
2566 results.sort()
2567 results = [i[1:] for i in results]
2568
2569 return results
2570
2572 """Wait for job results or only print the job IDs.
2573
2574 @type wait: boolean
2575 @param wait: whether to wait or not
2576
2577 """
2578 if wait:
2579 return self.GetResults()
2580 else:
2581 if not self.jobs:
2582 self.SubmitPending()
2583 for _, status, result, name in self.jobs:
2584 if status:
2585 ToStdout("%s: %s", result, name)
2586 else:
2587 ToStderr("Failure for %s: %s", name, result)
2588 return [row[1:3] for row in self.jobs]
2589
2611
2619
2678
2681 values = ("%s=%s" % (par, val) for (par, val) in sorted(specs.items()))
2682 buf.write(",".join(values))
2683
2686 """Print the command option used to generate the given instance policy.
2687
2688 Currently only the parts dealing with specs are supported.
2689
2690 @type buf: StringIO
2691 @param buf: stream to write into
2692 @type ipolicy: dict
2693 @param ipolicy: instance policy
2694 @type isgroup: bool
2695 @param isgroup: whether the policy is at group level
2696
2697 """
2698 if not isgroup:
2699 stdspecs = ipolicy.get("std")
2700 if stdspecs:
2701 buf.write(" %s " % IPOLICY_STD_SPECS_STR)
2702 _PrintSpecsParameters(buf, stdspecs)
2703 minmaxes = ipolicy.get("minmax", [])
2704 first = True
2705 for minmax in minmaxes:
2706 minspecs = minmax.get("min")
2707 maxspecs = minmax.get("max")
2708 if minspecs and maxspecs:
2709 if first:
2710 buf.write(" %s " % IPOLICY_BOUNDS_SPECS_STR)
2711 first = False
2712 else:
2713 buf.write("//")
2714 buf.write("min:")
2715 _PrintSpecsParameters(buf, minspecs)
2716 buf.write("/max:")
2717 _PrintSpecsParameters(buf, maxspecs)
2718
2721 """Ask the user to confirm an operation on a list of list_type.
2722
2723 This function is used to request confirmation for doing an operation
2724 on a given list of list_type.
2725
2726 @type names: list
2727 @param names: the list of names that we display when
2728 we ask for confirmation
2729 @type list_type: str
2730 @param list_type: Human readable name for elements in the list (e.g. nodes)
2731 @type text: str
2732 @param text: the operation that the user should confirm
2733 @rtype: boolean
2734 @return: True or False depending on user's confirmation.
2735
2736 """
2737 count = len(names)
2738 msg = ("The %s will operate on %d %s.\n%s"
2739 "Do you want to continue?" % (text, count, list_type, extra))
2740 affected = (("\nAffected %s:\n" % list_type) +
2741 "\n".join([" %s" % name for name in names]))
2742
2743 choices = [("y", True, "Yes, execute the %s" % text),
2744 ("n", False, "No, abort the %s" % text)]
2745
2746 if count > 20:
2747 choices.insert(1, ("v", "v", "View the list of affected %s" % list_type))
2748 question = msg
2749 else:
2750 question = msg + affected
2751
2752 choice = AskUser(question, choices)
2753 if choice == "v":
2754 choices.pop(1)
2755 choice = AskUser(msg + affected, choices)
2756 return choice
2757
2760 """Parses and returns an array of potential values with units.
2761
2762 """
2763 parsed = {}
2764 for k, v in elements.items():
2765 if v == constants.VALUE_DEFAULT:
2766 parsed[k] = v
2767 else:
2768 parsed[k] = utils.ParseUnit(v)
2769 return parsed
2770
2771
def _InitISpecsFromSplitOpts(ipolicy, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all):
  """Fills C{ipolicy} from the old-style, per-parameter ispecs options.

  Each C{ispecs_*} argument is a dict whose keys are a subset of
  L{constants.ISPECS_MIN}, L{constants.ISPECS_MAX} and
  L{constants.ISPECS_STD}; memory and disk sizes may carry unit suffixes
  (see L{_MaybeParseUnit}).

  @param ipolicy: instance policy dict, filled in place
  @type group_ipolicy: bool
  @param group_ipolicy: whether the policy is at group level (selects the
    type checks applied to the values)
  @type fill_all: bool
  @param fill_all: whether missing values should be filled from the
    cluster-wide defaults

  """
  try:
    # Sizes may be given with unit suffixes; normalize them first
    if ispecs_mem_size:
      ispecs_mem_size = _MaybeParseUnit(ispecs_mem_size)
    if ispecs_disk_size:
      ispecs_disk_size = _MaybeParseUnit(ispecs_disk_size)
  except (TypeError, ValueError, errors.UnitParseError), err:
    raise errors.OpPrereqError("Invalid disk (%s) or memory (%s) size"
                               " in policy: %s" %
                               (ispecs_disk_size, ispecs_mem_size, err),
                               errors.ECODE_INVAL)

  # Group the options per ispecs parameter name; each value is a dict
  # indexed by min/max/std
  ispecs_transposed = {
    constants.ISPEC_MEM_SIZE: ispecs_mem_size,
    constants.ISPEC_CPU_COUNT: ispecs_cpu_count,
    constants.ISPEC_DISK_COUNT: ispecs_disk_count,
    constants.ISPEC_DISK_SIZE: ispecs_disk_size,
    constants.ISPEC_NIC_COUNT: ispecs_nic_count,
    }

  # First, check that the values given are correct
  if group_ipolicy:
    forced_type = TISPECS_GROUP_TYPES
  else:
    forced_type = TISPECS_CLUSTER_TYPES
  for specs in ispecs_transposed.values():
    assert type(specs) is dict
    utils.ForceDictType(specs, forced_type)

  # Then transpose: build one dict per min/max/std level, indexed by
  # parameter name
  ispecs = {
    constants.ISPECS_MIN: {},
    constants.ISPECS_MAX: {},
    constants.ISPECS_STD: {},
    }
  for (name, specs) in ispecs_transposed.iteritems():
    assert name in constants.ISPECS_PARAMETERS
    for key, val in specs.items():
      assert key in ispecs
      ispecs[key][name] = val
  minmax_out = {}
  for key in constants.ISPECS_MINMAX_KEYS:
    if fill_all:
      minmax_out[key] = \
        objects.FillDict(constants.ISPECS_MINMAX_DEFAULTS[key], ispecs[key])
    else:
      minmax_out[key] = ispecs[key]
  # Old-style options can only describe a single min/max group
  ipolicy[constants.ISPECS_MINMAX] = [minmax_out]
  if fill_all:
    ipolicy[constants.ISPECS_STD] = \
      objects.FillDict(constants.IPOLICY_DEFAULTS[constants.ISPECS_STD],
                       ispecs[constants.ISPECS_STD])
  else:
    ipolicy[constants.ISPECS_STD] = ispecs[constants.ISPECS_STD]
2829
2842
2853
2856 ret = None
2857 if (minmax_ispecs and allowed_values and len(minmax_ispecs) == 1 and
2858 len(minmax_ispecs[0]) == 1):
2859 for (key, spec) in minmax_ispecs[0].items():
2860
2861 if key in allowed_values and not spec:
2862 ret = key
2863 return ret
2864
2885
2886
def CreateIPolicyFromOpts(ispecs_mem_size=None,
                          ispecs_cpu_count=None,
                          ispecs_disk_count=None,
                          ispecs_disk_size=None,
                          ispecs_nic_count=None,
                          minmax_ispecs=None,
                          std_ispecs=None,
                          ipolicy_disk_templates=None,
                          ipolicy_vcpu_ratio=None,
                          ipolicy_spindle_ratio=None,
                          group_ipolicy=False,
                          allowed_values=None,
                          fill_all=False):
  """Creation of instance policy based on command line options.

  @param fill_all: whether for cluster policies we should ensure that
    all values are filled

  """
  assert not (fill_all and allowed_values)

  # Old-style --specs-xxx options and new-style --ipolicy-xxx-specs
  # options are mutually exclusive
  split_specs = bool(ispecs_mem_size or ispecs_cpu_count or
                     ispecs_disk_count or ispecs_disk_size or
                     ispecs_nic_count)
  full_specs = minmax_ispecs is not None or std_ispecs is not None
  if split_specs and full_specs:
    raise errors.OpPrereqError("A --specs-xxx option cannot be specified"
                               " together with any --ipolicy-xxx-specs option",
                               errors.ECODE_INVAL)

  ipolicy_out = objects.MakeEmptyIPolicy()
  if split_specs:
    assert fill_all
    _InitISpecsFromSplitOpts(ipolicy_out, ispecs_mem_size, ispecs_cpu_count,
                             ispecs_disk_count, ispecs_disk_size,
                             ispecs_nic_count, group_ipolicy, fill_all)
  elif full_specs:
    _InitISpecsFromFullOpts(ipolicy_out, minmax_ispecs, std_ispecs,
                            group_ipolicy, allowed_values)

  if ipolicy_disk_templates is not None:
    # Keep special values (e.g. "default") as-is, otherwise store a list
    if allowed_values and ipolicy_disk_templates in allowed_values:
      ipolicy_out[constants.IPOLICY_DTS] = ipolicy_disk_templates
    else:
      ipolicy_out[constants.IPOLICY_DTS] = list(ipolicy_disk_templates)

  for (key, value) in [(constants.IPOLICY_VCPU_RATIO, ipolicy_vcpu_ratio),
                       (constants.IPOLICY_SPINDLE_RATIO,
                        ipolicy_spindle_ratio)]:
    if value is not None:
      ipolicy_out[key] = value

  assert not (frozenset(ipolicy_out.keys()) - constants.IPOLICY_ALL_KEYS)

  # Cluster-wide policies must define every parameter when fill_all is set
  if not group_ipolicy and fill_all:
    ipolicy_out = objects.FillIPolicy(constants.IPOLICY_DEFAULTS, ipolicy_out)

  return ipolicy_out
2941
2944 """ Checks whether the input is not a container data type.
2945
2946 @rtype: bool
2947
2948 """
2949 return not (isinstance(data, (list, dict, tuple)))
2950
2953 """ Returns info about alignment if present in an encoded ordered dictionary.
2954
2955 @type data: list of tuple
2956 @param data: The encoded ordered dictionary, as defined in
2957 L{_SerializeGenericInfo}.
2958 @rtype: dict of any to int
2959 @return: The dictionary mapping alignment groups to the maximum length of the
2960 dictionary key found in the group.
2961
2962 """
2963 alignment_map = {}
2964 for entry in data:
2965 if len(entry) > 2:
2966 group_key = entry[2]
2967 key_length = len(entry[0])
2968 if group_key in alignment_map:
2969 alignment_map[group_key] = max(alignment_map[group_key], key_length)
2970 else:
2971 alignment_map[group_key] = key_length
2972
2973 return alignment_map
2974
def _SerializeGenericInfo(buf, data, level, afterkey=False):
  """Formatting core of L{PrintGenericInfo}.

  @param buf: (string) stream to accumulate the result into
  @param data: data to format
  @type level: int
  @param level: depth in the data hierarchy, used for indenting
  @type afterkey: bool
  @param afterkey: True when we are in the middle of a line after a key (used
      to properly add newlines or indentation)

  """
  indent_unit = "  "

  def _BeginMultiline():
    # When we are still on the same line as a "key: " prefix, terminate that
    # line; the first element written afterwards then needs explicit
    # indentation, just like every following one.
    if afterkey:
      buf.write("\n")
      return True
    return False

  if isinstance(data, dict):
    if not data:
      buf.write("\n")
    else:
      indent_next = _BeginMultiline()
      for key in sorted(data):
        if indent_next:
          buf.write(indent_unit * level)
        indent_next = True
        buf.write("%s: " % key)
        _SerializeGenericInfo(buf, data[key], level + 1, afterkey=True)
  elif isinstance(data, list) and data and isinstance(data[0], tuple):
    # list of tuples: the encoding used for ordered dictionaries
    indent_next = _BeginMultiline()
    align_map = _GetAlignmentMapping(data)
    for entry in data:
      (key, val) = entry[0:2]
      if indent_next:
        buf.write(indent_unit * level)
      indent_next = True
      buf.write("%s: " % key)
      if len(entry) > 2:
        # pad with spaces so that values sharing an alignment group line up
        buf.write(" " * (align_map[entry[2]] - len(key)))
      _SerializeGenericInfo(buf, val, level + 1, afterkey=True)
  elif isinstance(data, tuple) and all(_NotAContainer(elem) for elem in data):
    # tuple of scalars: rendered inline, flow-style
    buf.write("[%s]\n" % utils.CommaJoin(data))
  elif isinstance(data, (list, tuple)):
    # generic sequence: one "- " item per line
    if not data:
      buf.write("\n")
    else:
      indent_next = _BeginMultiline()
      for item in data:
        if indent_next:
          buf.write(indent_unit * level)
        indent_next = True
        buf.write("-" + indent_unit[1:])
        _SerializeGenericInfo(buf, item, level + 1)
  else:
    # scalar leaf value
    buf.write(str(data))
    buf.write("\n")
3055
def PrintGenericInfo(data):
  """Print information formatted according to the hierarchy.

  The output is a valid YAML string.

  @param data: the data to print. It's a hierarchical structure whose elements
      can be:
        - dictionaries, where keys are strings and values are of any of the
          types listed here
        - lists of tuples (key, value) or (key, value, alignment_group), where
          key is a string, value is of any of the types listed here, and
          alignment_group can be any hashable value; it's a way to encode
          ordered dictionaries; any entries sharing the same alignment group
          are aligned by appending whitespace before the value as needed
        - lists of any of the types listed here
        - strings

  """
  out = StringIO()
  _SerializeGenericInfo(out, data, 0)
  serialized = out.getvalue()
  # drop the trailing newline: ToStdout appends one of its own
  ToStdout(serialized.rstrip("\n"))
3078