22 """Remote API version 2 baserlib.library.
23
24 PUT or POST?
25 ============
26
27 According to RFC 2616, the main difference between PUT and POST is that
28 POST can create new resources, whereas PUT may only create or replace the
29 resource identified by the request URI.
30
31 In the context of this module, a POST on /2/instances is legitimate for
32 instance creation, while a PUT would not be, because the request creates a
33 new entity rather than replacing /2/instances itself.
34
35 So when adding new methods, if they operate on the URI entity itself,
36 PUT should be preferred over POST.
37
38 """
39
40
41
42
43
44 from ganeti import opcodes
45 from ganeti import http
46 from ganeti import constants
47 from ganeti import cli
48 from ganeti import utils
49 from ganeti import rapi
50 from ganeti.rapi import baserlib
51
52
53 _COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
54 I_FIELDS = ["name", "admin_state", "os",
55 "pnode", "snodes",
56 "disk_template",
57 "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
58 "network_port",
59 "disk.sizes", "disk_usage",
60 "beparams", "hvparams",
61 "oper_state", "oper_ram", "oper_vcpus", "status",
62 "custom_hvparams", "custom_beparams", "custom_nicparams",
63 ] + _COMMON_FIELDS
64
65 N_FIELDS = ["name", "offline", "master_candidate", "drained",
66 "dtotal", "dfree",
67 "mtotal", "mnode", "mfree",
68 "pinst_cnt", "sinst_cnt",
69 "ctotal", "cnodes", "csockets",
70 "pip", "sip", "role",
71 "pinst_list", "sinst_list",
72 "master_capable", "vm_capable",
73 "group.uuid",
74 ] + _COMMON_FIELDS
75
76 G_FIELDS = ["name", "uuid",
77 "alloc_policy",
78 "node_cnt", "node_list",
79 "ctime", "mtime", "serial_no",
80 ]
81
82 _NR_DRAINED = "drained"
83 _NR_MASTER_CANDIATE = "master-candidate"
84 _NR_MASTER = "master"
85 _NR_OFFLINE = "offline"
86 _NR_REGULAR = "regular"
87
88 _NR_MAP = {
89 "M": _NR_MASTER,
90 "C": _NR_MASTER_CANDIATE,
91 "D": _NR_DRAINED,
92 "O": _NR_OFFLINE,
93 "R": _NR_REGULAR,
94 }
95
96
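# Name of the request body field carrying the request data version for
# instance creation (read by the /2/instances POST handler below).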
97 _REQ_DATA_VERSION = "__version__"
98
99
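# Feature string for instance creation request data version 1 (presumably
# advertised to clients via the /2/features resource).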
100 _INST_CREATE_REQV1 = "instance-create-reqv1"
101
102
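# Feature string for instance reinstall request version 1 (presumably
# advertised to clients via the /2/features resource).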
103 _INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
104
105
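# Timeout in seconds used by the /2/jobs/[job_id]/wait resource when calling
# WaitForJobChangeOnce.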
106 _WFJC_TIMEOUT = 10
110 """/version resource.
111
112 This resource should be used to determine the remote API version and
113 to adapt clients accordingly.
114
115 """
116 @staticmethod
122
125 """/2/info resource.
126
127 """
128 @staticmethod
135
138 """/2/features resource.
139
140 """
141 @staticmethod
147
148
149 class R_2_os(baserlib.R_Generic):
150 """/2/os resource.
151
152 """
153 @staticmethod
155 """Return a list of all OSes.
156
157 Can return error 500 in case of a problem.
158
159 Example: ["debian-etch"]
160
161 """
162 cl = baserlib.GetClient()
163 op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
164 job_id = baserlib.SubmitJob([op], cl)
165
166 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
167 diagnose_data = result[0]
168
169 if not isinstance(diagnose_data, list):
170 raise http.HttpBadGateway(message="Can't get OS list")
171
172 os_names = []
173 for (name, variants) in diagnose_data:
174 os_names.extend(cli.CalculateOSNames(name, variants))
175
176 return os_names
177
180 """/2/redistribute-config resource.
181
182 """
183 @staticmethod
189
192 """/2/modify resource.
193
194 """
205
208 """/2/jobs resource.
209
210 """
211 @staticmethod
213 """Returns a dictionary of jobs.
214
215 @return: a list of dictionaries with job id and uri.
216
217 """
218 fields = ["id"]
219 cl = baserlib.GetClient()
220
221 result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
222 return baserlib.BuildUriList(result, "/2/jobs/%s",
223 uri_fields=("id", "uri"))
224
227 """/2/jobs/[job_id] resource.
228
229 """
231 """Returns a job status.
232
233 @return: a dictionary with job parameters.
234 The result includes:
235 - id: job ID as a number
236 - status: current job status as a string
237 - ops: involved OpCodes as a list of dictionaries, one for each
238 opcode in the job
239 - opstatus: OpCodes status as a list
240 - opresult: OpCodes results as a list of lists
241
242 """
243 fields = ["id", "ops", "status", "summary",
244 "opstatus", "opresult", "oplog",
245 "received_ts", "start_ts", "end_ts",
246 ]
247 job_id = self.items[0]
248 result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
249 if result is None:
250 raise http.HttpNotFound()
251 return baserlib.MapFields(fields, result)
252
254 """Cancel not-yet-started job.
255
256 """
257 job_id = self.items[0]
258 result = baserlib.GetClient().CancelJob(job_id)
259 return result
260
263 """/2/jobs/[job_id]/wait resource.
264
265 """
266
267
268 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
269
271 """Waits for job changes.
272
273 """
274 job_id = self.items[0]
275
276 fields = self.getBodyParameter("fields")
277 prev_job_info = self.getBodyParameter("previous_job_info", None)
278 prev_log_serial = self.getBodyParameter("previous_log_serial", None)
279
280 if not isinstance(fields, list):
281 raise http.HttpBadRequest("The 'fields' parameter should be a list")
282
283 if not (prev_job_info is None or isinstance(prev_job_info, list)):
284 raise http.HttpBadRequest("The 'previous_job_info' parameter should"
285 " be a list")
286
287 if not (prev_log_serial is None or
288 isinstance(prev_log_serial, (int, long))):
289 raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
290 " be a number")
291
292 client = baserlib.GetClient()
293 result = client.WaitForJobChangeOnce(job_id, fields,
294 prev_job_info, prev_log_serial,
295 timeout=_WFJC_TIMEOUT)
296 if not result:
297 raise http.HttpNotFound()
298
299 if result == constants.JOB_NOTCHANGED:
300
301 return None
302
303 (job_info, log_entries) = result
304
305 return {
306 "job_info": job_info,
307 "log_entries": log_entries,
308 }
309
312 """/2/nodes resource.
313
314 """
329
332 """/2/nodes/[node_name] resource.
333
334 """
347
350 """ /2/nodes/[node_name]/role resource.
351
352 """
365
367 """Sets the node role.
368
369 @return: a job id
370
371 """
372 if not isinstance(self.request_body, basestring):
373 raise http.HttpBadRequest("Invalid body contents, not a string")
374
375 node_name = self.items[0]
376 role = self.request_body
377
378 if role == _NR_REGULAR:
379 candidate = False
380 offline = False
381 drained = False
382
383 elif role == _NR_MASTER_CANDIATE:
384 candidate = True
385 offline = drained = None
386
387 elif role == _NR_DRAINED:
388 drained = True
389 candidate = offline = None
390
391 elif role == _NR_OFFLINE:
392 offline = True
393 candidate = drained = None
394
395 else:
396 raise http.HttpBadRequest("Can't set '%s' role" % role)
397
398 op = opcodes.OpNodeSetParams(node_name=node_name,
399 master_candidate=candidate,
400 offline=offline,
401 drained=drained,
402 force=bool(self.useForce()))
403
404 return baserlib.SubmitJob([op])
405
408 """/2/nodes/[node_name]/evacuate resource.
409
410 """
412 """Evacuate all secondary instances off a node.
413
414 """
415 node_name = self.items[0]
416 remote_node = self._checkStringVariable("remote_node", default=None)
417 iallocator = self._checkStringVariable("iallocator", default=None)
418 early_r = bool(self._checkIntVariable("early_release", default=0))
419 dry_run = bool(self.dryRun())
420
421 cl = baserlib.GetClient()
422
423 op = opcodes.OpNodeEvacStrategy(nodes=[node_name],
424 iallocator=iallocator,
425 remote_node=remote_node)
426
427 job_id = baserlib.SubmitJob([op], cl)
428
429 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
430
431 jobs = []
432 for iname, node in result[0]:
433 if dry_run:
434 jid = None
435 else:
436 op = opcodes.OpInstanceReplaceDisks(instance_name=iname,
437 remote_node=node, disks=[],
438 mode=constants.REPLACE_DISK_CHG,
439 early_release=early_r)
440 jid = baserlib.SubmitJob([op])
441 jobs.append((jid, iname, node))
442
443 return jobs
444
470
496
499 """/2/nodes/[node_name]/storage/modify resource.
500
501 """
526
529 """/2/nodes/[node_name]/storage/repair resource.
530
531 """
549
552 """Parses a request for creating a node group.
553
554 @rtype: L{opcodes.OpGroupAdd}
555 @return: Group creation opcode
556
557 """
558 group_name = baserlib.CheckParameter(data, "name")
559 alloc_policy = baserlib.CheckParameter(data, "alloc_policy", default=None)
560
561 return opcodes.OpGroupAdd(group_name=group_name,
562 alloc_policy=alloc_policy,
563 dry_run=dry_run)
564
594
597 """/2/groups/[group_name] resource.
598
599 """
612
621
624 """Parses a request for modifying a node group.
625
626 @rtype: L{opcodes.OpGroupSetParams}
627 @return: Group modify opcode
628
629 """
630 alloc_policy = baserlib.CheckParameter(data, "alloc_policy", default=None)
631 return opcodes.OpGroupSetParams(group_name=name, alloc_policy=alloc_policy)
632
635 """/2/groups/[group_name]/modify resource.
636
637 """
649
652 """Parses a request for renaming a node group.
653
654 @type name: string
655 @param name: name of the node group to rename
656 @type data: dict
657 @param data: the body received by the rename request
658 @type dry_run: bool
659 @param dry_run: whether to perform a dry run
660
661 @rtype: L{opcodes.OpGroupRename}
662 @return: Node group rename opcode
663
664 """
665 old_name = name
666 new_name = baserlib.CheckParameter(data, "new_name")
667
668 return opcodes.OpGroupRename(old_name=old_name, new_name=new_name,
669 dry_run=dry_run)
670
673 """/2/groups/[group_name]/rename resource.
674
675 """
686
689 """/2/groups/[group_name]/assign-nodes resource.
690
691 """
705
708 """Parses an instance creation request version 1.
709
710 @rtype: L{opcodes.OpInstanceCreate}
711 @return: Instance creation opcode
712
713 """
714
715 disks_input = baserlib.CheckParameter(data, "disks", exptype=list)
716
717 disks = []
718 for idx, i in enumerate(disks_input):
719 baserlib.CheckType(i, dict, "Disk %d specification" % idx)
720
721
722 try:
723 size = i[constants.IDISK_SIZE]
724 except KeyError:
725 raise http.HttpBadRequest("Disk %d specification wrong: missing disk"
726 " size" % idx)
727
728 disk = {
729 constants.IDISK_SIZE: size,
730 }
731
732
733 try:
734 disk_access = i[constants.IDISK_MODE]
735 except KeyError:
736 pass
737 else:
738 disk[constants.IDISK_MODE] = disk_access
739
740 disks.append(disk)
741
742 assert len(disks_input) == len(disks)
743
744
745 nics_input = baserlib.CheckParameter(data, "nics", exptype=list)
746
747 nics = []
748 for idx, i in enumerate(nics_input):
749 baserlib.CheckType(i, dict, "NIC %d specification" % idx)
750
751 nic = {}
752
753 for field in constants.INIC_PARAMS:
754 try:
755 value = i[field]
756 except KeyError:
757 continue
758
759 nic[field] = value
760
761 nics.append(nic)
762
763 assert len(nics_input) == len(nics)
764
765
766 hvparams = baserlib.CheckParameter(data, "hvparams", default={})
767 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)
768
769 beparams = baserlib.CheckParameter(data, "beparams", default={})
770 utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)
771
772 return opcodes.OpInstanceCreate(
773 mode=baserlib.CheckParameter(data, "mode"),
774 instance_name=baserlib.CheckParameter(data, "name"),
775 os_type=baserlib.CheckParameter(data, "os"),
776 osparams=baserlib.CheckParameter(data, "osparams", default={}),
777 force_variant=baserlib.CheckParameter(data, "force_variant",
778 default=False),
779 no_install=baserlib.CheckParameter(data, "no_install", default=False),
780 pnode=baserlib.CheckParameter(data, "pnode", default=None),
781 snode=baserlib.CheckParameter(data, "snode", default=None),
782 disk_template=baserlib.CheckParameter(data, "disk_template"),
783 disks=disks,
784 nics=nics,
785 src_node=baserlib.CheckParameter(data, "src_node", default=None),
786 src_path=baserlib.CheckParameter(data, "src_path", default=None),
787 start=baserlib.CheckParameter(data, "start", default=True),
788 wait_for_sync=True,
789 ip_check=baserlib.CheckParameter(data, "ip_check", default=True),
790 name_check=baserlib.CheckParameter(data, "name_check", default=True),
791 file_storage_dir=baserlib.CheckParameter(data, "file_storage_dir",
792 default=None),
793 file_driver=baserlib.CheckParameter(data, "file_driver",
794 default=constants.FD_LOOP),
795 source_handshake=baserlib.CheckParameter(data, "source_handshake",
796 default=None),
797 source_x509_ca=baserlib.CheckParameter(data, "source_x509_ca",
798 default=None),
799 source_instance_name=baserlib.CheckParameter(data, "source_instance_name",
800 default=None),
801 iallocator=baserlib.CheckParameter(data, "iallocator", default=None),
802 hypervisor=baserlib.CheckParameter(data, "hypervisor", default=None),
803 hvparams=hvparams,
804 beparams=beparams,
805 dry_run=dry_run,
806 )
807
810 """/2/instances resource.
811
812 """
828
830 """Parses an instance creation request version 0.
831
832 Request data version 0 is deprecated and should not be used anymore.
833
834 @rtype: L{opcodes.OpInstanceCreate}
835 @return: Instance creation opcode
836
837 """
838
839 beparams = baserlib.MakeParamsDict(self.request_body,
840 constants.BES_PARAMETERS)
841 hvparams = baserlib.MakeParamsDict(self.request_body,
842 constants.HVS_PARAMETERS)
843 fn = self.getBodyParameter
844
845
846 disk_data = fn('disks')
847 if not isinstance(disk_data, list):
848 raise http.HttpBadRequest("The 'disks' parameter should be a list")
849 disks = []
850 for idx, d in enumerate(disk_data):
851 if not isinstance(d, int):
852 raise http.HttpBadRequest("Disk %d specification wrong: should"
853 " be an integer" % idx)
854 disks.append({"size": d})
855
856
857 nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
858 if fn("ip", None) is not None:
859 nics[0]["ip"] = fn("ip")
860 if fn("mode", None) is not None:
861 nics[0]["mode"] = fn("mode")
862 if fn("link", None) is not None:
863 nics[0]["link"] = fn("link")
864 if fn("bridge", None) is not None:
865 nics[0]["bridge"] = fn("bridge")
866
867
868 return opcodes.OpInstanceCreate(
869 mode=constants.INSTANCE_CREATE,
870 instance_name=fn('name'),
871 disks=disks,
872 disk_template=fn('disk_template'),
873 os_type=fn('os'),
874 pnode=fn('pnode', None),
875 snode=fn('snode', None),
876 iallocator=fn('iallocator', None),
877 nics=nics,
878 start=fn('start', True),
879 ip_check=fn('ip_check', True),
880 name_check=fn('name_check', True),
881 wait_for_sync=True,
882 hypervisor=fn('hypervisor', None),
883 hvparams=hvparams,
884 beparams=beparams,
885 file_storage_dir=fn('file_storage_dir', None),
886 file_driver=fn('file_driver', constants.FD_LOOP),
887 dry_run=bool(self.dryRun()),
888 )
889
891 """Create an instance.
892
893 @return: a job id
894
895 """
896 if not isinstance(self.request_body, dict):
897 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
898
899
900 data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
901
902 if data_version == 0:
903 op = self._ParseVersion0CreateRequest()
904 elif data_version == 1:
905 op = _ParseInstanceCreateRequestVersion1(self.request_body,
906 self.dryRun())
907 else:
908 raise http.HttpBadRequest("Unsupported request data version %s" %
909 data_version)
910
911 return baserlib.SubmitJob([op])
912
915 """/2/instances/[instance_name] resource.
916
917 """
931
940
943 """/2/instances/[instance_name]/info resource.
944
945 """
956
959 """/2/instances/[instance_name]/reboot resource.
960
961 Implements an instance reboot.
962
963 """
965 """Reboot an instance.
966
967 The URI takes type=[hard|soft|full] and
968 ignore_secondaries=[False|True] parameters.
969
970 """
971 instance_name = self.items[0]
972 reboot_type = self.queryargs.get('type',
973 [constants.INSTANCE_REBOOT_HARD])[0]
974 ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
975 op = opcodes.OpInstanceReboot(instance_name=instance_name,
976 reboot_type=reboot_type,
977 ignore_secondaries=ignore_secondaries,
978 dry_run=bool(self.dryRun()))
979
980 return baserlib.SubmitJob([op])
981
984 """/2/instances/[instance_name]/startup resource.
985
986 Implements an instance startup.
987
988 """
990 """Startup an instance.
991
992 The URI takes a force=[False|True] parameter to start the instance even
993 if secondary disks are failing, and an optional no_remember=[0|1] parameter.
994
995 """
996 instance_name = self.items[0]
997 force_startup = bool(self._checkIntVariable('force'))
998 no_remember = bool(self._checkIntVariable('no_remember'))
999 op = opcodes.OpInstanceStartup(instance_name=instance_name,
1000 force=force_startup,
1001 dry_run=bool(self.dryRun()),
1002 no_remember=no_remember)
1003
1004 return baserlib.SubmitJob([op])
1005
1008 """/2/instances/[instance_name]/shutdown resource.
1009
1010 Implements an instance shutdown.
1011
1012 """
1024
1027 """Parses a request for reinstalling an instance.
1028
1029 """
1030 if not isinstance(data, dict):
1031 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
1032
1033 ostype = baserlib.CheckParameter(data, "os", default=None)
1034 start = baserlib.CheckParameter(data, "start", exptype=bool,
1035 default=True)
1036 osparams = baserlib.CheckParameter(data, "osparams", default=None)
1037
1038 ops = [
1039 opcodes.OpInstanceShutdown(instance_name=name),
1040 opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
1041 osparams=osparams),
1042 ]
1043
1044 if start:
1045 ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
1046
1047 return ops
1048
1051 """/2/instances/[instance_name]/reinstall resource.
1052
1053 Implements an instance reinstall.
1054
1055 """
1057 """Reinstall an instance.
1058
1059 The URI takes os=name and nostartup=[0|1] optional
1060 parameters. By default, the instance will be started
1061 automatically.
1062
1063 """
1064 if self.request_body:
1065 if self.queryargs:
1066 raise http.HttpBadRequest("Can't combine query and body parameters")
1067
1068 body = self.request_body
1069 elif self.queryargs:
1070
1071 body = {
1072 "os": self._checkStringVariable("os"),
1073 "start": not self._checkIntVariable("nostartup"),
1074 }
1075 else:
1076 body = {}
1077
1078 ops = _ParseInstanceReinstallRequest(self.items[0], body)
1079
1080 return baserlib.SubmitJob(ops)
1081
1084 """/2/instances/[instance_name]/replace-disks resource.
1085
1086 """
1088 """Replaces disks on an instance.
1089
1090 """
1091 instance_name = self.items[0]
1092 remote_node = self._checkStringVariable("remote_node", default=None)
1093 mode = self._checkStringVariable("mode", default=None)
1094 raw_disks = self._checkStringVariable("disks", default=None)
1095 iallocator = self._checkStringVariable("iallocator", default=None)
1096
1097 if raw_disks:
1098 try:
1099 disks = [int(part) for part in raw_disks.split(",")]
1100 except ValueError, err:
1101 raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
1102 else:
1103 disks = []
1104
1105 op = opcodes.OpInstanceReplaceDisks(instance_name=instance_name,
1106 remote_node=remote_node,
1107 mode=mode,
1108 disks=disks,
1109 iallocator=iallocator)
1110
1111 return baserlib.SubmitJob([op])
1112
1115 """/2/instances/[instance_name]/activate-disks resource.
1116
1117 """
1119 """Activate disks for an instance.
1120
1121 The URI may contain ignore_size=[0|1] to ignore the currently recorded size.
1122
1123 """
1124 instance_name = self.items[0]
1125 ignore_size = bool(self._checkIntVariable('ignore_size'))
1126
1127 op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
1128 ignore_size=ignore_size)
1129
1130 return baserlib.SubmitJob([op])
1131
1134 """/2/instances/[instance_name]/deactivate-disks resource.
1135
1136 """
1146
1149 """/2/instances/[instance_name]/prepare-export resource.
1150
1151 """
1165
1168 """Parses a request for an instance export.
1169
1170 @rtype: L{opcodes.OpBackupExport}
1171 @return: Instance export opcode
1172
1173 """
1174 mode = baserlib.CheckParameter(data, "mode",
1175 default=constants.EXPORT_MODE_LOCAL)
1176 target_node = baserlib.CheckParameter(data, "destination")
1177 shutdown = baserlib.CheckParameter(data, "shutdown", exptype=bool)
1178 remove_instance = baserlib.CheckParameter(data, "remove_instance",
1179 exptype=bool, default=False)
1180 x509_key_name = baserlib.CheckParameter(data, "x509_key_name", default=None)
1181 destination_x509_ca = baserlib.CheckParameter(data, "destination_x509_ca",
1182 default=None)
1183
1184 return opcodes.OpBackupExport(instance_name=name,
1185 mode=mode,
1186 target_node=target_node,
1187 shutdown=shutdown,
1188 remove_instance=remove_instance,
1189 x509_key_name=x509_key_name,
1190 destination_x509_ca=destination_x509_ca)
1191
1194 """/2/instances/[instance_name]/export resource.
1195
1196 """
1209
1212 """Parses a request for an instance migration.
1213
1214 @rtype: L{opcodes.OpInstanceMigrate}
1215 @return: Instance migration opcode
1216
1217 """
1218 mode = baserlib.CheckParameter(data, "mode", default=None)
1219 cleanup = baserlib.CheckParameter(data, "cleanup", exptype=bool,
1220 default=False)
1221
1222 return opcodes.OpInstanceMigrate(instance_name=name, mode=mode,
1223 cleanup=cleanup)
1224
1227 """/2/instances/[instance_name]/migrate resource.
1228
1229 """
1241
1244 """Parses a request for renaming an instance.
1245
1246 @rtype: L{opcodes.OpInstanceRename}
1247 @return: Instance rename opcode
1248
1249 """
1250 new_name = baserlib.CheckParameter(data, "new_name")
1251 ip_check = baserlib.CheckParameter(data, "ip_check", default=True)
1252 name_check = baserlib.CheckParameter(data, "name_check", default=True)
1253
1254 return opcodes.OpInstanceRename(instance_name=name, new_name=new_name,
1255 name_check=name_check, ip_check=ip_check)
1256
1259 """/2/instances/[instance_name]/rename resource.
1260
1261 """
1273
1276 """Parses a request for modifying an instance.
1277
1278 @rtype: L{opcodes.OpInstanceSetParams}
1279 @return: Instance modify opcode
1280
1281 """
1282 osparams = baserlib.CheckParameter(data, "osparams", default={})
1283 force = baserlib.CheckParameter(data, "force", default=False)
1284 nics = baserlib.CheckParameter(data, "nics", default=[])
1285 disks = baserlib.CheckParameter(data, "disks", default=[])
1286 disk_template = baserlib.CheckParameter(data, "disk_template", default=None)
1287 remote_node = baserlib.CheckParameter(data, "remote_node", default=None)
1288 os_name = baserlib.CheckParameter(data, "os_name", default=None)
1289 force_variant = baserlib.CheckParameter(data, "force_variant", default=False)
1290
1291
1292 hvparams = baserlib.CheckParameter(data, "hvparams", default={})
1293 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES,
1294 allowed_values=[constants.VALUE_DEFAULT])
1295
1296 beparams = baserlib.CheckParameter(data, "beparams", default={})
1297 utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES,
1298 allowed_values=[constants.VALUE_DEFAULT])
1299
1300 return opcodes.OpInstanceSetParams(instance_name=name, hvparams=hvparams,
1301 beparams=beparams, osparams=osparams,
1302 force=force, nics=nics, disks=disks,
1303 disk_template=disk_template,
1304 remote_node=remote_node, os_name=os_name,
1305 force_variant=force_variant)
1306
1309 """/2/instances/[instance_name]/modify resource.
1310
1311 """
1323
1326 """/2/instances/[instance_name]/disk/[disk_index]/grow resource.
1327
1328 """
1330 """Increases the size of an instance disk.
1331
1332 @return: a job id
1333
1334 """
1335 op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
1336 "instance_name": self.items[0],
1337 "disk": int(self.items[1]),
1338 })
1339
1340 return baserlib.SubmitJob([op])
1341
1344 """/2/instances/[instance_name]/console resource.
1345
1346 """
1347 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
1348
1350 """Request information for connecting to instance's console.
1351
1352 @return: Serialized instance console description, see
1353 L{objects.InstanceConsole}
1354
1355 """
1356 client = baserlib.GetClient()
1357
1358 ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)
1359
1360 if console is None:
1361 raise http.HttpServiceUnavailable("Instance console unavailable")
1362
1363 assert isinstance(console, dict)
1364 return console
1365
1430
1439
1448
1457