31 """Remote API resource implementations.
32
33 PUT or POST?
34 ============
35
36 According to RFC2616 the main difference between PUT and POST is that
37 POST can create new resources but PUT can only create the resource the
38 URI was pointing to on the PUT request.
39
40 In the context of this module POST on ``/2/instances`` to change an existing
41 entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
42 new instance) with a name specified in the request.
43
44 Quoting from RFC2616, section 9.6::
45
46 The fundamental difference between the POST and PUT requests is reflected in
47 the different meaning of the Request-URI. The URI in a POST request
48 identifies the resource that will handle the enclosed entity. That resource
49 might be a data-accepting process, a gateway to some other protocol, or a
50 separate entity that accepts annotations. In contrast, the URI in a PUT
51 request identifies the entity enclosed with the request -- the user agent
52 knows what URI is intended and the server MUST NOT attempt to apply the
53 request to some other resource. If the server desires that the request be
54 applied to a different URI, it MUST send a 301 (Moved Permanently) response;
55 the user agent MAY then make its own decision regarding whether or not to
56 redirect the request.
57
58 So when adding new methods, if they are operating on the URI entity itself,
59 PUT should be preferred over POST.
60
61 """
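
# Illustrative sketch of the convention above, using the group resources
# defined later in this module: POST on the ``/2/groups`` collection creates a
# new entity (OpGroupAdd), while PUT on ``/2/groups/[group_name]/modify``
# operates on the entity the URI already names (OpGroupSetParams).  The group
# name and body fields are placeholders and the bodies are abbreviated.
def _ExamplePutVsPost():
  create = ("POST", "/2/groups", {"name": "group1"})
  modify = ("PUT", "/2/groups/group1/modify", {"alloc_policy": "preferred"})
  return [create, modify]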
62
63
64
65
66
67 from ganeti import opcodes
68 from ganeti import objects
69 from ganeti import http
70 from ganeti import constants
71 from ganeti import cli
72 from ganeti import rapi
73 from ganeti import ht
74 from ganeti import compat
75 from ganeti.rapi import baserlib
76
77
78 _COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
79 I_FIELDS = ["name", "admin_state", "os",
80 "pnode", "snodes",
81 "disk_template",
82 "nic.ips", "nic.macs", "nic.modes", "nic.uuids", "nic.names",
83 "nic.links", "nic.networks", "nic.networks.names", "nic.bridges",
84 "network_port",
85 "disk.sizes", "disk.spindles", "disk_usage", "disk.uuids",
86 "disk.names", "disk.storage_ids", "disk.providers",
87 "beparams", "hvparams",
88 "oper_state", "oper_ram", "oper_vcpus", "status",
89 "custom_hvparams", "custom_beparams", "custom_nicparams",
90 ] + _COMMON_FIELDS
91
92 N_FIELDS = ["name", "offline", "master_candidate", "drained",
93 "dtotal", "dfree", "sptotal", "spfree",
94 "mtotal", "mnode", "mfree", "hv_state",
95 "pinst_cnt", "sinst_cnt",
96 "ctotal", "cnos", "cnodes", "csockets",
97 "pip", "sip", "role",
98 "pinst_list", "sinst_list",
99 "master_capable", "vm_capable",
100 "ndparams",
101 "group.uuid",
102 ] + _COMMON_FIELDS
103
104 NET_FIELDS = ["name", "network", "gateway",
105 "network6", "gateway6",
106 "mac_prefix",
107 "free_count", "reserved_count",
108 "map", "group_list", "inst_list",
109 "external_reservations",
110 ] + _COMMON_FIELDS
111
112 G_FIELDS = [
113 "alloc_policy",
114 "name",
115 "node_cnt",
116 "node_list",
117 "ipolicy",
118 "custom_ipolicy",
119 "diskparams",
120 "custom_diskparams",
121 "ndparams",
122 "custom_ndparams"
123 ] + _COMMON_FIELDS
124
125 FILTER_RULE_FIELDS = [
126 "watermark",
127 "priority",
128 "predicates",
129 "action",
130 "reason_trail",
131 "uuid",
132 ]
133
134 J_FIELDS_BULK = [
135 "id", "ops", "status", "summary",
136 "opstatus",
137 "received_ts", "start_ts", "end_ts",
138 ]
139
140 J_FIELDS = J_FIELDS_BULK + [
141 "oplog",
142 "opresult",
143 ]
144
145 _NR_DRAINED = "drained"
146 _NR_MASTER_CANDIDATE = "master-candidate"
147 _NR_MASTER = "master"
148 _NR_OFFLINE = "offline"
149 _NR_REGULAR = "regular"
150
151 _NR_MAP = {
152 constants.NR_MASTER: _NR_MASTER,
153 constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
154 constants.NR_DRAINED: _NR_DRAINED,
155 constants.NR_OFFLINE: _NR_OFFLINE,
156 constants.NR_REGULAR: _NR_REGULAR,
157 }
158
159 assert frozenset(_NR_MAP.keys()) == constants.NR_ALL
160
161
162 _REQ_DATA_VERSION = "__version__"
163
164
165 _INST_CREATE_REQV1 = "instance-create-reqv1"
166
167
168 _INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
169
170
171 _NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
172
173
174 _NODE_EVAC_RES1 = "node-evac-res1"
175
176 ALL_FEATURES = compat.UniqueFrozenset([
177 _INST_CREATE_REQV1,
178 _INST_REINSTALL_REQV1,
179 _NODE_MIGRATE_REQV1,
180 _NODE_EVAC_RES1,
181 ])
182
183
184 _WFJC_TIMEOUT = 10
190 """Updates the beparams dict of inst to support the memory field.
191
192 @param inst: Inst dict
193 @return: Updated inst dict
194
195 """
196 beparams = inst["beparams"]
197 beparams[constants.BE_MEMORY] = beparams[constants.BE_MAXMEM]
198
199 return inst
200
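# Illustrative, self-contained version of the fix-up above, with literal keys
# standing in for the backend parameter constants ("maxmem"/"memory" mirror
# constants.BE_MAXMEM/BE_MEMORY; the sizes are made up).
def _ExampleUpdateBeparams():
  inst = {"beparams": {"maxmem": 512, "minmem": 256}}
  inst["beparams"]["memory"] = inst["beparams"]["maxmem"]
  return inst  # {"beparams": {"maxmem": 512, "minmem": 256, "memory": 512}}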
201
202 class R_root(baserlib.ResourceBase):
203 """/ resource.
204
205 """
206 @staticmethod
208 """Supported for legacy reasons.
209
210 """
211 return None
212
213
214 class R_2(R_root):
215 """/2 resource.
216
217 """
218
221 """/version resource.
222
223 This resource should be used to determine the remote API version and
224 to adapt clients accordingly.
225
226 """
227 @staticmethod
233
234
235 class R_2_info(baserlib.OpcodeResource):
236 """/2/info resource.
237
238 """
239 GET_OPCODE = opcodes.OpClusterQuery
240 GET_ALIASES = {
241 "volume_group_name": "vg_name",
242 "drbd_usermode_helper": "drbd_helper",
243 }
244
251
254 """/2/features resource.
255
256 """
257 @staticmethod
259 """Returns list of optional RAPI features implemented.
260
261 """
262 return list(ALL_FEATURES)
263
264
265 class R_2_os(baserlib.OpcodeResource):
266 """/2/os resource.
267
268 """
269 GET_OPCODE = opcodes.OpOsDiagnose
270
272 """Return a list of all OSes.
273
274 Can return error 500 in case of a problem.
275
276 Example: ["debian-etch"]
277
278 """
279 cl = self.GetClient()
280 op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
281 job_id = self.SubmitJob([op], cl=cl)
282
283 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
284 diagnose_data = result[0]
285
286 if not isinstance(diagnose_data, list):
287 raise http.HttpBadGateway(message="Can't get OS list")
288
289 os_names = []
290 for (name, variants) in diagnose_data:
291 os_names.extend(cli.CalculateOSNames(name, variants))
292
293 return os_names
294
301
311
314 """Checks and extracts filter rule parameters from a request body.
315
316 @return: the checked parameters: (priority, predicates, action).
317
318 """
319
320 if not isinstance(data, dict):
321 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
322
323
324 allowed_params = set(["priority", "predicates", "action", "reason"])
325 for param in data:
326 if param not in allowed_params:
327 raise http.HttpBadRequest("Invalid body parameters: filter rule doesn't"
328 " support the parameter '%s'" % param)
329
330 priority = baserlib.CheckParameter(
331 data, "priority", exptype=int, default=0)
332
333
334 predicates = baserlib.CheckParameter(
335 data, "predicates", exptype=list, default=[])
336
337
338 action = baserlib.CheckParameter(data, "action", default="CONTINUE")
339
340 reason = baserlib.CheckParameter(data, "reason", exptype=list, default=[])
341
342 return (priority, predicates, action, reason)
343
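# Illustrative request body accepted by checkFilterParameters above; the
# action and reason entries are placeholders in the documented list formats,
# and any key outside priority/predicates/action/reason is rejected with
# HTTP 400.
def _ExampleFilterRuleBody():
  body = {
    "priority": 4,
    "action": "REJECT",
    "reason": [["rapi", "maintenance window", 0]],
  }
  # Omitted keys fall back to their defaults, so this yields
  # (4, [], "REJECT", [["rapi", "maintenance window", 0]]).
  return checkFilterParameters(body)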
346 """/2/filters resource.
347
348 """
349
365
367 """Adds a filter rule.
368
369 @return: the UUID of the newly created filter rule.
370
371 """
372 priority, predicates, action, reason = \
373 checkFilterParameters(self.request_body)
374
375 reason.append(self.GetAuthReason())
376
377
378 return self.GetClient().ReplaceFilter(None, priority, predicates, action,
379 reason)
380
383 """/2/filters/[filter_uuid] resource.
384
385 """
387 """Returns a filter rule.
388
389 @return: a dictionary with filter rule parameters.
390 The result includes:
391 - uuid: unique filter ID string
392 - watermark: highest job ID ever used as a number
393 - priority: filter priority as a non-negative number
394 - predicates: filter predicates, each one being a list
395 with the first element being the name of the predicate
396 and the rest being parameters suitable for that predicate
397 - action: effect of the filter as a string
398 - reason_trail: reasons for the addition of this filter as a
399 list of lists
400
401 """
402 uuid = self.items[0]
403
404 result = baserlib.HandleItemQueryErrors(self.GetClient().QueryFilters,
405 uuids=[uuid],
406 fields=FILTER_RULE_FIELDS)
407
408 return baserlib.MapFields(FILTER_RULE_FIELDS, result[0])
409
411 """Replaces an existing filter rule, or creates one if it doesn't
412 exist already.
413
414 @return: the UUID of the changed or created filter rule.
415
416 """
417 uuid = self.items[0]
418
419 priority, predicates, action, reason = \
420 checkFilterParameters(self.request_body)
421
422 reason.append(self.GetAuthReason())
423
424 return self.GetClient().ReplaceFilter(uuid, priority, predicates, action,
425 reason)
426
428 """Deletes a filter rule.
429
430 """
431 uuid = self.items[0]
432 return self.GetClient().DeleteFilter(uuid)
433
434
435 class R_2_jobs(baserlib.ResourceBase):
436 """/2/jobs resource.
437
438 """
454
457 """/2/jobs/[job_id] resource.
458
459 """
461 """Returns a job status.
462
463 @return: a dictionary with job parameters.
464 The result includes:
465 - id: job ID as a number
466 - status: current job status as a string
467 - ops: involved OpCodes as a list of dictionaries for each
468 opcode in the job
469 - opstatus: OpCodes status as a list
470 - opresult: OpCodes results as a list of lists
471
472 """
473 job_id = self.items[0]
474 result = self.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
475 if result is None:
476 raise http.HttpNotFound()
477 return baserlib.MapFields(J_FIELDS, result)
478
480 """Cancel not-yet-started job.
481
482 """
483 job_id = self.items[0]
484 result = self.GetClient().CancelJob(job_id)
485 return result
486
489 """/2/jobs/[job_id]/wait resource.
490
491 """
492
493
494 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
495
497 """Waits for job changes.
498
499 """
500 job_id = self.items[0]
501
502 fields = self.getBodyParameter("fields")
503 prev_job_info = self.getBodyParameter("previous_job_info", None)
504 prev_log_serial = self.getBodyParameter("previous_log_serial", None)
505
506 if not isinstance(fields, list):
507 raise http.HttpBadRequest("The 'fields' parameter should be a list")
508
509 if not (prev_job_info is None or isinstance(prev_job_info, list)):
510 raise http.HttpBadRequest("The 'previous_job_info' parameter should"
511 " be a list")
512
513 if not (prev_log_serial is None or
514 isinstance(prev_log_serial, (int, long))):
515 raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
516 " be a number")
517
518 client = self.GetClient()
519 result = client.WaitForJobChangeOnce(job_id, fields,
520 prev_job_info, prev_log_serial,
521 timeout=_WFJC_TIMEOUT)
522 if not result:
523 raise http.HttpNotFound()
524
525 if result == constants.JOB_NOTCHANGED:
526
527 return None
528
529 (job_info, log_entries) = result
530
531 return {
532 "job_info": job_info,
533 "log_entries": log_entries,
534 }
535
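# Illustrative body for the long-poll resource above: "fields" must be a list
# of job fields, and the two "previous_*" values may be None on the first
# call (afterwards a client would echo back the values from the last reply).
# The handler returns None when the job did not change within the roughly
# _WFJC_TIMEOUT-second window.
def _ExampleJobWaitBody():
  return {
    "fields": ["status", "opstatus"],
    "previous_job_info": None,
    "previous_log_serial": None,
  }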
536
537 class R_2_nodes(baserlib.OpcodeResource):
538 """/2/nodes resource.
539
540 """
541
556
559 """/2/nodes/[node_name] resource.
560
561 """
562 GET_ALIASES = {
563 "sip": "secondary_ip",
564 }
565
578
581 """/2/nodes/[node_name]/powercycle resource.
582
583 """
584 POST_OPCODE = opcodes.OpNodePowercycle
585
587 """Tries to powercycle a node.
588
589 """
590 return (self.request_body, {
591 "node_name": self.items[0],
592 "force": self.useForce(),
593 })
594
597 """/2/nodes/[node_name]/role resource.
598
599 """
600 PUT_OPCODE = opcodes.OpNodeSetParams
601
603 """Returns the current node role.
604
605 @return: Node role
606
607 """
608 node_name = self.items[0]
609 client = self.GetClient()
610 result = client.QueryNodes(names=[node_name], fields=["role"],
611 use_locking=self.useLocking())
612
613 return _NR_MAP[result[0][0]]
614
653
656 """/2/nodes/[node_name]/evacuate resource.
657
658 """
659 POST_OPCODE = opcodes.OpNodeEvacuate
660
662 """Evacuate all instances off a node.
663
664 """
665 return (self.request_body, {
666 "node_name": self.items[0],
667 "dry_run": self.dryRun(),
668 })
669
672 """/2/nodes/[node_name]/migrate resource.
673
674 """
675 POST_OPCODE = opcodes.OpNodeMigrate
676
678 """Migrate all primary instances from a node.
679
680 """
681 if self.queryargs:
682
683 if "live" in self.queryargs and "mode" in self.queryargs:
684 raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
685 " be passed")
686
687 if "live" in self.queryargs:
688 if self._checkIntVariable("live", default=1):
689 mode = constants.HT_MIGRATION_LIVE
690 else:
691 mode = constants.HT_MIGRATION_NONLIVE
692 else:
693 mode = self._checkStringVariable("mode", default=None)
694
695 data = {
696 "mode": mode,
697 }
698 else:
699 data = self.request_body
700
701 return (data, {
702 "node_name": self.items[0],
703 })
704
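# Illustrative, equivalent ways of asking for a live migration as handled
# above: either the legacy ``live`` query argument or a body carrying an
# explicit mode (the node name is a placeholder; only one of the ``live`` and
# ``mode`` query arguments may be given at a time).
def _ExampleNodeMigrateRequests():
  legacy = ("POST", "/2/nodes/node1.example.com/migrate?live=1", {})
  explicit = ("POST", "/2/nodes/node1.example.com/migrate",
              {"mode": constants.HT_MIGRATION_LIVE})
  return [legacy, explicit]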
707 """/2/nodes/[node_name]/modify resource.
708
709 """
710 POST_OPCODE = opcodes.OpNodeSetParams
711
713 """Changes parameters of a node.
714
715 """
716 assert len(self.items) == 1
717
718 return (self.request_body, {
719 "node_name": self.items[0],
720 })
721
747
778
801
804 """/2/networks resource.
805
806 """
807 POST_OPCODE = opcodes.OpNetworkAdd
808 POST_RENAME = {
809 "name": "network_name",
810 }
811
813 """Create a network.
814
815 """
816 assert not self.items
817 return (self.request_body, {
818 "dry_run": self.dryRun(),
819 })
820
835
866
883
900
916
919 """/2/groups resource.
920
921 """
922 POST_OPCODE = opcodes.OpGroupAdd
923 POST_RENAME = {
924 "name": "group_name",
925 }
926
928 """Create a node group.
929
930
931 """
932 assert not self.items
933 return (self.request_body, {
934 "dry_run": self.dryRun(),
935 })
936
951
954 """/2/groups/[group_name] resource.
955
956 """
957 DELETE_OPCODE = opcodes.OpGroupRemove
958
971
981
984 """/2/groups/[group_name]/modify resource.
985
986 """
987 PUT_OPCODE = opcodes.OpGroupSetParams
988 PUT_RENAME = {
989 "custom_ndparams": "ndparams",
990 "custom_ipolicy": "ipolicy",
991 "custom_diskparams": "diskparams",
992 }
993
1002
1005 """/2/groups/[group_name]/rename resource.
1006
1007 """
1008 PUT_OPCODE = opcodes.OpGroupRename
1009
1019
1037
1040 """Convert in place the usb_devices string to the proper format.
1041
1042 In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
1043 comma to space because commas cannot be accepted on the command line
1044 (they already act as the separator between different hvparams). RAPI
1045 should be able to accept commas for backwards compatibility, but we want
1046 it to also accept the new space separator. Therefore, we convert
1047 spaces into commas here and keep the old parsing logic elsewhere.
1048
1049 """
1050 try:
1051 hvparams = data["hvparams"]
1052 usb_devices = hvparams[constants.HV_USB_DEVICES]
1053 hvparams[constants.HV_USB_DEVICES] = usb_devices.replace(" ", ",")
1054 data["hvparams"] = hvparams
1055 except KeyError:
1056
1057 pass
1058
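# Illustrative stand-alone version of the conversion above (the device IDs
# are made up): a space-separated usb_devices value is rewritten to the
# comma-separated form before further parsing.
def _ExampleUsbDevicesConversion():
  hvparams = {constants.HV_USB_DEVICES: "0x046d:0xc526 0x046d:0xc52b"}
  hvparams[constants.HV_USB_DEVICES] = \
    hvparams[constants.HV_USB_DEVICES].replace(" ", ",")
  return hvparams  # the value is now "0x046d:0xc526,0x046d:0xc52b"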
1061 """/2/instances resource.
1062
1063 """
1064 POST_OPCODE = opcodes.OpInstanceCreate
1065 POST_RENAME = {
1066 "os": "os_type",
1067 "name": "instance_name",
1068 }
1069
1085
1113
1116 """/2/instances-multi-alloc resource.
1117
1118 """
1119 POST_OPCODE = opcodes.OpInstanceMultiAlloc
1120
1122 """Try to allocate multiple instances.
1123
1124 @return: A dict with submitted jobs, allocatable instances and failed
1125 allocations
1126
1127 """
1128 if "instances" not in self.request_body:
1129 raise http.HttpBadRequest("Request is missing required 'instances' field"
1130 " in body")
1131
1132
1133
1134 OPCODE_RENAME = {
1135 "os": "os_type",
1136 "name": "instance_name",
1137 }
1138
1139 body = objects.FillDict(self.request_body, {
1140 "instances": [
1141 baserlib.FillOpcode(opcodes.OpInstanceCreate, inst, {},
1142 rename=OPCODE_RENAME)
1143 for inst in self.request_body["instances"]
1144 ],
1145 })
1146
1147 return (body, {
1148 "dry_run": self.dryRun(),
1149 })
1150
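# Illustrative body for the multi-allocation resource above: each entry in
# "instances" is a regular instance creation body (abbreviated here; the
# names are placeholders) and is filled into an OpInstanceCreate opcode.
def _ExampleMultiAllocBody():
  return {
    "instances": [
      {"name": "inst1.example.com", "disk_template": "drbd"},
      {"name": "inst2.example.com", "disk_template": "drbd"},
    ],
  }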
1153 """/2/instances/[instance_name] resource.
1154
1155 """
1156 DELETE_OPCODE = opcodes.OpInstanceRemove
1157
1171
1182
1199
1202 """/2/instances/[instance_name]/reboot resource.
1203
1204 Implements an instance reboot.
1205
1206 """
1207 POST_OPCODE = opcodes.OpInstanceReboot
1208
1210 """Reboot an instance.
1211
1212 The URI takes type=[hard|soft|full] and
1213 ignore_secondaries=[False|True] parameters.
1214
1215 """
1216 return (self.request_body, {
1217 "instance_name": self.items[0],
1218 "reboot_type":
1219 self.queryargs.get("type", [constants.INSTANCE_REBOOT_HARD])[0],
1220 "ignore_secondaries": bool(self._checkIntVariable("ignore_secondaries")),
1221 "dry_run": self.dryRun(),
1222 })
1223
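# Illustrative reboot requests as handled above: the reboot type and the
# ``ignore_secondaries`` flag come from the query string, and the type
# defaults to a hard reboot when omitted (the instance name is a
# placeholder).
def _ExampleRebootRequests():
  default = ("POST", "/2/instances/inst1.example.com/reboot", {})
  soft = ("POST",
          "/2/instances/inst1.example.com/reboot"
          "?type=soft&ignore_secondaries=0", {})
  return [default, soft]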
1226 """/2/instances/[instance_name]/startup resource.
1227
1228 Implements an instance startup.
1229
1230 """
1231 PUT_OPCODE = opcodes.OpInstanceStartup
1232
1246
1249 """/2/instances/[instance_name]/shutdown resource.
1250
1251 Implements an instance shutdown.
1252
1253 """
1254 PUT_OPCODE = opcodes.OpInstanceShutdown
1255
1265
1268 """Parses a request for reinstalling an instance.
1269
1270 """
1271 if not isinstance(data, dict):
1272 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
1273
1274 ostype = baserlib.CheckParameter(data, "os", default=None)
1275 start = baserlib.CheckParameter(data, "start", exptype=bool,
1276 default=True)
1277 osparams = baserlib.CheckParameter(data, "osparams", default=None)
1278
1279 ops = [
1280 opcodes.OpInstanceShutdown(instance_name=name),
1281 opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
1282 osparams=osparams),
1283 ]
1284
1285 if start:
1286 ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
1287
1288 return ops
1289
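# Illustrative use of the parser above: with "start" left at its default the
# resulting job shuts the instance down, reinstalls it and starts it again
# (the instance and OS names are placeholders).
def _ExampleReinstallOps():
  return _ParseInstanceReinstallRequest("inst1.example.com",
                                        {"os": "debian-etch"})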
1292 """/2/instances/[instance_name]/reinstall resource.
1293
1294 Implements an instance reinstall.
1295
1296 """
1297 POST_OPCODE = opcodes.OpInstanceReinstall
1298
1300 """Reinstall an instance.
1301
1302 The URI takes os=name and nostartup=[0|1] optional
1303 parameters. By default, the instance will be started
1304 automatically.
1305
1306 """
1307 if self.request_body:
1308 if self.queryargs:
1309 raise http.HttpBadRequest("Can't combine query and body parameters")
1310
1311 body = self.request_body
1312 elif self.queryargs:
1313
1314 body = {
1315 "os": self._checkStringVariable("os"),
1316 "start": not self._checkIntVariable("nostartup"),
1317 }
1318 else:
1319 body = {}
1320
1321 ops = _ParseInstanceReinstallRequest(self.items[0], body)
1322
1323 return self.SubmitJob(ops)
1324
1327 """/2/instances/[instance_name]/replace-disks resource.
1328
1329 """
1330 POST_OPCODE = opcodes.OpInstanceReplaceDisks
1331
1333 """Replaces disks on an instance.
1334
1335 """
1336 static = {
1337 "instance_name": self.items[0],
1338 }
1339
1340 if self.request_body:
1341 data = self.request_body
1342 elif self.queryargs:
1343
1344 data = {
1345 "remote_node": self._checkStringVariable("remote_node", default=None),
1346 "mode": self._checkStringVariable("mode", default=None),
1347 "disks": self._checkStringVariable("disks", default=None),
1348 "iallocator": self._checkStringVariable("iallocator", default=None),
1349 }
1350 else:
1351 data = {}
1352
1353
1354 try:
1355 raw_disks = data.pop("disks")
1356 except KeyError:
1357 pass
1358 else:
1359 if raw_disks:
1360 if ht.TListOf(ht.TInt)(raw_disks):
1361 data["disks"] = raw_disks
1362 else:
1363
1364 try:
1365 data["disks"] = [int(part) for part in raw_disks.split(",")]
1366 except (TypeError, ValueError), err:
1367 raise http.HttpBadRequest("Invalid disk index passed: %s" % err)
1368
1369 return (data, static)
1370
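# Illustrative forms of the "disks" value accepted above: a list of integer
# indices passes the ht.TListOf(ht.TInt) check directly, while the legacy
# comma-separated string is split and converted, so both end up as [0, 2].
def _ExampleReplaceDisksIndices():
  as_list = [0, 2]
  as_string = [int(part) for part in "0,2".split(",")]
  return (as_list, as_string)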
1388
1404
1407 """/2/instances/[instance_name]/recreate-disks resource.
1408
1409 """
1410 POST_OPCODE = opcodes.OpInstanceRecreateDisks
1411
1413 """Recreate disks for an instance.
1414
1415 """
1416 return (self.request_body, {
1417 "instance_name": self.items[0],
1418 })
1419
1422 """/2/instances/[instance_name]/prepare-export resource.
1423
1424 """
1425 PUT_OPCODE = opcodes.OpBackupPrepare
1426
1435
1438 """/2/instances/[instance_name]/export resource.
1439
1440 """
1441 PUT_OPCODE = opcodes.OpBackupExport
1442 PUT_RENAME = {
1443 "destination": "target_node",
1444 }
1445
1453
1468
1483
1486 """/2/instances/[instance_name]/rename resource.
1487
1488 """
1489 PUT_OPCODE = opcodes.OpInstanceRename
1490
1498
1501 """/2/instances/[instance_name]/modify resource.
1502
1503 """
1504 PUT_OPCODE = opcodes.OpInstanceSetParams
1505 PUT_RENAME = {
1506 "custom_beparams": "beparams",
1507 "custom_hvparams": "hvparams",
1508 }
1509
1520
1523 """/2/instances/[instance_name]/disk/[disk_index]/grow resource.
1524
1525 """
1526 POST_OPCODE = opcodes.OpInstanceGrowDisk
1527
1529 """Increases the size of an instance disk.
1530
1531 """
1532 return (self.request_body, {
1533 "instance_name": self.items[0],
1534 "disk": int(self.items[1]),
1535 })
1536
1539 """/2/instances/[instance_name]/console resource.
1540
1541 """
1542 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
1543 GET_OPCODE = opcodes.OpInstanceConsole
1544
1546 """Request information for connecting to instance's console.
1547
1548 @return: Serialized instance console description, see
1549 L{objects.InstanceConsole}
1550
1551 """
1552 instance_name = self.items[0]
1553 client = self.GetClient()
1554
1555 ((console, oper_state), ) = \
1556 client.QueryInstances([instance_name], ["console", "oper_state"], False)
1557
1558 if not oper_state:
1559 raise http.HttpServiceUnavailable("Instance console unavailable")
1560
1561 assert isinstance(console, dict)
1562 return console
1563
1566 """Tries to extract C{fields} query parameter.
1567
1568 @type args: dictionary
1569 @rtype: list of string
1570 @raise http.HttpBadRequest: When parameter can't be found
1571
1572 """
1573 try:
1574 fields = args["fields"]
1575 except KeyError:
1576 raise http.HttpBadRequest("Missing 'fields' query argument")
1577
1578 return _SplitQueryFields(fields[0])
1579
1582 """Splits fields as given for a query request.
1583
1584 @type fields: string
1585 @rtype: list of string
1586
1587 """
1588 return [i.strip() for i in fields.split(",")]
1589
1590
1591 class R_2_query(baserlib.ResourceBase):
1592 """/2/query/[resource] resource.
1593
1594 """
1595
1596 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
1597 PUT_ACCESS = GET_ACCESS
1598 GET_OPCODE = opcodes.OpQuery
1599 PUT_OPCODE = opcodes.OpQuery
1600
1601 def _Query(self, fields, qfilter):
1604
1606 """Returns resource information.
1607
1608 @return: Query result, see L{objects.QueryResponse}
1609
1610 """
1611 return self._Query(_GetQueryFields(self.queryargs), None)
1612
1614 """Submits job querying for resources.
1615
1616 @return: Query result, see L{objects.QueryResponse}
1617
1618 """
1619 body = self.request_body
1620
1621 baserlib.CheckType(body, dict, "Body contents")
1622
1623 try:
1624 fields = body["fields"]
1625 except KeyError:
1626 fields = _GetQueryFields(self.queryargs)
1627
1628 qfilter = body.get("qfilter", None)
1629
1630 if qfilter is None:
1631 qfilter = body.get("filter", None)
1632
1633 return self._Query(fields, qfilter)
1634
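# Illustrative body for the query resource above: "fields" selects the
# columns and "qfilter" (or the older "filter" key) restricts the result;
# the filter shown is a placeholder in the query filter list syntax.
def _ExampleQueryBody():
  return {
    "fields": ["name", "status"],
    "qfilter": ["=", "name", "inst1.example.com"],
  }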
1637 """/2/query/[resource]/fields resource.
1638
1639 """
1640 GET_OPCODE = opcodes.OpQueryFields
1641
1643 """Retrieves list of available fields for a resource.
1644
1645 @return: List of serialized L{objects.QueryFieldDefinition}
1646
1647 """
1648 try:
1649 raw_fields = self.queryargs["fields"]
1650 except KeyError:
1651 fields = None
1652 else:
1653 fields = _SplitQueryFields(raw_fields[0])
1654
1655 return self.GetClient().QueryFields(self.items[0], fields).ToDict()
1656
1732
1741
1750
1759
1768
1777