"""Remote API resource implementations.

PUT or POST?
============

According to RFC2616 the main difference between PUT and POST is that
POST can create new resources but PUT can only create the resource the
URI was pointing to on the PUT request.

In the context of this module POST on ``/2/instances`` to change an existing
entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
new instance) with a name specified in the request.

Quoting from RFC2616, section 9.6::

  The fundamental difference between the POST and PUT requests is reflected in
  the different meaning of the Request-URI. The URI in a POST request
  identifies the resource that will handle the enclosed entity. That resource
  might be a data-accepting process, a gateway to some other protocol, or a
  separate entity that accepts annotations. In contrast, the URI in a PUT
  request identifies the entity enclosed with the request -- the user agent
  knows what URI is intended and the server MUST NOT attempt to apply the
  request to some other resource. If the server desires that the request be
  applied to a different URI, it MUST send a 301 (Moved Permanently) response;
  the user agent MAY then make its own decision regarding whether or not to
  redirect the request.

So when adding new methods, if they are operating on the URI entity itself,
PUT should be preferred over POST.
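
For instance, among the resources defined below, creating an instance is a
POST on ``/2/instances`` (the instance name is part of the request body),
while starting an existing instance is a PUT on
``/2/instances/[instance_name]/startup`` (the URI already names the entity
being acted upon).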

"""


from ganeti import opcodes
from ganeti import objects
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import rapi
from ganeti import ht
from ganeti import compat
from ganeti.rapi import baserlib


_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.uuids", "nic.names",
            "nic.links", "nic.networks", "nic.networks.names", "nic.bridges",
            "network_port",
            "disk.sizes", "disk.spindles", "disk_usage", "disk.uuids",
            "disk.names",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree", "sptotal", "spfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnos", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            "ndparams",
            "group.uuid",
            ] + _COMMON_FIELDS

NET_FIELDS = ["name", "network", "gateway",
              "network6", "gateway6",
              "mac_prefix",
              "free_count", "reserved_count",
              "map", "group_list", "inst_list",
              "external_reservations",
              ] + _COMMON_FIELDS

G_FIELDS = [
  "alloc_policy",
  "name",
  "node_cnt",
  "node_list",
  "ipolicy",
  "custom_ipolicy",
  "diskparams",
  "custom_diskparams",
  "ndparams",
  "custom_ndparams",
  ] + _COMMON_FIELDS

FILTER_RULE_FIELDS = [
  "watermark",
  "priority",
  "predicates",
  "action",
  "reason_trail",
  "uuid",
  ]

J_FIELDS_BULK = [
  "id", "ops", "status", "summary",
  "opstatus",
  "received_ts", "start_ts", "end_ts",
  ]

J_FIELDS = J_FIELDS_BULK + [
  "oplog",
  "opresult",
  ]

_NR_DRAINED = "drained"
_NR_MASTER_CANDIDATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

_NR_MAP = {
  constants.NR_MASTER: _NR_MASTER,
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
  constants.NR_DRAINED: _NR_DRAINED,
  constants.NR_OFFLINE: _NR_OFFLINE,
  constants.NR_REGULAR: _NR_REGULAR,
  }

assert frozenset(_NR_MAP.keys()) == constants.NR_ALL


# Name of the request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Feature string for node migration version 1
_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"

# Feature string for node evacuation result version 1
_NODE_EVAC_RES1 = "node-evac-res1"

ALL_FEATURES = compat.UniqueFrozenset([
  _INST_CREATE_REQV1,
  _INST_REINSTALL_REQV1,
  _NODE_MIGRATE_REQV1,
  _NODE_EVAC_RES1,
  ])

# Timeout (in seconds) for a single WaitForJobChange call in
# /2/jobs/[job_id]/wait
_WFJC_TIMEOUT = 10


def _UpdateBeparams(inst):
  """Updates the beparams dict of inst to support the memory field.

  @param inst: Inst dict
  @return: Updated inst dict

  """
  beparams = inst["beparams"]
  beparams[constants.BE_MEMORY] = beparams[constants.BE_MAXMEM]

  return inst


class R_root(baserlib.ResourceBase):
  """/ resource.

  """
  @staticmethod
  def GET():
    """Supported for legacy reasons.

    """
    return None


class R_2(R_root):
  """/2 resource.

  """


class R_version(baserlib.ResourceBase):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION


class R_2_info(baserlib.OpcodeResource):
  """/2/info resource.

  """
  GET_OPCODE = opcodes.OpClusterQuery
  GET_ALIASES = {
    "volume_group_name": "vg_name",
    "drbd_usermode_helper": "drbd_helper",
    }


class R_2_features(baserlib.ResourceBase):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns list of optional RAPI features implemented.

    """
    return list(ALL_FEATURES)


class R_2_os(baserlib.OpcodeResource):
  """/2/os resource.

  """
  GET_OPCODE = opcodes.OpOsDiagnose

  def GET(self):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = self.GetClient()
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    job_id = self.SubmitJob([op], cl=cl)

    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, variants) in diagnose_data:
      os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names


def checkFilterParameters(data):
  """Checks and extracts filter rule parameters from a request body.

  @return: the checked parameters: (priority, predicates, action, reason).

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  # Only a fixed set of parameters is allowed
  allowed_params = set(["priority", "predicates", "action", "reason"])
  for param in data:
    if param not in allowed_params:
      raise http.HttpBadRequest("Invalid body parameters: filter rule doesn't"
                                " support the parameter '%s'" % param)

  priority = baserlib.CheckParameter(
    data, "priority", exptype=int, default=0)

  predicates = baserlib.CheckParameter(
    data, "predicates", exptype=list, default=[])

  action = baserlib.CheckParameter(data, "action", default="CONTINUE")

  reason = baserlib.CheckParameter(data, "reason", exptype=list, default=[])

  return (priority, predicates, action, reason)
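

# For illustration (values are hypothetical): a request body such as
#   {"priority": 0, "predicates": [], "action": "CONTINUE",
#    "reason": [["rapi", "example", 0]]}
# passes the checks above and is returned as the tuple
# (0, [], "CONTINUE", [["rapi", "example", 0]]).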


class R_2_filters(baserlib.ResourceBase):
  """/2/filters resource.

  """

  def POST(self):
    """Adds a filter rule.

    @return: the UUID of the newly created filter rule.

    """
    priority, predicates, action, reason = \
      checkFilterParameters(self.request_body)

    # Passing None as the UUID makes a new filter rule be created
    return self.GetClient().ReplaceFilter(None, priority, predicates, action,
                                          reason)


class R_2_filters_uuid(baserlib.ResourceBase):
  """/2/filters/[filter_uuid] resource.

  """
  def GET(self):
    """Returns a filter rule.

    @return: a dictionary with filter rule parameters.
      The result includes:
        - uuid: unique filter ID string
        - watermark: highest job ID ever used as a number
        - priority: filter priority as a non-negative number
        - predicates: filter predicates, each one being a list
          with the first element being the name of the predicate
          and the rest being parameters suitable for that predicate
        - action: effect of the filter as a string
        - reason_trail: reasons for the addition of this filter as a
          list of lists

    """
    uuid = self.items[0]

    result = baserlib.HandleItemQueryErrors(self.GetClient().QueryFilters,
                                            uuids=[uuid],
                                            fields=FILTER_RULE_FIELDS)

    return baserlib.MapFields(FILTER_RULE_FIELDS, result[0])

  def PUT(self):
    """Replaces an existing filter rule, or creates one if it doesn't
    exist already.

    @return: the UUID of the changed or created filter rule.

    """
    uuid = self.items[0]

    priority, predicates, action, reason = \
      checkFilterParameters(self.request_body)

    return self.GetClient().ReplaceFilter(uuid, priority, predicates, action,
                                          reason)

  def DELETE(self):
    """Deletes a filter rule.

    """
    uuid = self.items[0]
    return self.GetClient().DeleteFilter(uuid)


class R_2_jobs(baserlib.ResourceBase):
  """/2/jobs resource.

  """


class R_2_jobs_id(baserlib.ResourceBase):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
      The result includes:
        - id: job ID as a number
        - status: current job status as a string
        - ops: involved OpCodes as a list of dictionaries for each
          opcode in the job
        - opstatus: OpCodes status as a list
        - opresult: OpCodes results as a list of lists

    """
    job_id = self.items[0]
    result = self.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(J_FIELDS, result)

  def DELETE(self):
    """Cancel not-yet-started job.

    """
    job_id = self.items[0]
    result = self.GetClient().CancelJob(job_id)
    return result


class R_2_jobs_id_wait(baserlib.ResourceBase):
  """/2/jobs/[job_id]/wait resource.

  """
  # Waiting for job changes blocks the handling thread for up to
  # _WFJC_TIMEOUT seconds, hence the restricted access
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = self.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes within the timeout, tell the caller to retry
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }
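
  # Clients typically poll this resource in a loop. A hypothetical request
  # body would be {"fields": ["status"], "previous_job_info": None,
  # "previous_log_serial": None}; a null response means the job did not
  # change within _WFJC_TIMEOUT seconds and the request should be repeated.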


class R_2_nodes(baserlib.OpcodeResource):
  """/2/nodes resource.

  """


class R_2_nodes_name(baserlib.OpcodeResource):
  """/2/nodes/[node_name] resource.

  """
  GET_ALIASES = {
    "sip": "secondary_ip",
    }


class R_2_nodes_name_powercycle(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/powercycle resource.

  """
  POST_OPCODE = opcodes.OpNodePowercycle

  def GetPostOpInput(self):
    """Tries to powercycle a node.

    """
    return (self.request_body, {
      "node_name": self.items[0],
      "force": self.useForce(),
      })


class R_2_nodes_name_role(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/role resource.

  """
  PUT_OPCODE = opcodes.OpNodeSetParams

  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = self.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]


class R_2_nodes_name_evacuate(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/evacuate resource.

  """
  POST_OPCODE = opcodes.OpNodeEvacuate

  def GetPostOpInput(self):
    """Evacuate all instances off a node.

    """
    return (self.request_body, {
      "node_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_nodes_name_migrate(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/migrate resource.

  """
  POST_OPCODE = opcodes.OpNodeMigrate

  def GetPostOpInput(self):
    """Migrate all primary instances from a node.

    """
    if self.queryargs:
      # Support the old, query-argument based interface
      if "live" in self.queryargs and "mode" in self.queryargs:
        raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                  " be passed")

      if "live" in self.queryargs:
        if self._checkIntVariable("live", default=1):
          mode = constants.HT_MIGRATION_LIVE
        else:
          mode = constants.HT_MIGRATION_NONLIVE
      else:
        mode = self._checkStringVariable("mode", default=None)

      data = {
        "mode": mode,
        }
    else:
      data = self.request_body

    return (data, {
      "node_name": self.items[0],
      })
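
  # For illustration (hypothetical node name): the legacy request
  # "POST /2/nodes/node1.example.com/migrate?live=0" is mapped to the body
  # {"mode": constants.HT_MIGRATION_NONLIVE}, i.e. what a new-style client
  # would send directly in the request body.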


class R_2_nodes_name_modify(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/modify resource.

  """
  POST_OPCODE = opcodes.OpNodeSetParams

  def GetPostOpInput(self):
    """Changes parameters of a node.

    """
    assert len(self.items) == 1

    return (self.request_body, {
      "node_name": self.items[0],
      })


class R_2_networks(baserlib.OpcodeResource):
  """/2/networks resource.

  """
  POST_OPCODE = opcodes.OpNetworkAdd
  POST_RENAME = {
    "name": "network_name",
    }

  def GetPostOpInput(self):
    """Create a network.

    """
    assert not self.items
    return (self.request_body, {
      "dry_run": self.dryRun(),
      })


class R_2_groups(baserlib.OpcodeResource):
  """/2/groups resource.

  """
  POST_OPCODE = opcodes.OpGroupAdd
  POST_RENAME = {
    "name": "group_name",
    }

  def GetPostOpInput(self):
    """Create a node group.

    """
    assert not self.items
    return (self.request_body, {
      "dry_run": self.dryRun(),
      })


class R_2_groups_name(baserlib.OpcodeResource):
  """/2/groups/[group_name] resource.

  """
  DELETE_OPCODE = opcodes.OpGroupRemove


class R_2_groups_name_modify(baserlib.OpcodeResource):
  """/2/groups/[group_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpGroupSetParams
  PUT_RENAME = {
    "custom_ndparams": "ndparams",
    "custom_ipolicy": "ipolicy",
    "custom_diskparams": "diskparams",
    }


class R_2_groups_name_rename(baserlib.OpcodeResource):
  """/2/groups/[group_name]/rename resource.

  """
  PUT_OPCODE = opcodes.OpGroupRename


def _ConvertUsbDevices(data):
  """Convert in place the usb_devices string to the proper format.

  In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
  comma to space because commas cannot be accepted on the command line
  (they already act as the separator between different hvparams). RAPI
  should be able to accept commas for backwards compatibility, but we want
  it to also accept the new space separator. Therefore, we convert
  spaces into commas here and keep the old parsing logic elsewhere.

  """
  try:
    hvparams = data["hvparams"]
    usb_devices = hvparams[constants.HV_USB_DEVICES]
    hvparams[constants.HV_USB_DEVICES] = usb_devices.replace(" ", ",")
    data["hvparams"] = hvparams
  except KeyError:
    # No usb_devices to convert
    pass
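
# For example, a request whose hvparams carry usb_devices "dev1 dev2"
# (space-separated, hypothetical identifiers) is rewritten in place to
# "dev1,dev2" before the normal comma-based parsing runs.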


class R_2_instances(baserlib.OpcodeResource):
  """/2/instances resource.

  """
  POST_OPCODE = opcodes.OpInstanceCreate
  POST_RENAME = {
    "os": "os_type",
    "name": "instance_name",
    }


class R_2_instances_multi_alloc(baserlib.OpcodeResource):
  """/2/instances-multi-alloc resource.

  """
  POST_OPCODE = opcodes.OpInstanceMultiAlloc

  def GetPostOpInput(self):
    """Try to allocate multiple instances.

    @return: A dict with submitted jobs, allocatable instances and failed
             allocations

    """
    if "instances" not in self.request_body:
      raise http.HttpBadRequest("Request is missing required 'instances' field"
                                " in body")

    # Unlike most other RAPI calls, this one is composed of individual
    # opcodes, so the "name" and "os" fields of each instance have to be
    # renamed here
    OPCODE_RENAME = {
      "os": "os_type",
      "name": "instance_name",
      }

    body = objects.FillDict(self.request_body, {
      "instances": [
        baserlib.FillOpcode(opcodes.OpInstanceCreate, inst, {},
                            rename=OPCODE_RENAME)
        for inst in self.request_body["instances"]
        ],
      })

    return (body, {
      "dry_run": self.dryRun(),
      })
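
  # A request body is expected to look like (hypothetical values)
  #   {"instances": [{"name": "inst1.example.com", "os": "debian-etch", ...},
  #                  ...]}
  # with each entry being filled into its own OpInstanceCreate as shown above.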


class R_2_instances_name(baserlib.OpcodeResource):
  """/2/instances/[instance_name] resource.

  """
  DELETE_OPCODE = opcodes.OpInstanceRemove


class R_2_instances_name_reboot(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  POST_OPCODE = opcodes.OpInstanceReboot

  def GetPostOpInput(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      "reboot_type":
        self.queryargs.get("type", [constants.INSTANCE_REBOOT_HARD])[0],
      "ignore_secondaries": bool(self._checkIntVariable("ignore_secondaries")),
      "dry_run": self.dryRun(),
      })
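
  # E.g. "POST /2/instances/inst1.example.com/reboot?type=soft" (hypothetical
  # instance name) requests a soft reboot; without "type", the hard reboot
  # default above is used.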


class R_2_instances_name_startup(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  PUT_OPCODE = opcodes.OpInstanceStartup


class R_2_instances_name_shutdown(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  PUT_OPCODE = opcodes.OpInstanceShutdown


def _ParseInstanceReinstallRequest(name, data):
  """Parses a request for reinstalling an instance.

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  ostype = baserlib.CheckParameter(data, "os", default=None)
  start = baserlib.CheckParameter(data, "start", exptype=bool,
                                  default=True)
  osparams = baserlib.CheckParameter(data, "osparams", default=None)

  ops = [
    opcodes.OpInstanceShutdown(instance_name=name),
    opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
                                osparams=osparams),
    ]

  if start:
    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))

  return ops
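
# For example, a body of {"os": "debian-etch", "start": True} (hypothetical OS
# name) expands into the opcode sequence [OpInstanceShutdown,
# OpInstanceReinstall, OpInstanceStartup] for the given instance.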


class R_2_instances_name_reinstall(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  POST_OPCODE = opcodes.OpInstanceReinstall

  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    if self.request_body:
      if self.queryargs:
        raise http.HttpBadRequest("Can't combine query and body parameters")

      body = self.request_body
    elif self.queryargs:
      # Legacy query-argument based interface
      body = {
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),
        }
    else:
      body = {}

    ops = _ParseInstanceReinstallRequest(self.items[0], body)

    return self.SubmitJob(ops)


class R_2_instances_name_replace_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceReplaceDisks

  def GetPostOpInput(self):
    """Replaces disks on an instance.

    """
    static = {
      "instance_name": self.items[0],
      }

    if self.request_body:
      data = self.request_body
    elif self.queryargs:
      # Legacy query-argument based interface
      data = {
        "remote_node": self._checkStringVariable("remote_node", default=None),
        "mode": self._checkStringVariable("mode", default=None),
        "disks": self._checkStringVariable("disks", default=None),
        "iallocator": self._checkStringVariable("iallocator", default=None),
        }
    else:
      data = {}

    # Parse the "disks" value, which may be a list of indices or a
    # comma-separated string
    try:
      raw_disks = data.pop("disks")
    except KeyError:
      pass
    else:
      if raw_disks:
        if ht.TListOf(ht.TInt)(raw_disks):
          data["disks"] = raw_disks
        else:
          # Backwards compatibility for strings of the format "1,2,3"
          try:
            data["disks"] = [int(part) for part in raw_disks.split(",")]
          except (TypeError, ValueError), err:
            raise http.HttpBadRequest("Invalid disk index passed: %s" % err)

    return (data, static)
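
  # Both {"disks": [0, 2]} and the legacy form {"disks": "0,2"} are accepted
  # here; the latter is converted into a list of integer indices above.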


class R_2_instances_name_recreate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/recreate-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceRecreateDisks

  def GetPostOpInput(self):
    """Recreate disks for an instance.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_prepare_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  PUT_OPCODE = opcodes.OpBackupPrepare


class R_2_instances_name_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/export resource.

  """
  PUT_OPCODE = opcodes.OpBackupExport
  PUT_RENAME = {
    "destination": "target_node",
    }


class R_2_instances_name_rename(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/rename resource.

  """
  PUT_OPCODE = opcodes.OpInstanceRename


class R_2_instances_name_modify(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpInstanceSetParams
  PUT_RENAME = {
    "custom_beparams": "beparams",
    "custom_hvparams": "hvparams",
    }


class R_2_instances_name_disk_grow(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  POST_OPCODE = opcodes.OpInstanceGrowDisk

  def GetPostOpInput(self):
    """Increases the size of an instance disk.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      "disk": int(self.items[1]),
      })


class R_2_instances_name_console(baserlib.ResourceBase):
  """/2/instances/[instance_name]/console resource.

  """
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  GET_OPCODE = opcodes.OpInstanceConsole

  def GET(self):
    """Request information for connecting to instance's console.

    @return: Serialized instance console description, see
             L{objects.InstanceConsole}

    """
    instance_name = self.items[0]
    client = self.GetClient()

    ((console, oper_state), ) = \
      client.QueryInstances([instance_name], ["console", "oper_state"], False)

    if not oper_state:
      raise http.HttpServiceUnavailable("Instance console unavailable")

    assert isinstance(console, dict)
    return console


def _GetQueryFields(args):
  """Tries to extract C{fields} query parameter.

  @type args: dictionary
  @rtype: list of string
  @raise http.HttpBadRequest: When parameter can't be found

  """
  try:
    fields = args["fields"]
  except KeyError:
    raise http.HttpBadRequest("Missing 'fields' query argument")

  return _SplitQueryFields(fields[0])


def _SplitQueryFields(fields):
  """Splits fields as given for a query request.

  @type fields: string
  @rtype: list of string

  """
  return [i.strip() for i in fields.split(",")]
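
# E.g. _SplitQueryFields("name, status") returns ["name", "status"], which is
# how the "fields" query argument of the query resources below is interpreted.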


class R_2_query(baserlib.ResourceBase):
  """/2/query/[resource] resource.

  """
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  PUT_ACCESS = GET_ACCESS
  GET_OPCODE = opcodes.OpQuery
  PUT_OPCODE = opcodes.OpQuery

  def _Query(self, fields, qfilter):
    return self.GetClient().Query(self.items[0], fields, qfilter).ToDict()

  def GET(self):
    """Returns resource information.

    @return: Query result, see L{objects.QueryResponse}

    """
    return self._Query(_GetQueryFields(self.queryargs), None)

  def PUT(self):
    """Submits job querying for resources.

    @return: Query result, see L{objects.QueryResponse}

    """
    body = self.request_body

    baserlib.CheckType(body, dict, "Body contents")

    try:
      fields = body["fields"]
    except KeyError:
      fields = _GetQueryFields(self.queryargs)

    qfilter = body.get("qfilter", None)

    # Fall back to the "filter" key if "qfilter" was not given
    if qfilter is None:
      qfilter = body.get("filter", None)

    return self._Query(fields, qfilter)
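
  # A hypothetical PUT body: {"fields": ["name", "status"],
  # "qfilter": ["=", "name", "inst1.example.com"]}. The filter syntax itself
  # is defined by the query layer, not by this resource.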


class R_2_query_fields(baserlib.ResourceBase):
  """/2/query/[resource]/fields resource.

  """
  GET_OPCODE = opcodes.OpQueryFields

  def GET(self):
    """Retrieves list of available fields for a resource.

    @return: List of serialized L{objects.QueryFieldDefinition}

    """
    try:
      raw_fields = self.queryargs["fields"]
    except KeyError:
      fields = None
    else:
      fields = _SplitQueryFields(raw_fields[0])

    return self.GetClient().QueryFields(self.items[0], fields).ToDict()