| Trees | Indices | Help |
|
|---|
|
|
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5 # All rights reserved.
6 #
7 # Redistribution and use in source and binary forms, with or without
8 # modification, are permitted provided that the following conditions are
9 # met:
10 #
11 # 1. Redistributions of source code must retain the above copyright notice,
12 # this list of conditions and the following disclaimer.
13 #
14 # 2. Redistributions in binary form must reproduce the above copyright
15 # notice, this list of conditions and the following disclaimer in the
16 # documentation and/or other materials provided with the distribution.
17 #
18 # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
19 # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20 # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
22 # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
23 # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
24 # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
25 # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
26 # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
27 # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
28 # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29
30
31 """Remote API resource implementations.
32
33 PUT or POST?
34 ============
35
36 According to RFC2616 the main difference between PUT and POST is that
37 POST can create new resources but PUT can only create the resource the
38 URI was pointing to on the PUT request.
39
40 In the context of this module POST on ``/2/instances`` to change an existing
41 entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
42 new instance) with a name specified in the request.
43
44 Quoting from RFC2616, section 9.6::
45
46 The fundamental difference between the POST and PUT requests is reflected in
47 the different meaning of the Request-URI. The URI in a POST request
48 identifies the resource that will handle the enclosed entity. That resource
49 might be a data-accepting process, a gateway to some other protocol, or a
50 separate entity that accepts annotations. In contrast, the URI in a PUT
51 request identifies the entity enclosed with the request -- the user agent
52 knows what URI is intended and the server MUST NOT attempt to apply the
53 request to some other resource. If the server desires that the request be
54 applied to a different URI, it MUST send a 301 (Moved Permanently) response;
55 the user agent MAY then make its own decision regarding whether or not to
56 redirect the request.
57
58 So when adding new methods, if they are operating on the URI entity itself,
59 PUT should be preferred over POST.
60
61 """
62
63 # pylint: disable=C0103
64
65 # C0103: Invalid name, since the R_* names are not conforming
66
67 from ganeti import opcodes
68 from ganeti import objects
69 from ganeti import http
70 from ganeti import constants
71 from ganeti import cli
72 from ganeti import rapi
73 from ganeti import ht
74 from ganeti import compat
75 from ganeti.rapi import baserlib
76
77
# Fields common to every RAPI object kind (instances, nodes, networks, groups).
_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]

# Queryable fields for instance resources (/2/instances).
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.uuids", "nic.names",
            "nic.links", "nic.networks", "nic.networks.names", "nic.bridges",
            "network_port",
            "disk.sizes", "disk.spindles", "disk_usage", "disk.uuids",
            "disk.names", "disk.storage_ids", "disk.providers",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

# Queryable fields for node resources (/2/nodes).
N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree", "sptotal", "spfree",
            "mtotal", "mnode", "mfree", "hv_state",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnos", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            "ndparams",
            "group.uuid",
            ] + _COMMON_FIELDS

# Queryable fields for network resources (/2/networks).
NET_FIELDS = ["name", "network", "gateway",
              "network6", "gateway6",
              "mac_prefix",
              "free_count", "reserved_count",
              "map", "group_list", "inst_list",
              "external_reservations",
              ] + _COMMON_FIELDS

# Queryable fields for node-group resources (/2/groups).
G_FIELDS = [
  "alloc_policy",
  "name",
  "node_cnt",
  "node_list",
  "ipolicy",
  "custom_ipolicy",
  "diskparams",
  "custom_diskparams",
  "ndparams",
  "custom_ndparams"
  ] + _COMMON_FIELDS

# Queryable fields for job-filter resources (/2/filters).
FILTER_RULE_FIELDS = [
  "watermark",
  "priority",
  "predicates",
  "action",
  "reason_trail",
  "uuid",
  ]

# Job fields returned by bulk queries on /2/jobs.
J_FIELDS_BULK = [
  "id", "ops", "status", "summary",
  "opstatus",
  "received_ts", "start_ts", "end_ts",
  ]

# Full per-job field set (/2/jobs/[job_id]); extends the bulk set with the
# potentially large opcode logs and results.
J_FIELDS = J_FIELDS_BULK + [
  "oplog",
  "opresult",
  ]

# External (RAPI-visible) names for the node roles.
_NR_DRAINED = "drained"
_NR_MASTER_CANDIDATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

# Maps internal node-role constants to their RAPI string representation.
_NR_MAP = {
  constants.NR_MASTER: _NR_MASTER,
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
  constants.NR_DRAINED: _NR_DRAINED,
  constants.NR_OFFLINE: _NR_OFFLINE,
  constants.NR_REGULAR: _NR_REGULAR,
  }

# Sanity check: every defined node role must have an external name.
assert frozenset(_NR_MAP.keys()) == constants.NR_ALL

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Feature string for node migration version 1
_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"

# Feature string for node evacuation with LU-generated jobs
_NODE_EVAC_RES1 = "node-evac-res1"

# All optional features advertised via /2/features.
ALL_FEATURES = compat.UniqueFrozenset([
  _INST_CREATE_REQV1,
  _INST_REINSTALL_REQV1,
  _NODE_MIGRATE_REQV1,
  _NODE_EVAC_RES1,
  ])

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10
185
186
187 # FIXME: For compatibility we update the beparams/memory field. Needs to be
188 # removed in Ganeti 2.8
def _UpdateBeparams(inst):
  """Adds the legacy memory field to an instance's beparams dict.

  The instance dict is modified in place: the value stored under the
  maximum-memory key is mirrored under the old C{memory} key so that
  clients relying on the pre-2.8 field keep working.

  @param inst: Inst dict
  @return: Updated inst dict

  """
  inst["beparams"][constants.BE_MEMORY] = \
    inst["beparams"][constants.BE_MAXMEM]
  return inst
200
212
218
221 """/version resource.
222
223 This resource should be used to determine the remote API version and
224 to adapt clients accordingly.
225
226 """
227 @staticmethod
233
236 """/2/info resource.
237
238 """
239 GET_OPCODE = opcodes.OpClusterQuery
240 GET_ALIASES = {
241 "volume_group_name": "vg_name",
242 "drbd_usermode_helper": "drbd_helper",
243 }
244
246 """Returns cluster information.
247
248 """
249 client = self.GetClient()
250 return client.QueryClusterInfo()
251
254 """/2/features resource.
255
256 """
257 @staticmethod
259 """Returns list of optional RAPI features implemented.
260
261 """
262 return list(ALL_FEATURES)
263
266 """/2/os resource.
267
268 """
269 GET_OPCODE = opcodes.OpOsDiagnose
270
272 """Return a list of all OSes.
273
274 Can return error 500 in case of a problem.
275
276 Example: ["debian-etch"]
277
278 """
279 cl = self.GetClient()
280 op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
281 job_id = self.SubmitJob([op], cl=cl)
282 # we use custom feedback function, instead of print we log the status
283 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
284 diagnose_data = result[0]
285
286 if not isinstance(diagnose_data, list):
287 raise http.HttpBadGateway(message="Can't get OS list")
288
289 os_names = []
290 for (name, variants) in diagnose_data:
291 os_names.extend(cli.CalculateOSNames(name, variants))
292
293 return os_names
294
301
304 """/2/modify resource.
305
306 """
307 PUT_OPCODE = opcodes.OpClusterSetParams
308 PUT_FORBIDDEN = [
309 "compression_tools",
310 ]
311
314 """Checks and extracts filter rule parameters from a request body.
315
316 @return: the checked parameters: (priority, predicates, action).
317
318 """
319
320 if not isinstance(data, dict):
321 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
322
323 # Forbid unknown parameters
324 allowed_params = set(["priority", "predicates", "action", "reason"])
325 for param in data:
326 if param not in allowed_params:
327 raise http.HttpBadRequest("Invalid body parameters: filter rule doesn't"
328 " support the parameter '%s'" % param)
329
330 priority = baserlib.CheckParameter(
331 data, "priority", exptype=int, default=0)
332
333 # We leave the deeper check into the predicates list to the server.
334 predicates = baserlib.CheckParameter(
335 data, "predicates", exptype=list, default=[])
336
337 # The action can be a string or a list; we leave the check to the server.
338 action = baserlib.CheckParameter(data, "action", default="CONTINUE")
339
340 reason = baserlib.CheckParameter(data, "reason", exptype=list, default=[])
341
342 return (priority, predicates, action, reason)
343
346 """/2/filters resource.
347
348 """
349
351 """Returns a list of all filter rules.
352
353 @return: a dictionary with filter rule UUID and uri.
354
355 """
356 client = self.GetClient()
357
358 if self.useBulk():
359 bulkdata = client.QueryFilters(None, FILTER_RULE_FIELDS)
360 return baserlib.MapBulkFields(bulkdata, FILTER_RULE_FIELDS)
361 else:
362 jobdata = map(compat.fst, client.QueryFilters(None, ["uuid"]))
363 return baserlib.BuildUriList(jobdata, "/2/filters/%s",
364 uri_fields=("uuid", "uri"))
365
367 """Adds a filter rule.
368
369 @return: the UUID of the newly created filter rule.
370
371 """
372 priority, predicates, action, reason = \
373 checkFilterParameters(self.request_body)
374
375 reason.append(self.GetAuthReason())
376
377 # ReplaceFilter(None, ...) inserts a new filter.
378 return self.GetClient().ReplaceFilter(None, priority, predicates, action,
379 reason)
380
383 """/2/filters/[filter_uuid] resource.
384
385 """
387 """Returns a filter rule.
388
389 @return: a dictionary with job parameters.
390 The result includes:
391 - uuid: unique filter ID string
392 - watermark: highest job ID ever used as a number
393 - priority: filter priority as a non-negative number
394 - predicates: filter predicates, each one being a list
395 with the first element being the name of the predicate
396 and the rest being parameters suitable for that predicate
397 - action: effect of the filter as a string
398 - reason_trail: reasons for the addition of this filter as a
399 list of lists
400
401 """
402 uuid = self.items[0]
403
404 result = baserlib.HandleItemQueryErrors(self.GetClient().QueryFilters,
405 uuids=[uuid],
406 fields=FILTER_RULE_FIELDS)
407
408 return baserlib.MapFields(FILTER_RULE_FIELDS, result[0])
409
411 """Replaces an existing filter rule, or creates one if it doesn't
412 exist already.
413
414 @return: the UUID of the changed or created filter rule.
415
416 """
417 uuid = self.items[0]
418
419 priority, predicates, action, reason = \
420 checkFilterParameters(self.request_body)
421
422 reason.append(self.GetAuthReason())
423
424 return self.GetClient().ReplaceFilter(uuid, priority, predicates, action,
425 reason)
426
428 """Deletes a filter rule.
429
430 """
431 uuid = self.items[0]
432 return self.GetClient().DeleteFilter(uuid)
433
436 """/2/jobs resource.
437
438 """
440 """Returns a dictionary of jobs.
441
442 @return: a dictionary with jobs id and uri.
443
444 """
445 client = self.GetClient()
446
447 if self.useBulk():
448 bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
449 return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
450 else:
451 jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
452 return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
453 uri_fields=("id", "uri"))
454
457 """/2/jobs/[job_id] resource.
458
459 """
461 """Returns a job status.
462
463 @return: a dictionary with job parameters.
464 The result includes:
465 - id: job ID as a number
466 - status: current job status as a string
467 - ops: involved OpCodes as a list of dictionaries for each
468 opcodes in the job
469 - opstatus: OpCodes status as a list
470 - opresult: OpCodes results as a list of lists
471
472 """
473 job_id = self.items[0]
474 result = self.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
475 if result is None:
476 raise http.HttpNotFound()
477 return baserlib.MapFields(J_FIELDS, result)
478
486
489 """/2/jobs/[job_id]/wait resource.
490
491 """
492 # WaitForJobChange provides access to sensitive information and blocks
493 # machine resources (it's a blocking RAPI call), hence restricting access.
494 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
495
497 """Waits for job changes.
498
499 """
500 job_id = self.items[0]
501
502 fields = self.getBodyParameter("fields")
503 prev_job_info = self.getBodyParameter("previous_job_info", None)
504 prev_log_serial = self.getBodyParameter("previous_log_serial", None)
505
506 if not isinstance(fields, list):
507 raise http.HttpBadRequest("The 'fields' parameter should be a list")
508
509 if not (prev_job_info is None or isinstance(prev_job_info, list)):
510 raise http.HttpBadRequest("The 'previous_job_info' parameter should"
511 " be a list")
512
513 if not (prev_log_serial is None or
514 isinstance(prev_log_serial, (int, long))):
515 raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
516 " be a number")
517
518 client = self.GetClient()
519 result = client.WaitForJobChangeOnce(job_id, fields,
520 prev_job_info, prev_log_serial,
521 timeout=_WFJC_TIMEOUT)
522 if not result:
523 raise http.HttpNotFound()
524
525 if result == constants.JOB_NOTCHANGED:
526 # No changes
527 return None
528
529 (job_info, log_entries) = result
530
531 return {
532 "job_info": job_info,
533 "log_entries": log_entries,
534 }
535
538 """/2/nodes resource.
539
540 """
541
543 """Returns a list of all nodes.
544
545 """
546 client = self.GetClient()
547
548 if self.useBulk():
549 bulkdata = client.QueryNodes([], N_FIELDS, False)
550 return baserlib.MapBulkFields(bulkdata, N_FIELDS)
551 else:
552 nodesdata = client.QueryNodes([], ["name"], False)
553 nodeslist = [row[0] for row in nodesdata]
554 return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
555 uri_fields=("id", "uri"))
556
559 """/2/nodes/[node_name] resource.
560
561 """
562 GET_ALIASES = {
563 "sip": "secondary_ip",
564 }
565
567 """Send information about a node.
568
569 """
570 node_name = self.items[0]
571 client = self.GetClient()
572
573 result = baserlib.HandleItemQueryErrors(client.QueryNodes,
574 names=[node_name], fields=N_FIELDS,
575 use_locking=self.useLocking())
576
577 return baserlib.MapFields(N_FIELDS, result[0])
578
581 """/2/nodes/[node_name]/powercycle resource.
582
583 """
584 POST_OPCODE = opcodes.OpNodePowercycle
585
587 """Tries to powercycle a node.
588
589 """
590 return (self.request_body, {
591 "node_name": self.items[0],
592 "force": self.useForce(),
593 })
594
597 """/2/nodes/[node_name]/role resource.
598
599 """
600 PUT_OPCODE = opcodes.OpNodeSetParams
601
603 """Returns the current node role.
604
605 @return: Node role
606
607 """
608 node_name = self.items[0]
609 client = self.GetClient()
610 result = client.QueryNodes(names=[node_name], fields=["role"],
611 use_locking=self.useLocking())
612
613 return _NR_MAP[result[0][0]]
614
616 """Sets the node role.
617
618 """
619 baserlib.CheckType(self.request_body, basestring, "Body contents")
620
621 role = self.request_body
622
623 if role == _NR_REGULAR:
624 candidate = False
625 offline = False
626 drained = False
627
628 elif role == _NR_MASTER_CANDIDATE:
629 candidate = True
630 offline = drained = None
631
632 elif role == _NR_DRAINED:
633 drained = True
634 candidate = offline = None
635
636 elif role == _NR_OFFLINE:
637 offline = True
638 candidate = drained = None
639
640 else:
641 raise http.HttpBadRequest("Can't set '%s' role" % role)
642
643 assert len(self.items) == 1
644
645 return ({}, {
646 "node_name": self.items[0],
647 "master_candidate": candidate,
648 "offline": offline,
649 "drained": drained,
650 "force": self.useForce(),
651 "auto_promote": bool(self._checkIntVariable("auto-promote", default=0)),
652 })
653
656 """/2/nodes/[node_name]/evacuate resource.
657
658 """
659 POST_OPCODE = opcodes.OpNodeEvacuate
660
662 """Evacuate all instances off a node.
663
664 """
665 return (self.request_body, {
666 "node_name": self.items[0],
667 "dry_run": self.dryRun(),
668 })
669
672 """/2/nodes/[node_name]/migrate resource.
673
674 """
675 POST_OPCODE = opcodes.OpNodeMigrate
676
678 """Migrate all primary instances from a node.
679
680 """
681 if self.queryargs:
682 # Support old-style requests
683 if "live" in self.queryargs and "mode" in self.queryargs:
684 raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
685 " be passed")
686
687 if "live" in self.queryargs:
688 if self._checkIntVariable("live", default=1):
689 mode = constants.HT_MIGRATION_LIVE
690 else:
691 mode = constants.HT_MIGRATION_NONLIVE
692 else:
693 mode = self._checkStringVariable("mode", default=None)
694
695 data = {
696 "mode": mode,
697 }
698 else:
699 data = self.request_body
700
701 return (data, {
702 "node_name": self.items[0],
703 })
704
707 """/2/nodes/[node_name]/modify resource.
708
709 """
710 POST_OPCODE = opcodes.OpNodeSetParams
711
713 """Changes parameters of a node.
714
715 """
716 assert len(self.items) == 1
717
718 return (self.request_body, {
719 "node_name": self.items[0],
720 })
721
724 """/2/nodes/[node_name]/storage resource.
725
726 """
727 # LUNodeQueryStorage acquires locks, hence restricting access to GET
728 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
729 GET_OPCODE = opcodes.OpNodeQueryStorage
730
732 """List storage available on a node.
733
734 """
735 storage_type = self._checkStringVariable("storage_type", None)
736 output_fields = self._checkStringVariable("output_fields", None)
737
738 if not output_fields:
739 raise http.HttpBadRequest("Missing the required 'output_fields'"
740 " parameter")
741
742 return ({}, {
743 "nodes": [self.items[0]],
744 "storage_type": storage_type,
745 "output_fields": output_fields.split(","),
746 })
747
750 """/2/nodes/[node_name]/storage/modify resource.
751
752 """
753 PUT_OPCODE = opcodes.OpNodeModifyStorage
754
756 """Modifies a storage volume on a node.
757
758 """
759 storage_type = self._checkStringVariable("storage_type", None)
760 name = self._checkStringVariable("name", None)
761
762 if not name:
763 raise http.HttpBadRequest("Missing the required 'name'"
764 " parameter")
765
766 changes = {}
767
768 if "allocatable" in self.queryargs:
769 changes[constants.SF_ALLOCATABLE] = \
770 bool(self._checkIntVariable("allocatable", default=1))
771
772 return ({}, {
773 "node_name": self.items[0],
774 "storage_type": storage_type,
775 "name": name,
776 "changes": changes,
777 })
778
781 """/2/nodes/[node_name]/storage/repair resource.
782
783 """
784 PUT_OPCODE = opcodes.OpRepairNodeStorage
785
787 """Repairs a storage volume on a node.
788
789 """
790 storage_type = self._checkStringVariable("storage_type", None)
791 name = self._checkStringVariable("name", None)
792 if not name:
793 raise http.HttpBadRequest("Missing the required 'name'"
794 " parameter")
795
796 return ({}, {
797 "node_name": self.items[0],
798 "storage_type": storage_type,
799 "name": name,
800 })
801
804 """/2/networks resource.
805
806 """
807 POST_OPCODE = opcodes.OpNetworkAdd
808 POST_RENAME = {
809 "name": "network_name",
810 }
811
813 """Create a network.
814
815 """
816 assert not self.items
817 return (self.request_body, {
818 "dry_run": self.dryRun(),
819 })
820
822 """Returns a list of all networks.
823
824 """
825 client = self.GetClient()
826
827 if self.useBulk():
828 bulkdata = client.QueryNetworks([], NET_FIELDS, False)
829 return baserlib.MapBulkFields(bulkdata, NET_FIELDS)
830 else:
831 data = client.QueryNetworks([], ["name"], False)
832 networknames = [row[0] for row in data]
833 return baserlib.BuildUriList(networknames, "/2/networks/%s",
834 uri_fields=("name", "uri"))
835
838 """/2/networks/[network_name] resource.
839
840 """
841 DELETE_OPCODE = opcodes.OpNetworkRemove
842
844 """Send information about a network.
845
846 """
847 network_name = self.items[0]
848 client = self.GetClient()
849
850 result = baserlib.HandleItemQueryErrors(client.QueryNetworks,
851 names=[network_name],
852 fields=NET_FIELDS,
853 use_locking=self.useLocking())
854
855 return baserlib.MapFields(NET_FIELDS, result[0])
856
858 """Delete a network.
859
860 """
861 assert len(self.items) == 1
862 return (self.request_body, {
863 "network_name": self.items[0],
864 "dry_run": self.dryRun(),
865 })
866
869 """/2/networks/[network_name]/connect resource.
870
871 """
872 PUT_OPCODE = opcodes.OpNetworkConnect
873
875 """Changes some parameters of node group.
876
877 """
878 assert self.items
879 return (self.request_body, {
880 "network_name": self.items[0],
881 "dry_run": self.dryRun(),
882 })
883
886 """/2/networks/[network_name]/disconnect resource.
887
888 """
889 PUT_OPCODE = opcodes.OpNetworkDisconnect
890
892 """Changes some parameters of node group.
893
894 """
895 assert self.items
896 return (self.request_body, {
897 "network_name": self.items[0],
898 "dry_run": self.dryRun(),
899 })
900
903 """/2/networks/[network_name]/modify resource.
904
905 """
906 PUT_OPCODE = opcodes.OpNetworkSetParams
907
909 """Changes some parameters of network.
910
911 """
912 assert self.items
913 return (self.request_body, {
914 "network_name": self.items[0],
915 })
916
919 """/2/groups resource.
920
921 """
922 POST_OPCODE = opcodes.OpGroupAdd
923 POST_RENAME = {
924 "name": "group_name",
925 }
926
928 """Create a node group.
929
930
931 """
932 assert not self.items
933 return (self.request_body, {
934 "dry_run": self.dryRun(),
935 })
936
938 """Returns a list of all node groups.
939
940 """
941 client = self.GetClient()
942
943 if self.useBulk():
944 bulkdata = client.QueryGroups([], G_FIELDS, False)
945 return baserlib.MapBulkFields(bulkdata, G_FIELDS)
946 else:
947 data = client.QueryGroups([], ["name"], False)
948 groupnames = [row[0] for row in data]
949 return baserlib.BuildUriList(groupnames, "/2/groups/%s",
950 uri_fields=("name", "uri"))
951
954 """/2/groups/[group_name] resource.
955
956 """
957 DELETE_OPCODE = opcodes.OpGroupRemove
958
960 """Send information about a node group.
961
962 """
963 group_name = self.items[0]
964 client = self.GetClient()
965
966 result = baserlib.HandleItemQueryErrors(client.QueryGroups,
967 names=[group_name], fields=G_FIELDS,
968 use_locking=self.useLocking())
969
970 return baserlib.MapFields(G_FIELDS, result[0])
971
973 """Delete a node group.
974
975 """
976 assert len(self.items) == 1
977 return ({}, {
978 "group_name": self.items[0],
979 "dry_run": self.dryRun(),
980 })
981
984 """/2/groups/[group_name]/modify resource.
985
986 """
987 PUT_OPCODE = opcodes.OpGroupSetParams
988 PUT_RENAME = {
989 "custom_ndparams": "ndparams",
990 "custom_ipolicy": "ipolicy",
991 "custom_diskparams": "diskparams",
992 }
993
995 """Changes some parameters of node group.
996
997 """
998 assert self.items
999 return (self.request_body, {
1000 "group_name": self.items[0],
1001 })
1002
1005 """/2/groups/[group_name]/rename resource.
1006
1007 """
1008 PUT_OPCODE = opcodes.OpGroupRename
1009
1011 """Changes the name of a node group.
1012
1013 """
1014 assert len(self.items) == 1
1015 return (self.request_body, {
1016 "group_name": self.items[0],
1017 "dry_run": self.dryRun(),
1018 })
1019
1022 """/2/groups/[group_name]/assign-nodes resource.
1023
1024 """
1025 PUT_OPCODE = opcodes.OpGroupAssignNodes
1026
1028 """Assigns nodes to a group.
1029
1030 """
1031 assert len(self.items) == 1
1032 return (self.request_body, {
1033 "group_name": self.items[0],
1034 "dry_run": self.dryRun(),
1035 "force": self.useForce(),
1036 })
1037
1040 """Convert in place the usb_devices string to the proper format.
1041
1042 In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
1043 comma to space because commas cannot be accepted on the command line
1044 (they already act as the separator between different hvparams). RAPI
1045 should be able to accept commas for backwards compatibility, but we want
1046 it to also accept the new space separator. Therefore, we convert
1047 spaces into commas here and keep the old parsing logic elsewhere.
1048
1049 """
1050 try:
1051 hvparams = data["hvparams"]
1052 usb_devices = hvparams[constants.HV_USB_DEVICES]
1053 hvparams[constants.HV_USB_DEVICES] = usb_devices.replace(" ", ",")
1054 data["hvparams"] = hvparams
1055 except KeyError:
1056 #No usb_devices, no modification required
1057 pass
1058
1061 """/2/instances resource.
1062
1063 """
1064 POST_OPCODE = opcodes.OpInstanceCreate
1065 POST_RENAME = {
1066 "os": "os_type",
1067 "name": "instance_name",
1068 }
1069
1071 """Returns a list of all available instances.
1072
1073 """
1074 client = self.GetClient()
1075
1076 use_locking = self.useLocking()
1077 if self.useBulk():
1078 bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
1079 return map(_UpdateBeparams, baserlib.MapBulkFields(bulkdata, I_FIELDS))
1080 else:
1081 instancesdata = client.QueryInstances([], ["name"], use_locking)
1082 instanceslist = [row[0] for row in instancesdata]
1083 return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
1084 uri_fields=("id", "uri"))
1085
1087 """Create an instance.
1088
1089 @return: a job id
1090
1091 """
1092 baserlib.CheckType(self.request_body, dict, "Body contents")
1093
1094 # Default to request data version 0
1095 data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
1096
1097 if data_version == 0:
1098 raise http.HttpBadRequest("Instance creation request version 0 is no"
1099 " longer supported")
1100 elif data_version != 1:
1101 raise http.HttpBadRequest("Unsupported request data version %s" %
1102 data_version)
1103
1104 data = self.request_body.copy()
1105 # Remove "__version__"
1106 data.pop(_REQ_DATA_VERSION, None)
1107
1108 _ConvertUsbDevices(data)
1109
1110 return (data, {
1111 "dry_run": self.dryRun(),
1112 })
1113
1116 """/2/instances-multi-alloc resource.
1117
1118 """
1119 POST_OPCODE = opcodes.OpInstanceMultiAlloc
1120
1122 """Try to allocate multiple instances.
1123
1124 @return: A dict with submitted jobs, allocatable instances and failed
1125 allocations
1126
1127 """
1128 if "instances" not in self.request_body:
1129 raise http.HttpBadRequest("Request is missing required 'instances' field"
1130 " in body")
1131
1132 # Unlike most other RAPI calls, this one is composed of individual opcodes,
1133 # and we have to do the filling ourselves
1134 OPCODE_RENAME = {
1135 "os": "os_type",
1136 "name": "instance_name",
1137 }
1138
1139 body = objects.FillDict(self.request_body, {
1140 "instances": [
1141 baserlib.FillOpcode(opcodes.OpInstanceCreate, inst, {},
1142 rename=OPCODE_RENAME)
1143 for inst in self.request_body["instances"]
1144 ],
1145 })
1146
1147 return (body, {
1148 "dry_run": self.dryRun(),
1149 })
1150
1153 """/2/instances/[instance_name] resource.
1154
1155 """
1156 DELETE_OPCODE = opcodes.OpInstanceRemove
1157
1159 """Send information about an instance.
1160
1161 """
1162 client = self.GetClient()
1163 instance_name = self.items[0]
1164
1165 result = baserlib.HandleItemQueryErrors(client.QueryInstances,
1166 names=[instance_name],
1167 fields=I_FIELDS,
1168 use_locking=self.useLocking())
1169
1170 return _UpdateBeparams(baserlib.MapFields(I_FIELDS, result[0]))
1171
1173 """Delete an instance.
1174
1175 """
1176 assert len(self.items) == 1
1177 return (self.request_body, {
1178 "instance_name": self.items[0],
1179 "ignore_failures": False,
1180 "dry_run": self.dryRun(),
1181 })
1182
1185 """/2/instances/[instance_name]/info resource.
1186
1187 """
1188 GET_OPCODE = opcodes.OpInstanceQueryData
1189
1191 """Request detailed instance information.
1192
1193 """
1194 assert len(self.items) == 1
1195 return ({}, {
1196 "instances": [self.items[0]],
1197 "static": bool(self._checkIntVariable("static", default=0)),
1198 })
1199
1202 """/2/instances/[instance_name]/reboot resource.
1203
1204 Implements an instance reboot.
1205
1206 """
1207 POST_OPCODE = opcodes.OpInstanceReboot
1208
1210 """Reboot an instance.
1211
1212 The URI takes type=[hard|soft|full] and
1213 ignore_secondaries=[False|True] parameters.
1214
1215 """
1216 return (self.request_body, {
1217 "instance_name": self.items[0],
1218 "reboot_type":
1219 self.queryargs.get("type", [constants.INSTANCE_REBOOT_HARD])[0],
1220 "ignore_secondaries": bool(self._checkIntVariable("ignore_secondaries")),
1221 "dry_run": self.dryRun(),
1222 })
1223
1226 """/2/instances/[instance_name]/startup resource.
1227
1228 Implements an instance startup.
1229
1230 """
1231 PUT_OPCODE = opcodes.OpInstanceStartup
1232
1234 """Startup an instance.
1235
1236 The URI takes force=[False|True] parameter to start the instance
1237 if even if secondary disks are failing.
1238
1239 """
1240 return ({}, {
1241 "instance_name": self.items[0],
1242 "force": self.useForce(),
1243 "dry_run": self.dryRun(),
1244 "no_remember": bool(self._checkIntVariable("no_remember")),
1245 })
1246
1249 """/2/instances/[instance_name]/shutdown resource.
1250
1251 Implements an instance shutdown.
1252
1253 """
1254 PUT_OPCODE = opcodes.OpInstanceShutdown
1255
1257 """Shutdown an instance.
1258
1259 """
1260 return (self.request_body, {
1261 "instance_name": self.items[0],
1262 "no_remember": bool(self._checkIntVariable("no_remember")),
1263 "dry_run": self.dryRun(),
1264 })
1265
1268 """Parses a request for reinstalling an instance.
1269
1270 """
1271 if not isinstance(data, dict):
1272 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
1273
1274 ostype = baserlib.CheckParameter(data, "os", default=None)
1275 start = baserlib.CheckParameter(data, "start", exptype=bool,
1276 default=True)
1277 osparams = baserlib.CheckParameter(data, "osparams", default=None)
1278
1279 ops = [
1280 opcodes.OpInstanceShutdown(instance_name=name),
1281 opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
1282 osparams=osparams),
1283 ]
1284
1285 if start:
1286 ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
1287
1288 return ops
1289
1292 """/2/instances/[instance_name]/reinstall resource.
1293
1294 Implements an instance reinstall.
1295
1296 """
1297 POST_OPCODE = opcodes.OpInstanceReinstall
1298
1300 """Reinstall an instance.
1301
1302 The URI takes os=name and nostartup=[0|1] optional
1303 parameters. By default, the instance will be started
1304 automatically.
1305
1306 """
1307 if self.request_body:
1308 if self.queryargs:
1309 raise http.HttpBadRequest("Can't combine query and body parameters")
1310
1311 body = self.request_body
1312 elif self.queryargs:
1313 # Legacy interface, do not modify/extend
1314 body = {
1315 "os": self._checkStringVariable("os"),
1316 "start": not self._checkIntVariable("nostartup"),
1317 }
1318 else:
1319 body = {}
1320
1321 ops = _ParseInstanceReinstallRequest(self.items[0], body)
1322
1323 return self.SubmitJob(ops)
1324
class R_2_instances_name_replace_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceReplaceDisks

  def GetPostOpInput(self):
    """Replaces disks on an instance.

    @return: (body, static) tuple for the opcode builder; the "disks"
      entry, when present, is normalized to a list of integer indices

    """
    static = {
      "instance_name": self.items[0],
      }

    if self.request_body:
      data = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      data = {
        "remote_node": self._checkStringVariable("remote_node", default=None),
        "mode": self._checkStringVariable("mode", default=None),
        "disks": self._checkStringVariable("disks", default=None),
        "iallocator": self._checkStringVariable("iallocator", default=None),
        }
    else:
      data = {}

    # Parse disks
    try:
      raw_disks = data.pop("disks")
    except KeyError:
      pass
    else:
      if raw_disks:
        if ht.TListOf(ht.TInt)(raw_disks): # pylint: disable=E1102
          data["disks"] = raw_disks
        else:
          # Backwards compatibility for strings of the format "1, 2, 3"
          try:
            data["disks"] = [int(part) for part in raw_disks.split(",")]
          # Fix: "except E, err" is obsolete comma syntax; "as" is valid on
          # Python 2.6+ and required on Python 3
          except (TypeError, ValueError) as err:
            raise http.HttpBadRequest("Invalid disk index passed: %s" % err)

    return (data, static)
1370
class R_2_instances_name_activate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  PUT_OPCODE = opcodes.OpInstanceActivateDisks

  def GetPutOpInput(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    static = {
      "instance_name": self.items[0],
      "ignore_size": bool(self._checkIntVariable("ignore_size")),
      }
    return ({}, static)
1388
class R_2_instances_name_deactivate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  PUT_OPCODE = opcodes.OpInstanceDeactivateDisks

  def GetPutOpInput(self):
    """Deactivate disks for an instance.

    """
    static = {
      "instance_name": self.items[0],
      "force": self.useForce(),
      }
    return ({}, static)
1404
class R_2_instances_name_recreate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/recreate-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceRecreateDisks

  def GetPostOpInput(self):
    """Recreate disks for an instance.

    """
    static = {"instance_name": self.items[0]}
    return (self.request_body, static)
1419
class R_2_instances_name_prepare_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  PUT_OPCODE = opcodes.OpBackupPrepare

  def GetPutOpInput(self):
    """Prepares an export for an instance.

    """
    static = {
      "instance_name": self.items[0],
      "mode": self._checkStringVariable("mode"),
      }
    return ({}, static)
1435
class R_2_instances_name_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/export resource.

  """
  PUT_OPCODE = opcodes.OpBackupExport
  PUT_RENAME = {
    "destination": "target_node",
    }

  def GetPutOpInput(self):
    """Exports an instance.

    """
    static = {"instance_name": self.items[0]}
    return (self.request_body, static)
1453
class R_2_instances_name_migrate(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/migrate resource.

  """
  PUT_OPCODE = opcodes.OpInstanceMigrate

  def GetPutOpInput(self):
    """Migrates an instance.

    """
    static = {"instance_name": self.items[0]}
    return (self.request_body, static)
1468
class R_2_instances_name_failover(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/failover resource.

  """
  PUT_OPCODE = opcodes.OpInstanceFailover

  def GetPutOpInput(self):
    """Does a failover of an instance.

    """
    static = {"instance_name": self.items[0]}
    return (self.request_body, static)
1483
class R_2_instances_name_rename(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/rename resource.

  """
  PUT_OPCODE = opcodes.OpInstanceRename

  def GetPutOpInput(self):
    """Changes the name of an instance.

    """
    static = {"instance_name": self.items[0]}
    return (self.request_body, static)
1498
class R_2_instances_name_modify(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpInstanceSetParams
  PUT_RENAME = {
    "custom_beparams": "beparams",
    "custom_hvparams": "hvparams",
    }

  def GetPutOpInput(self):
    """Changes parameters of an instance.

    """
    # Work on a shallow copy so the request body itself is not mutated
    body = dict(self.request_body)
    _ConvertUsbDevices(body)

    static = {"instance_name": self.items[0]}
    return (body, static)
1520
class R_2_instances_name_disk_grow(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  POST_OPCODE = opcodes.OpInstanceGrowDisk

  def GetPostOpInput(self):
    """Increases the size of an instance disk.

    """
    static = {
      "instance_name": self.items[0],
      "disk": int(self.items[1]),
      }
    return (self.request_body, static)
1536
class R_2_instances_name_console(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/console resource.

  """
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  GET_OPCODE = opcodes.OpInstanceConsole

  def GET(self):
    """Request information for connecting to instance's console.

    @return: Serialized instance console description, see
      L{objects.InstanceConsole}

    """
    name = self.items[0]
    cl = self.GetClient()

    ((console, oper_state), ) = \
      cl.QueryInstances([name], ["console", "oper_state"], False)

    # A stopped instance has no console to connect to
    if not oper_state:
      raise http.HttpServiceUnavailable("Instance console unavailable")

    assert isinstance(console, dict)
    return console
1563
def _GetQueryFields(args):
  """Tries to extract C{fields} query parameter.

  @type args: dictionary
  @rtype: list of string
  @raise http.HttpBadRequest: When parameter can't be found

  """
  if "fields" not in args:
    raise http.HttpBadRequest("Missing 'fields' query argument")

  # Query arguments arrive as lists; only the first value is used
  return _SplitQueryFields(args["fields"][0])
1579
1582 """Splits fields as given for a query request.
1583
1584 @type fields: string
1585 @rtype: list of string
1586
1587 """
1588 return [i.strip() for i in fields.split(",")]
1589
class R_2_query(baserlib.OpcodeResource):
  """/2/query/[resource] resource.

  """
  # Results might contain sensitive information
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  PUT_ACCESS = GET_ACCESS
  GET_OPCODE = opcodes.OpQuery
  PUT_OPCODE = opcodes.OpQuery

  # NOTE(review): the _Query helper used below is defined on this class but
  # was elided in this rendering of the source — confirm against the
  # original file

  def GET(self):
    """Returns resource information.

    @return: Query result, see L{objects.QueryResponse}

    """
    return self._Query(_GetQueryFields(self.queryargs), None)

  def PUT(self):
    """Submits job querying for resources.

    @return: Query result, see L{objects.QueryResponse}

    """
    body = self.request_body

    baserlib.CheckType(body, dict, "Body contents")

    # Fields may come from the body or, failing that, the query string
    if "fields" in body:
      fields = body["fields"]
    else:
      fields = _GetQueryFields(self.queryargs)

    qfilter = body.get("qfilter", None)
    # TODO: remove this after 2.7
    if qfilter is None:
      qfilter = body.get("filter", None)

    return self._Query(fields, qfilter)
1634
class R_2_query_fields(baserlib.OpcodeResource):
  """/2/query/[resource]/fields resource.

  """
  GET_OPCODE = opcodes.OpQueryFields

  def GET(self):
    """Retrieves list of available fields for a resource.

    @return: List of serialized L{objects.QueryFieldDefinition}

    """
    # Without a "fields" argument all fields are requested (None)
    if "fields" in self.queryargs:
      fields = _SplitQueryFields(self.queryargs["fields"][0])
    else:
      fields = None

    return self.GetClient().QueryFields(self.items[0], fields).ToDict()
1656
class _R_Tags(baserlib.OpcodeResource):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None
  GET_OPCODE = opcodes.OpTagsGet
  PUT_OPCODE = opcodes.OpTagsSet
  DELETE_OPCODE = opcodes.OpTagsDel

  def __init__(self, items, queryargs, req, **kwargs):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.OpcodeResource.__init__(self, items, queryargs, req, **kwargs)

    # Cluster-level tags are not tied to a named object
    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    kind = self.TAG_LEVEL

    if kind not in constants.VALID_TAG_TYPES:
      raise http.HttpBadRequest("Unhandled tag type!")

    cl = self.GetClient()
    if kind == constants.TAG_CLUSTER:
      if self.name:
        raise http.HttpBadRequest("Can't specify a name"
                                  " for cluster tag request")
      tags = cl.QueryTags(kind, "")
    else:
      if not self.name:
        raise http.HttpBadRequest("Missing name on tag request")
      tags = cl.QueryTags(kind, self.name)

    return list(tags)

  def GetPutOpInput(self):
    """Add a set of tags.

    The request as a list of strings should be PUT to this URI. And
    you'll have back a job id.

    """
    return ({}, {
      "kind": self.TAG_LEVEL,
      "name": self.name,
      "tags": self.queryargs.get("tag", []),
      "dry_run": self.dryRun(),
      })

  def GetDeleteOpInput(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # Re-use code
    return self.GetPutOpInput()
1732
1741
1750
1759
1768
1777
| Trees | Indices | Help |
|
|---|
| Generated by Epydoc 3.0.1 on Fri Mar 11 11:24:12 2016 | http://epydoc.sourceforge.net |