1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
5 #
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 # 02110-1301, USA.
20
21
22 """Remote API resource implementations.
23
24 PUT or POST?
25 ============
26
27 According to RFC2616 the main difference between PUT and POST is that
28 POST can create new resources but PUT can only create the resource the
29 URI was pointing to on the PUT request.
30
31 In the context of this module POST on ``/2/instances`` to change an existing
32 entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
33 new instance) with a name specified in the request.
34
35 Quoting from RFC2616, section 9.6::
36
37 The fundamental difference between the POST and PUT requests is reflected in
38 the different meaning of the Request-URI. The URI in a POST request
39 identifies the resource that will handle the enclosed entity. That resource
40 might be a data-accepting process, a gateway to some other protocol, or a
41 separate entity that accepts annotations. In contrast, the URI in a PUT
42 request identifies the entity enclosed with the request -- the user agent
43 knows what URI is intended and the server MUST NOT attempt to apply the
44 request to some other resource. If the server desires that the request be
45 applied to a different URI, it MUST send a 301 (Moved Permanently) response;
46 the user agent MAY then make its own decision regarding whether or not to
47 redirect the request.
48
49 So when adding new methods, if they are operating on the URI entity itself,
50 PUT should be preferred over POST.
51
52 """
53
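# An illustrative sketch of the convention above (the instance name is
# hypothetical; the URIs and methods come from the resources defined in this
# module): an operation addressed at an existing entity, such as starting an
# instance, is a PUT on that entity's URI, while creating an instance names
# the new entity in the request body and therefore goes through POST on the
# collection URI.
#
#   POST /2/instances
#     {"__version__": 1, "name": "inst1.example.com", ...}
#
#   PUT /2/instances/inst1.example.com/startup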
54 # pylint: disable=C0103
55
56 # C0103: Invalid name, since the R_* names are not conforming
57
58 from ganeti import opcodes
59 from ganeti import objects
60 from ganeti import http
61 from ganeti import constants
62 from ganeti import cli
63 from ganeti import rapi
64 from ganeti import ht
65 from ganeti import compat
66 from ganeti import ssconf
67 from ganeti.rapi import baserlib
68
69
70 _COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
71 I_FIELDS = ["name", "admin_state", "os",
72 "pnode", "snodes",
73 "disk_template",
74 "nic.ips", "nic.macs", "nic.modes",
75 "nic.links", "nic.networks", "nic.networks.names", "nic.bridges",
76 "network_port",
77 "disk.sizes", "disk_usage",
78 "beparams", "hvparams",
79 "oper_state", "oper_ram", "oper_vcpus", "status",
80 "custom_hvparams", "custom_beparams", "custom_nicparams",
81 ] + _COMMON_FIELDS
82
83 N_FIELDS = ["name", "offline", "master_candidate", "drained",
84 "dtotal", "dfree",
85 "mtotal", "mnode", "mfree",
86 "pinst_cnt", "sinst_cnt",
87 "ctotal", "cnodes", "csockets",
88 "pip", "sip", "role",
89 "pinst_list", "sinst_list",
90 "master_capable", "vm_capable",
91 "ndparams",
92 "group.uuid",
93 ] + _COMMON_FIELDS
94
95 NET_FIELDS = ["name", "network", "gateway",
96 "network6", "gateway6",
97 "mac_prefix",
98 "free_count", "reserved_count",
99 "map", "group_list", "inst_list",
100 "external_reservations",
101 ] + _COMMON_FIELDS
102
103 G_FIELDS = [
104 "alloc_policy",
105 "name",
106 "node_cnt",
107 "node_list",
108 "ipolicy",
109 "custom_ipolicy",
110 "diskparams",
111 "custom_diskparams",
112 "ndparams",
113 "custom_ndparams",
114 ] + _COMMON_FIELDS
115
116 J_FIELDS_BULK = [
117 "id", "ops", "status", "summary",
118 "opstatus",
119 "received_ts", "start_ts", "end_ts",
120 ]
121
122 J_FIELDS = J_FIELDS_BULK + [
123 "oplog",
124 "opresult",
125 ]
126
127 _NR_DRAINED = "drained"
128 _NR_MASTER_CANDIDATE = "master-candidate"
129 _NR_MASTER = "master"
130 _NR_OFFLINE = "offline"
131 _NR_REGULAR = "regular"
132
133 _NR_MAP = {
134 constants.NR_MASTER: _NR_MASTER,
135 constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
136 constants.NR_DRAINED: _NR_DRAINED,
137 constants.NR_OFFLINE: _NR_OFFLINE,
138 constants.NR_REGULAR: _NR_REGULAR,
139 }
140
141 assert frozenset(_NR_MAP.keys()) == constants.NR_ALL
142
143 # Request data version field
144 _REQ_DATA_VERSION = "__version__"
145
146 # Feature string for instance creation request data version 1
147 _INST_CREATE_REQV1 = "instance-create-reqv1"
148
149 # Feature string for instance reinstall request version 1
150 _INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
151
152 # Feature string for node migration version 1
153 _NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
154
155 # Feature string for node evacuation with LU-generated jobs
156 _NODE_EVAC_RES1 = "node-evac-res1"
157
158 ALL_FEATURES = compat.UniqueFrozenset([
159 _INST_CREATE_REQV1,
160 _INST_REINSTALL_REQV1,
161 _NODE_MIGRATE_REQV1,
162 _NODE_EVAC_RES1,
163 ])
164
165 # Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
166 _WFJC_TIMEOUT = 10
167
168
169 # FIXME: For compatibility we update the beparams/memory field. Needs to be
170 # removed in Ganeti 2.8
171 def _UpdateBeparams(inst):
172 """Updates the beparams dict of inst to support the memory field.
173
174 @param inst: Inst dict
175 @return: Updated inst dict
176
177 """
178 beparams = inst["beparams"]
179 beparams[constants.BE_MEMORY] = beparams[constants.BE_MAXMEM]
180
181 return inst
182
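# For example (hypothetical values), an instance returned with
#   {"beparams": {"maxmem": 512, "minmem": 512}}
# is rewritten by _UpdateBeparams to
#   {"beparams": {"maxmem": 512, "minmem": 512, "memory": 512}}
# so that clients still relying on the legacy "memory" field keep working.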
194
200
203 """/version resource.
204
205 This resource should be used to determine the remote API version and
206 to adapt clients accordingly.
207
208 """
209 @staticmethod
215
218 """/2/info resource.
219
220 """
221 GET_OPCODE = opcodes.OpClusterQuery
222
224 """Returns cluster information.
225
226 """
227 client = self.GetClient(query=True)
228 return client.QueryClusterInfo()
229
232 """/2/features resource.
233
234 """
235 @staticmethod
237 """Returns list of optional RAPI features implemented.
238
239 """
240 return list(ALL_FEATURES)
241
244 """/2/os resource.
245
246 """
247 GET_OPCODE = opcodes.OpOsDiagnose
248
250 """Return a list of all OSes.
251
252 Can return error 500 in case of a problem.
253
254 Example: ["debian-etch"]
255
256 """
257 cl = self.GetClient()
258 op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
259 job_id = self.SubmitJob([op], cl=cl)
260 # we use a custom feedback function; instead of printing we log the status
261 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
262 diagnose_data = result[0]
263
264 if not isinstance(diagnose_data, list):
265 raise http.HttpBadGateway(message="Can't get OS list")
266
267 os_names = []
268 for (name, variants) in diagnose_data:
269 os_names.extend(cli.CalculateOSNames(name, variants))
270
271 return os_names
272
279
286
289 """/2/jobs resource.
290
291 """
293 """Returns a dictionary of jobs.
294
295 @return: a dictionary with job ids and uris.
296
297 """
298 client = self.GetClient(query=True)
299
300 if self.useBulk():
301 bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
302 return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
303 else:
304 jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
305 return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
306 uri_fields=("id", "uri"))
307
310 """/2/jobs/[job_id] resource.
311
312 """
314 """Returns a job status.
315
316 @return: a dictionary with job parameters.
317 The result includes:
318 - id: job ID as a number
319 - status: current job status as a string
320 - ops: involved OpCodes as a list of dictionaries for each
321 opcode in the job
322 - opstatus: OpCodes status as a list
323 - opresult: OpCodes results as a list of lists
324
325 """
326 job_id = self.items[0]
327 result = self.GetClient(query=True).QueryJobs([job_id, ], J_FIELDS)[0]
328 if result is None:
329 raise http.HttpNotFound()
330 return baserlib.MapFields(J_FIELDS, result)
331
339
342 """/2/jobs/[job_id]/wait resource.
343
344 """
345 # WaitForJobChange provides access to sensitive information and blocks
346 # machine resources (it's a blocking RAPI call), hence restricting access.
347 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
348
350 """Waits for job changes.
351
352 """
353 job_id = self.items[0]
354
355 fields = self.getBodyParameter("fields")
356 prev_job_info = self.getBodyParameter("previous_job_info", None)
357 prev_log_serial = self.getBodyParameter("previous_log_serial", None)
358
359 if not isinstance(fields, list):
360 raise http.HttpBadRequest("The 'fields' parameter should be a list")
361
362 if not (prev_job_info is None or isinstance(prev_job_info, list)):
363 raise http.HttpBadRequest("The 'previous_job_info' parameter should"
364 " be a list")
365
366 if not (prev_log_serial is None or
367 isinstance(prev_log_serial, (int, long))):
368 raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
369 " be a number")
370
371 client = self.GetClient()
372 result = client.WaitForJobChangeOnce(job_id, fields,
373 prev_job_info, prev_log_serial,
374 timeout=_WFJC_TIMEOUT)
375 if not result:
376 raise http.HttpNotFound()
377
378 if result == constants.JOB_NOTCHANGED:
379 # No changes
380 return None
381
382 (job_info, log_entries) = result
383
384 return {
385 "job_info": job_info,
386 "log_entries": log_entries,
387 }
388
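# Illustrative request body for /2/jobs/[job_id]/wait (the values are
# hypothetical; the parameter names are the ones read above):
#
#   {
#     "fields": ["status", "opstatus"],
#     "previous_job_info": null,
#     "previous_log_serial": null
#   }
#
# The reply is {"job_info": ..., "log_entries": ...}, or null if nothing
# changed within the _WFJC_TIMEOUT window.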
391 """/2/nodes resource.
392
393 """
394 GET_OPCODE = opcodes.OpNodeQuery
395
397 """Returns a list of all nodes.
398
399 """
400 client = self.GetClient(query=False)
401
402 if self.useBulk():
403 bulkdata = client.QueryNodes([], N_FIELDS, False)
404 return baserlib.MapBulkFields(bulkdata, N_FIELDS)
405 else:
406 nodesdata = client.QueryNodes([], ["name"], False)
407 nodeslist = [row[0] for row in nodesdata]
408 return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
409 uri_fields=("id", "uri"))
410
413 """/2/nodes/[node_name] resource.
414
415 """
416 GET_OPCODE = opcodes.OpNodeQuery
417
419 """Send information about a node.
420
421 """
422 node_name = self.items[0]
423 client = self.GetClient(query=False)
424
425 result = baserlib.HandleItemQueryErrors(client.QueryNodes,
426 names=[node_name], fields=N_FIELDS,
427 use_locking=self.useLocking())
428
429 return baserlib.MapFields(N_FIELDS, result[0])
430
433 """/2/nodes/[node_name]/powercycle resource.
434
435 """
436 POST_OPCODE = opcodes.OpNodePowercycle
437
439 """Tries to powercycle a node.
440
441 """
442 return (self.request_body, {
443 "node_name": self.items[0],
444 "force": self.useForce(),
445 })
446
449 """/2/nodes/[node_name]/role resource.
450
451 """
452 PUT_OPCODE = opcodes.OpNodeSetParams
453
455 """Returns the current node role.
456
457 @return: Node role
458
459 """
460 node_name = self.items[0]
461 client = self.GetClient(query=True)
462 result = client.QueryNodes(names=[node_name], fields=["role"],
463 use_locking=self.useLocking())
464
465 return _NR_MAP[result[0][0]]
466
468 """Sets the node role.
469
470 """
471 baserlib.CheckType(self.request_body, basestring, "Body contents")
472
473 role = self.request_body
474
475 if role == _NR_REGULAR:
476 candidate = False
477 offline = False
478 drained = False
479
480 elif role == _NR_MASTER_CANDIDATE:
481 candidate = True
482 offline = drained = None
483
484 elif role == _NR_DRAINED:
485 drained = True
486 candidate = offline = None
487
488 elif role == _NR_OFFLINE:
489 offline = True
490 candidate = drained = None
491
492 else:
493 raise http.HttpBadRequest("Can't set '%s' role" % role)
494
495 assert len(self.items) == 1
496
497 return ({}, {
498 "node_name": self.items[0],
499 "master_candidate": candidate,
500 "offline": offline,
501 "drained": drained,
502 "force": self.useForce(),
503 "auto_promote": bool(self._checkIntVariable("auto-promote", default=0)),
504 })
505
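# Illustrative role change (node name hypothetical); the body is a single
# JSON string, one of "drained", "master-candidate", "offline" or "regular"
# ("master" cannot be set through this resource):
#
#   PUT /2/nodes/node1.example.com/role?auto-promote=1
#     "master-candidate"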
508 """/2/nodes/[node_name]/evacuate resource.
509
510 """
511 POST_OPCODE = opcodes.OpNodeEvacuate
512
514 """Evacuate all instances off a node.
515
516 """
517 return (self.request_body, {
518 "node_name": self.items[0],
519 "dry_run": self.dryRun(),
520 })
521
524 """/2/nodes/[node_name]/migrate resource.
525
526 """
527 POST_OPCODE = opcodes.OpNodeMigrate
528
530 """Migrate all primary instances from a node.
531
532 """
533 if self.queryargs:
534 # Support old-style requests
535 if "live" in self.queryargs and "mode" in self.queryargs:
536 raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
537 " be passed")
538
539 if "live" in self.queryargs:
540 if self._checkIntVariable("live", default=1):
541 mode = constants.HT_MIGRATION_LIVE
542 else:
543 mode = constants.HT_MIGRATION_NONLIVE
544 else:
545 mode = self._checkStringVariable("mode", default=None)
546
547 data = {
548 "mode": mode,
549 }
550 else:
551 data = self.request_body
552
553 return (data, {
554 "node_name": self.items[0],
555 })
556
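# Illustrative migration requests (node name hypothetical). Old-style, with
# query arguments only:
#
#   POST /2/nodes/node1.example.com/migrate?live=0
#
# New-style, passing an OpNodeMigrate-compatible body instead (the "non-live"
# literal is assumed to match constants.HT_MIGRATION_NONLIVE):
#
#   POST /2/nodes/node1.example.com/migrate
#     {"mode": "non-live"}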
559 """/2/nodes/[node_name]/modify resource.
560
561 """
562 POST_OPCODE = opcodes.OpNodeSetParams
563
565 """Changes parameters of a node.
566
567 """
568 assert len(self.items) == 1
569
570 return (self.request_body, {
571 "node_name": self.items[0],
572 })
573
576 """/2/nodes/[node_name]/storage resource.
577
578 """
579 # LUNodeQueryStorage acquires locks, hence restricting access to GET
580 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
581 GET_OPCODE = opcodes.OpNodeQueryStorage
582
584 """List storage available on a node.
585
586 """
587 storage_type = self._checkStringVariable("storage_type", None)
588 output_fields = self._checkStringVariable("output_fields", None)
589
590 if not output_fields:
591 raise http.HttpBadRequest("Missing the required 'output_fields'"
592 " parameter")
593
594 return ({}, {
595 "nodes": [self.items[0]],
596 "storage_type": storage_type,
597 "output_fields": output_fields.split(","),
598 })
599
602 """/2/nodes/[node_name]/storage/modify resource.
603
604 """
605 PUT_OPCODE = opcodes.OpNodeModifyStorage
606
608 """Modifies a storage volume on a node.
609
610 """
611 storage_type = self._checkStringVariable("storage_type", None)
612 name = self._checkStringVariable("name", None)
613
614 if not name:
615 raise http.HttpBadRequest("Missing the required 'name'"
616 " parameter")
617
618 changes = {}
619
620 if "allocatable" in self.queryargs:
621 changes[constants.SF_ALLOCATABLE] = \
622 bool(self._checkIntVariable("allocatable", default=1))
623
624 return ({}, {
625 "node_name": self.items[0],
626 "storage_type": storage_type,
627 "name": name,
628 "changes": changes,
629 })
630
633 """/2/nodes/[node_name]/storage/repair resource.
634
635 """
636 PUT_OPCODE = opcodes.OpRepairNodeStorage
637
639 """Repairs a storage volume on a node.
640
641 """
642 storage_type = self._checkStringVariable("storage_type", None)
643 name = self._checkStringVariable("name", None)
644 if not name:
645 raise http.HttpBadRequest("Missing the required 'name'"
646 " parameter")
647
648 return ({}, {
649 "node_name": self.items[0],
650 "storage_type": storage_type,
651 "name": name,
652 })
653
656 """/2/networks resource.
657
658 """
659 GET_OPCODE = opcodes.OpNetworkQuery
660 POST_OPCODE = opcodes.OpNetworkAdd
661 POST_RENAME = {
662 "name": "network_name",
663 }
664
666 """Create a network.
667
668 """
669 assert not self.items
670 return (self.request_body, {
671 "dry_run": self.dryRun(),
672 })
673
675 """Returns a list of all networks.
676
677 """
678 client = self.GetClient()
679
680 if self.useBulk():
681 bulkdata = client.QueryNetworks([], NET_FIELDS, False)
682 return baserlib.MapBulkFields(bulkdata, NET_FIELDS)
683 else:
684 data = client.QueryNetworks([], ["name"], False)
685 networknames = [row[0] for row in data]
686 return baserlib.BuildUriList(networknames, "/2/networks/%s",
687 uri_fields=("name", "uri"))
688
691 """/2/networks/[network_name] resource.
692
693 """
694 DELETE_OPCODE = opcodes.OpNetworkRemove
695
697 """Send information about a network.
698
699 """
700 network_name = self.items[0]
701 client = self.GetClient()
702
703 result = baserlib.HandleItemQueryErrors(client.QueryNetworks,
704 names=[network_name],
705 fields=NET_FIELDS,
706 use_locking=self.useLocking())
707
708 return baserlib.MapFields(NET_FIELDS, result[0])
709
711 """Delete a network.
712
713 """
714 assert len(self.items) == 1
715 return (self.request_body, {
716 "network_name": self.items[0],
717 "dry_run": self.dryRun(),
718 })
719
722 """/2/networks/[network_name]/connect resource.
723
724 """
725 PUT_OPCODE = opcodes.OpNetworkConnect
726
728 """Changes some parameters of node group.
729
730 """
731 assert self.items
732 return (self.request_body, {
733 "network_name": self.items[0],
734 "dry_run": self.dryRun(),
735 })
736
739 """/2/networks/[network_name]/disconnect resource.
740
741 """
742 PUT_OPCODE = opcodes.OpNetworkDisconnect
743
745 """Changes some parameters of node group.
746
747 """
748 assert self.items
749 return (self.request_body, {
750 "network_name": self.items[0],
751 "dry_run": self.dryRun(),
752 })
753
756 """/2/networks/[network_name]/modify resource.
757
758 """
759 PUT_OPCODE = opcodes.OpNetworkSetParams
760
762 """Changes some parameters of network.
763
764 """
765 assert self.items
766 return (self.request_body, {
767 "network_name": self.items[0],
768 })
769
772 """/2/groups resource.
773
774 """
775 GET_OPCODE = opcodes.OpGroupQuery
776 POST_OPCODE = opcodes.OpGroupAdd
777 POST_RENAME = {
778 "name": "group_name",
779 }
780
782 """Create a node group.
783
784
785 """
786 assert not self.items
787 return (self.request_body, {
788 "dry_run": self.dryRun(),
789 })
790
792 """Returns a list of all node groups.
793
794 """
795 client = self.GetClient(query=True)
796
797 if self.useBulk():
798 bulkdata = client.QueryGroups([], G_FIELDS, False)
799 return baserlib.MapBulkFields(bulkdata, G_FIELDS)
800 else:
801 data = client.QueryGroups([], ["name"], False)
802 groupnames = [row[0] for row in data]
803 return baserlib.BuildUriList(groupnames, "/2/groups/%s",
804 uri_fields=("name", "uri"))
805
808 """/2/groups/[group_name] resource.
809
810 """
811 DELETE_OPCODE = opcodes.OpGroupRemove
812
814 """Send information about a node group.
815
816 """
817 group_name = self.items[0]
818 client = self.GetClient(query=True)
819
820 result = baserlib.HandleItemQueryErrors(client.QueryGroups,
821 names=[group_name], fields=G_FIELDS,
822 use_locking=self.useLocking())
823
824 return baserlib.MapFields(G_FIELDS, result[0])
825
827 """Delete a node group.
828
829 """
830 assert len(self.items) == 1
831 return ({}, {
832 "group_name": self.items[0],
833 "dry_run": self.dryRun(),
834 })
835
838 """/2/groups/[group_name]/modify resource.
839
840 """
841 PUT_OPCODE = opcodes.OpGroupSetParams
842
844 """Changes some parameters of node group.
845
846 """
847 assert self.items
848 return (self.request_body, {
849 "group_name": self.items[0],
850 })
851
854 """/2/groups/[group_name]/rename resource.
855
856 """
857 PUT_OPCODE = opcodes.OpGroupRename
858
860 """Changes the name of a node group.
861
862 """
863 assert len(self.items) == 1
864 return (self.request_body, {
865 "group_name": self.items[0],
866 "dry_run": self.dryRun(),
867 })
868
871 """/2/groups/[group_name]/assign-nodes resource.
872
873 """
874 PUT_OPCODE = opcodes.OpGroupAssignNodes
875
877 """Assigns nodes to a group.
878
879 """
880 assert len(self.items) == 1
881 return (self.request_body, {
882 "group_name": self.items[0],
883 "dry_run": self.dryRun(),
884 "force": self.useForce(),
885 })
886
889 """/2/instances resource.
890
891 """
892 GET_OPCODE = opcodes.OpInstanceQuery
893 POST_OPCODE = opcodes.OpInstanceCreate
894 POST_RENAME = {
895 "os": "os_type",
896 "name": "instance_name",
897 }
898
900 """Returns a list of all available instances.
901
902 """
903 client = self.GetClient()
904
905 use_locking = self.useLocking()
906 if self.useBulk():
907 bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
908 return map(_UpdateBeparams, baserlib.MapBulkFields(bulkdata, I_FIELDS))
909 else:
910 instancesdata = client.QueryInstances([], ["name"], use_locking)
911 instanceslist = [row[0] for row in instancesdata]
912 return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
913 uri_fields=("id", "uri"))
914
916 """Create an instance.
917
918 @return: a job id
919
920 """
921 baserlib.CheckType(self.request_body, dict, "Body contents")
922
923 # Default to request data version 0
924 data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
925
926 if data_version == 0:
927 raise http.HttpBadRequest("Instance creation request version 0 is no"
928 " longer supported")
929 elif data_version != 1:
930 raise http.HttpBadRequest("Unsupported request data version %s" %
931 data_version)
932
933 data = self.request_body.copy()
934 # Remove "__version__"
935 data.pop(_REQ_DATA_VERSION, None)
936
937 return (data, {
938 "dry_run": self.dryRun(),
939 })
940
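# Illustrative body for POST /2/instances, request data version 1 (instance
# name and OS are hypothetical). "name" and "os" are renamed to
# "instance_name" and "os_type" via POST_RENAME; any further
# opcodes.OpInstanceCreate parameters may be passed alongside them:
#
#   {
#     "__version__": 1,
#     "name": "inst1.example.com",
#     "os": "debian-etch",
#     ...
#   }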
943 """/2/instances-multi-alloc resource.
944
945 """
946 POST_OPCODE = opcodes.OpInstanceMultiAlloc
947
949 """Try to allocate multiple instances.
950
951 @return: A dict with submitted jobs, allocatable instances and failed
952 allocations
953
954 """
955 if "instances" not in self.request_body:
956 raise http.HttpBadRequest("Request is missing required 'instances' field"
957 " in body")
958
959 op_id = {
960 "OP_ID": self.POST_OPCODE.OP_ID, # pylint: disable=E1101
961 }
962 body = objects.FillDict(self.request_body, {
963 "instances": [objects.FillDict(inst, op_id)
964 for inst in self.request_body["instances"]],
965 })
966
967 return (body, {
968 "dry_run": self.dryRun(),
969 })
970
973 """/2/instances/[instance_name] resource.
974
975 """
976 GET_OPCODE = opcodes.OpInstanceQuery
977 DELETE_OPCODE = opcodes.OpInstanceRemove
978
980 """Send information about an instance.
981
982 """
983 client = self.GetClient()
984 instance_name = self.items[0]
985
986 result = baserlib.HandleItemQueryErrors(client.QueryInstances,
987 names=[instance_name],
988 fields=I_FIELDS,
989 use_locking=self.useLocking())
990
991 return _UpdateBeparams(baserlib.MapFields(I_FIELDS, result[0]))
992
994 """Delete an instance.
995
996 """
997 assert len(self.items) == 1
998 return ({}, {
999 "instance_name": self.items[0],
1000 "ignore_failures": False,
1001 "dry_run": self.dryRun(),
1002 })
1003
1006 """/2/instances/[instance_name]/info resource.
1007
1008 """
1009 GET_OPCODE = opcodes.OpInstanceQueryData
1010
1012 """Request detailed instance information.
1013
1014 """
1015 assert len(self.items) == 1
1016 return ({}, {
1017 "instances": [self.items[0]],
1018 "static": bool(self._checkIntVariable("static", default=0)),
1019 })
1020
1023 """/2/instances/[instance_name]/reboot resource.
1024
1025 Implements an instance reboot.
1026
1027 """
1028 POST_OPCODE = opcodes.OpInstanceReboot
1029
1031 """Reboot an instance.
1032
1033 The URI takes type=[hard|soft|full] and
1034 ignore_secondaries=[False|True] parameters.
1035
1036 """
1037 return ({}, {
1038 "instance_name": self.items[0],
1039 "reboot_type":
1040 self.queryargs.get("type", [constants.INSTANCE_REBOOT_HARD])[0],
1041 "ignore_secondaries": bool(self._checkIntVariable("ignore_secondaries")),
1042 "dry_run": self.dryRun(),
1043 })
1044
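# Illustrative reboot request (instance name hypothetical); all parameters
# are query arguments, no body is required:
#
#   POST /2/instances/inst1.example.com/reboot?type=soft&ignore_secondaries=0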
1047 """/2/instances/[instance_name]/startup resource.
1048
1049 Implements an instance startup.
1050
1051 """
1052 PUT_OPCODE = opcodes.OpInstanceStartup
1053
1055 """Startup an instance.
1056
1057 The URI takes a force=[False|True] parameter to start the instance
1058 even if secondary disks are failing.
1059
1060 """
1061 return ({}, {
1062 "instance_name": self.items[0],
1063 "force": self.useForce(),
1064 "dry_run": self.dryRun(),
1065 "no_remember": bool(self._checkIntVariable("no_remember")),
1066 })
1067
1070 """/2/instances/[instance_name]/shutdown resource.
1071
1072 Implements an instance shutdown.
1073
1074 """
1075 PUT_OPCODE = opcodes.OpInstanceShutdown
1076
1078 """Shutdown an instance.
1079
1080 """
1081 return (self.request_body, {
1082 "instance_name": self.items[0],
1083 "no_remember": bool(self._checkIntVariable("no_remember")),
1084 "dry_run": self.dryRun(),
1085 })
1086
1089 """Parses a request for reinstalling an instance.
1090
1091 """
1092 if not isinstance(data, dict):
1093 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
1094
1095 ostype = baserlib.CheckParameter(data, "os", default=None)
1096 start = baserlib.CheckParameter(data, "start", exptype=bool,
1097 default=True)
1098 osparams = baserlib.CheckParameter(data, "osparams", default=None)
1099
1100 ops = [
1101 opcodes.OpInstanceShutdown(instance_name=name),
1102 opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
1103 osparams=osparams),
1104 ]
1105
1106 if start:
1107 ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
1108
1109 return ops
1110
1113 """/2/instances/[instance_name]/reinstall resource.
1114
1115 Implements an instance reinstall.
1116
1117 """
1118 POST_OPCODE = opcodes.OpInstanceReinstall
1119
1121 """Reinstall an instance.
1122
1123 The URI takes os=name and nostartup=[0|1] optional
1124 parameters. By default, the instance will be started
1125 automatically.
1126
1127 """
1128 if self.request_body:
1129 if self.queryargs:
1130 raise http.HttpBadRequest("Can't combine query and body parameters")
1131
1132 body = self.request_body
1133 elif self.queryargs:
1134 # Legacy interface, do not modify/extend
1135 body = {
1136 "os": self._checkStringVariable("os"),
1137 "start": not self._checkIntVariable("nostartup"),
1138 }
1139 else:
1140 body = {}
1141
1142 ops = _ParseInstanceReinstallRequest(self.items[0], body)
1143
1144 return self.SubmitJob(ops)
1145
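# Illustrative body for POST /2/instances/[instance_name]/reinstall (the OS
# name is hypothetical; the keys are the ones checked in
# _ParseInstanceReinstallRequest):
#
#   {
#     "os": "debian-etch",
#     "start": true,
#     "osparams": {}
#   }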
1148 """/2/instances/[instance_name]/replace-disks resource.
1149
1150 """
1151 POST_OPCODE = opcodes.OpInstanceReplaceDisks
1152
1154 """Replaces disks on an instance.
1155
1156 """
1157 static = {
1158 "instance_name": self.items[0],
1159 }
1160
1161 if self.request_body:
1162 data = self.request_body
1163 elif self.queryargs:
1164 # Legacy interface, do not modify/extend
1165 data = {
1166 "remote_node": self._checkStringVariable("remote_node", default=None),
1167 "mode": self._checkStringVariable("mode", default=None),
1168 "disks": self._checkStringVariable("disks", default=None),
1169 "iallocator": self._checkStringVariable("iallocator", default=None),
1170 }
1171 else:
1172 data = {}
1173
1174 # Parse disks
1175 try:
1176 raw_disks = data.pop("disks")
1177 except KeyError:
1178 pass
1179 else:
1180 if raw_disks:
1181 if ht.TListOf(ht.TInt)(raw_disks): # pylint: disable=E1102
1182 data["disks"] = raw_disks
1183 else:
1184 # Backwards compatibility for strings of the format "1, 2, 3"
1185 try:
1186 data["disks"] = [int(part) for part in raw_disks.split(",")]
1187 except (TypeError, ValueError), err:
1188 raise http.HttpBadRequest("Invalid disk index passed: %s" % err)
1189
1190 return (data, static)
1191
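# Illustrative body for POST /2/instances/[instance_name]/replace-disks
# (the allocator name is hypothetical). "disks" may be a list of indices or,
# for backwards compatibility, a string such as "0,2":
#
#   {
#     "disks": [0, 2],
#     "iallocator": "hail"
#   }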
1194 """/2/instances/[instance_name]/activate-disks resource.
1195
1196 """
1197 PUT_OPCODE = opcodes.OpInstanceActivateDisks
1198
1200 """Activate disks for an instance.
1201
1202 The URI might contain ignore_size to ignore the currently recorded size.
1203
1204 """
1205 return ({}, {
1206 "instance_name": self.items[0],
1207 "ignore_size": bool(self._checkIntVariable("ignore_size")),
1208 })
1209
1212 """/2/instances/[instance_name]/deactivate-disks resource.
1213
1214 """
1215 PUT_OPCODE = opcodes.OpInstanceDeactivateDisks
1216
1224
1227 """/2/instances/[instance_name]/recreate-disks resource.
1228
1229 """
1230 POST_OPCODE = opcodes.OpInstanceRecreateDisks
1231
1239
1242 """/2/instances/[instance_name]/prepare-export resource.
1243
1244 """
1245 PUT_OPCODE = opcodes.OpBackupPrepare
1246
1248 """Prepares an export for an instance.
1249
1250 """
1251 return ({}, {
1252 "instance_name": self.items[0],
1253 "mode": self._checkStringVariable("mode"),
1254 })
1255
1258 """/2/instances/[instance_name]/export resource.
1259
1260 """
1261 PUT_OPCODE = opcodes.OpBackupExport
1262 PUT_RENAME = {
1263 "destination": "target_node",
1264 }
1265
1267 """Exports an instance.
1268
1269 """
1270 return (self.request_body, {
1271 "instance_name": self.items[0],
1272 })
1273
1276 """/2/instances/[instance_name]/migrate resource.
1277
1278 """
1279 PUT_OPCODE = opcodes.OpInstanceMigrate
1280
1282 """Migrates an instance.
1283
1284 """
1285 return (self.request_body, {
1286 "instance_name": self.items[0],
1287 })
1288
1291 """/2/instances/[instance_name]/failover resource.
1292
1293 """
1294 PUT_OPCODE = opcodes.OpInstanceFailover
1295
1297 """Does a failover of an instance.
1298
1299 """
1300 return (self.request_body, {
1301 "instance_name": self.items[0],
1302 })
1303
1306 """/2/instances/[instance_name]/rename resource.
1307
1308 """
1309 PUT_OPCODE = opcodes.OpInstanceRename
1310
1312 """Changes the name of an instance.
1313
1314 """
1315 return (self.request_body, {
1316 "instance_name": self.items[0],
1317 })
1318
1321 """/2/instances/[instance_name]/modify resource.
1322
1323 """
1324 PUT_OPCODE = opcodes.OpInstanceSetParams
1325
1327 """Changes parameters of an instance.
1328
1329 """
1330 return (self.request_body, {
1331 "instance_name": self.items[0],
1332 })
1333
1336 """/2/instances/[instance_name]/disk/[disk_index]/grow resource.
1337
1338 """
1339 POST_OPCODE = opcodes.OpInstanceGrowDisk
1340
1342 """Increases the size of an instance disk.
1343
1344 """
1345 return (self.request_body, {
1346 "instance_name": self.items[0],
1347 "disk": int(self.items[1]),
1348 })
1349
1352 """/2/instances/[instance_name]/console resource.
1353
1354 """
1355 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
1356 GET_OPCODE = opcodes.OpInstanceConsole
1357
1359 """Request information for connecting to instance's console.
1360
1361 @return: Serialized instance console description, see
1362 L{objects.InstanceConsole}
1363
1364 """
1365 client = self.GetClient()
1366
1367 ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)
1368
1369 if console is None:
1370 raise http.HttpServiceUnavailable("Instance console unavailable")
1371
1372 assert isinstance(console, dict)
1373 return console
1374
1377 """Tries to extract C{fields} query parameter.
1378
1379 @type args: dictionary
1380 @rtype: list of string
1381 @raise http.HttpBadRequest: When parameter can't be found
1382
1383 """
1384 try:
1385 fields = args["fields"]
1386 except KeyError:
1387 raise http.HttpBadRequest("Missing 'fields' query argument")
1388
1389 return _SplitQueryFields(fields[0])
1390
1393 """Splits fields as given for a query request.
1394
1395 @type fields: string
1396 @rtype: list of string
1397
1398 """
1399 return [i.strip() for i in fields.split(",")]
1400
1403 """/2/query/[resource] resource.
1404
1405 """
1406 # Results might contain sensitive information
1407 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
1408 PUT_ACCESS = GET_ACCESS
1409 GET_OPCODE = opcodes.OpQuery
1410 PUT_OPCODE = opcodes.OpQuery
1411
1414
1416 """Returns resource information.
1417
1418 @return: Query result, see L{objects.QueryResponse}
1419
1420 """
1421 return self._Query(_GetQueryFields(self.queryargs), None)
1422
1424 """Submits job querying for resources.
1425
1426 @return: Query result, see L{objects.QueryResponse}
1427
1428 """
1429 body = self.request_body
1430
1431 baserlib.CheckType(body, dict, "Body contents")
1432
1433 try:
1434 fields = body["fields"]
1435 except KeyError:
1436 fields = _GetQueryFields(self.queryargs)
1437
1438 qfilter = body.get("qfilter", None)
1439 # TODO: remove this after 2.7
1440 if qfilter is None:
1441 qfilter = body.get("filter", None)
1442
1443 return self._Query(fields, qfilter)
1444
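# Illustrative body for PUT /2/query/[resource] (the field names depend on
# the queried resource and the filter shown is only an assumed example of
# the query filter language, which is defined outside this module):
#
#   {
#     "fields": ["name", "status"],
#     "qfilter": ["=", "name", "inst1.example.com"]
#   }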
1447 """/2/query/[resource]/fields resource.
1448
1449 """
1450 GET_OPCODE = opcodes.OpQueryFields
1451
1453 """Retrieves list of available fields for a resource.
1454
1455 @return: List of serialized L{objects.QueryFieldDefinition}
1456
1457 """
1458 try:
1459 raw_fields = self.queryargs["fields"]
1460 except KeyError:
1461 fields = None
1462 else:
1463 fields = _SplitQueryFields(raw_fields[0])
1464
1465 return self.GetClient().QueryFields(self.items[0], fields).ToDict()
1466
1469 """Quasiclass for tagging resources.
1470
1471 Manages tags. When inheriting this class you must define the
1472 TAG_LEVEL for it.
1473
1474 """
1475 TAG_LEVEL = None
1476 GET_OPCODE = opcodes.OpTagsGet
1477 PUT_OPCODE = opcodes.OpTagsSet
1478 DELETE_OPCODE = opcodes.OpTagsDel
1479
1481 """A tag resource constructor.
1482
1483 We have to override the default to sort out cluster naming case.
1484
1485 """
1486 baserlib.OpcodeResource.__init__(self, items, queryargs, req, **kwargs)
1487
1488 if self.TAG_LEVEL == constants.TAG_CLUSTER:
1489 self.name = None
1490 else:
1491 self.name = items[0]
1492
1494 """Returns a list of tags.
1495
1496 Example: ["tag1", "tag2", "tag3"]
1497
1498 """
1499 kind = self.TAG_LEVEL
1500
1501 if kind in (constants.TAG_INSTANCE,
1502 constants.TAG_NODEGROUP,
1503 constants.TAG_NODE):
1504 if not self.name:
1505 raise http.HttpBadRequest("Missing name on tag request")
1506
1507 cl = self.GetClient(query=True)
1508 tags = list(cl.QueryTags(kind, self.name))
1509
1510 elif kind == constants.TAG_CLUSTER:
1511 assert not self.name
1512 # TODO: Use query API?
1513 ssc = ssconf.SimpleStore()
1514 tags = ssc.GetClusterTags()
1515
1516 return list(tags)
1517
1519 """Add a set of tags.
1520
1521 The request, as a list of strings, should be PUT to this URI. The
1522 result will be a job id.
1523
1524 """
1525 return ({}, {
1526 "kind": self.TAG_LEVEL,
1527 "name": self.name,
1528 "tags": self.queryargs.get("tag", []),
1529 "dry_run": self.dryRun(),
1530 })
1531
1533 """Delete a tag.
1534
1535 In order to delete a set of tags, the DELETE
1536 request should be addressed to a URI like:
1537 /tags?tag=[tag]&tag=[tag]
1538
1539 """
1540 # Re-use code
1541 return self.GetPutOpInput()
1542
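# Illustrative tag operations, here on the instance-level tag resource
# (instance name and tags are hypothetical); tags are always passed as
# repeated "tag" query arguments:
#
#   GET    /2/instances/inst1.example.com/tags
#   PUT    /2/instances/inst1.example.com/tags?tag=foo&tag=bar
#   DELETE /2/instances/inst1.example.com/tags?tag=bar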
1551
1560
1569
1578
1587