#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Remote API resource implementations.

PUT or POST?
============

According to RFC2616 the main difference between PUT and POST is that
POST can create new resources but PUT can only create the resource the
URI was pointing to on the PUT request.

In the context of this module POST on ``/2/instances`` to change an existing
entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
new instance) with a name specified in the request.

Quoting from RFC2616, section 9.6::

  The fundamental difference between the POST and PUT requests is reflected in
  the different meaning of the Request-URI. The URI in a POST request
  identifies the resource that will handle the enclosed entity. That resource
  might be a data-accepting process, a gateway to some other protocol, or a
  separate entity that accepts annotations. In contrast, the URI in a PUT
  request identifies the entity enclosed with the request -- the user agent
  knows what URI is intended and the server MUST NOT attempt to apply the
  request to some other resource. If the server desires that the request be
  applied to a different URI, it MUST send a 301 (Moved Permanently) response;
  the user agent MAY then make its own decision regarding whether or not to
  redirect the request.

So when adding new methods, if they are operating on the URI entity itself,
PUT should be preferred over POST.

"""
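
# For example (illustrative, using resources defined further down in this
# module): /2/instances/[instance_name]/startup and
# /2/instances/[instance_name]/shutdown act on the entity named by the URI and
# are therefore exposed via PUT.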

# pylint: disable=C0103

# C0103: Invalid name, since the R_* names are not conforming

from ganeti import opcodes
from ganeti import objects
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import rapi
from ganeti import ht
from ganeti import compat
from ganeti import ssconf
from ganeti.rapi import baserlib


_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.uuids", "nic.names",
            "nic.links", "nic.networks", "nic.networks.names", "nic.bridges",
            "network_port",
            "disk.sizes", "disk.spindles", "disk_usage", "disk.uuids",
            "disk.names",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree", "sptotal", "spfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnos", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            "ndparams",
            "group.uuid",
            ] + _COMMON_FIELDS

NET_FIELDS = ["name", "network", "gateway",
              "network6", "gateway6",
              "mac_prefix",
              "free_count", "reserved_count",
              "map", "group_list", "inst_list",
              "external_reservations",
              ] + _COMMON_FIELDS

G_FIELDS = [
  "alloc_policy",
  "name",
  "node_cnt",
  "node_list",
  "ipolicy",
  "custom_ipolicy",
  "diskparams",
  "custom_diskparams",
  "ndparams",
  "custom_ndparams",
  ] + _COMMON_FIELDS

J_FIELDS_BULK = [
  "id", "ops", "status", "summary",
  "opstatus",
  "received_ts", "start_ts", "end_ts",
  ]

J_FIELDS = J_FIELDS_BULK + [
  "oplog",
  "opresult",
  ]

_NR_DRAINED = "drained"
_NR_MASTER_CANDIDATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

_NR_MAP = {
  constants.NR_MASTER: _NR_MASTER,
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
  constants.NR_DRAINED: _NR_DRAINED,
  constants.NR_OFFLINE: _NR_OFFLINE,
  constants.NR_REGULAR: _NR_REGULAR,
  }

assert frozenset(_NR_MAP.keys()) == constants.NR_ALL

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Feature string for node migration version 1
_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"

# Feature string for node evacuation with LU-generated jobs
_NODE_EVAC_RES1 = "node-evac-res1"

ALL_FEATURES = compat.UniqueFrozenset([
  _INST_CREATE_REQV1,
  _INST_REINSTALL_REQV1,
  _NODE_MIGRATE_REQV1,
  _NODE_EVAC_RES1,
  ])

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10


# FIXME: For compatibility we update the beparams/memory field. Needs to be
# removed in Ganeti 2.8
def _UpdateBeparams(inst):
  """Updates the beparams dict of inst to support the memory field.

  @param inst: Inst dict
  @return: Updated inst dict

  """
  beparams = inst["beparams"]
  beparams[constants.BE_MEMORY] = beparams[constants.BE_MAXMEM]

  return inst
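
# Illustrative example (hypothetical values; "maxmem" and "memory" stand for
# the corresponding constants.BE_* keys):
#
#   >>> inst = {"beparams": {"maxmem": 512, "minmem": 256}}
#   >>> _UpdateBeparams(inst)["beparams"]["memory"]
#   512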


class R_version(baserlib.ResourceBase):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION


class R_2_info(baserlib.OpcodeResource):
  """/2/info resource.

  """
  GET_OPCODE = opcodes.OpClusterQuery

  def GET(self):
    """Returns cluster information.

    """
    client = self.GetClient(query=True)
    return client.QueryClusterInfo()


class R_2_features(baserlib.ResourceBase):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns list of optional RAPI features implemented.

    """
    return list(ALL_FEATURES)


class R_2_os(baserlib.OpcodeResource):
  """/2/os resource.

  """
  GET_OPCODE = opcodes.OpOsDiagnose

  def GET(self):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = self.GetClient()
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    job_id = self.SubmitJob([op], cl=cl)
    # We use a custom feedback function; instead of printing, we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, variants) in diagnose_data:
      os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names


class R_2_jobs(baserlib.ResourceBase):
  """/2/jobs resource.

  """
  def GET(self):
    """Returns a dictionary of jobs.

    @return: a dictionary with job ids and uris.

    """
    client = self.GetClient(query=True)

    if self.useBulk():
      bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
      return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
    else:
      jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
      return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
                                   uri_fields=("id", "uri"))


class R_2_jobs_id(baserlib.ResourceBase):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries for each
              opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    job_id = self.items[0]
    result = self.GetClient(query=True).QueryJobs([job_id, ], J_FIELDS)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(J_FIELDS, result)


class R_2_jobs_id_wait(baserlib.ResourceBase):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = self.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }
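
# Illustrative JSON request body for /2/jobs/[job_id]/wait (field names as
# read by GET above, values hypothetical):
#   {"fields": ["status"], "previous_job_info": null, "previous_log_serial": null}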


class R_2_nodes(baserlib.OpcodeResource):
  """/2/nodes resource.

  """
  GET_OPCODE = opcodes.OpNodeQuery

  def GET(self):
    """Returns a list of all nodes.

    """
    client = self.GetClient(query=True)

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))


class R_2_nodes_name(baserlib.OpcodeResource):
  """/2/nodes/[node_name] resource.

  """
  GET_OPCODE = opcodes.OpNodeQuery

  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = self.GetClient(query=True)

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])


class R_2_nodes_name_powercycle(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/powercycle resource.

  """
  POST_OPCODE = opcodes.OpNodePowercycle

  def GetPostOpInput(self):
    """Tries to powercycle a node.

    """
    return (self.request_body, {
      "node_name": self.items[0],
      "force": self.useForce(),
      })


class R_2_nodes_name_role(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/role resource.

  """
  PUT_OPCODE = opcodes.OpNodeSetParams

  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = self.GetClient(query=True)
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def GetPutOpInput(self):
    """Sets the node role.

    """
    baserlib.CheckType(self.request_body, basestring, "Body contents")

    role = self.request_body

    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIDATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    assert len(self.items) == 1

    return ({}, {
      "node_name": self.items[0],
      "master_candidate": candidate,
      "offline": offline,
      "drained": drained,
      "force": self.useForce(),
      "auto_promote": bool(self._checkIntVariable("auto-promote", default=0)),
      })
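
# Illustrative example: a PUT to /2/nodes/[node_name]/role with the plain
# JSON string "drained" as body results in OpNodeSetParams with drained=True
# and master_candidate=offline=None for the node named in the URI.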


class R_2_nodes_name_evacuate(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/evacuate resource.

  """
  POST_OPCODE = opcodes.OpNodeEvacuate

  def GetPostOpInput(self):
    """Evacuate all instances off a node.

    """
    return (self.request_body, {
      "node_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_nodes_name_migrate(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/migrate resource.

  """
  POST_OPCODE = opcodes.OpNodeMigrate

  def GetPostOpInput(self):
    """Migrate all primary instances from a node.

    """
    if self.queryargs:
      # Support old-style requests
      if "live" in self.queryargs and "mode" in self.queryargs:
        raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                  " be passed")

      if "live" in self.queryargs:
        if self._checkIntVariable("live", default=1):
          mode = constants.HT_MIGRATION_LIVE
        else:
          mode = constants.HT_MIGRATION_NONLIVE
      else:
        mode = self._checkStringVariable("mode", default=None)

      data = {
        "mode": mode,
        }
    else:
      data = self.request_body

    return (data, {
      "node_name": self.items[0],
      })


class R_2_nodes_name_modify(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/modify resource.

  """
  POST_OPCODE = opcodes.OpNodeSetParams

  def GetPostOpInput(self):
    """Changes parameters of a node.

    """
    assert len(self.items) == 1

    return (self.request_body, {
      "node_name": self.items[0],
      })


class R_2_nodes_name_storage(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUNodeQueryStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
  GET_OPCODE = opcodes.OpNodeQueryStorage

  def GetGetOpInput(self):
    """List storage available on a node.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    output_fields = self._checkStringVariable("output_fields", None)

    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    return ({}, {
      "nodes": [self.items[0]],
      "storage_type": storage_type,
      "output_fields": output_fields.split(","),
      })


class R_2_nodes_name_storage_modify(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  PUT_OPCODE = opcodes.OpNodeModifyStorage

  def GetPutOpInput(self):
    """Modifies a storage volume on a node.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    name = self._checkStringVariable("name", None)

    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    return ({}, {
      "node_name": self.items[0],
      "storage_type": storage_type,
      "name": name,
      "changes": changes,
      })


class R_2_nodes_name_storage_repair(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  PUT_OPCODE = opcodes.OpRepairNodeStorage

  def GetPutOpInput(self):
    """Repairs a storage volume on a node.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    return ({}, {
      "node_name": self.items[0],
      "storage_type": storage_type,
      "name": name,
      })


class R_2_networks(baserlib.OpcodeResource):
  """/2/networks resource.

  """
  GET_OPCODE = opcodes.OpNetworkQuery
  POST_OPCODE = opcodes.OpNetworkAdd
  POST_RENAME = {
    "name": "network_name",
    }

  def GetPostOpInput(self):
    """Create a network.

    """
    assert not self.items
    return (self.request_body, {
      "dry_run": self.dryRun(),
      })

  def GET(self):
    """Returns a list of all networks.

    """
    client = self.GetClient(query=True)

    if self.useBulk():
      bulkdata = client.QueryNetworks([], NET_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, NET_FIELDS)
    else:
      data = client.QueryNetworks([], ["name"], False)
      networknames = [row[0] for row in data]
      return baserlib.BuildUriList(networknames, "/2/networks/%s",
                                   uri_fields=("name", "uri"))


class R_2_networks_name(baserlib.OpcodeResource):
  """/2/networks/[network_name] resource.

  """
  DELETE_OPCODE = opcodes.OpNetworkRemove

  def GET(self):
    """Send information about a network.

    """
    network_name = self.items[0]
    client = self.GetClient(query=True)

    result = baserlib.HandleItemQueryErrors(client.QueryNetworks,
                                            names=[network_name],
                                            fields=NET_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(NET_FIELDS, result[0])

  def GetDeleteOpInput(self):
    """Delete a network.

    """
    assert len(self.items) == 1
    return (self.request_body, {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_networks_name_connect(baserlib.OpcodeResource):
  """/2/networks/[network_name]/connect resource.

  """
  PUT_OPCODE = opcodes.OpNetworkConnect

  def GetPutOpInput(self):
    """Connects a network to a node group.

    """
    assert self.items
    return (self.request_body, {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_networks_name_disconnect(baserlib.OpcodeResource):
  """/2/networks/[network_name]/disconnect resource.

  """
  PUT_OPCODE = opcodes.OpNetworkDisconnect

  def GetPutOpInput(self):
    """Disconnects a network from a node group.

    """
    assert self.items
    return (self.request_body, {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_networks_name_modify(baserlib.OpcodeResource):
  """/2/networks/[network_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpNetworkSetParams

  def GetPutOpInput(self):
    """Changes some parameters of a network.

    """
    assert self.items
    return (self.request_body, {
      "network_name": self.items[0],
      })


class R_2_groups(baserlib.OpcodeResource):
  """/2/groups resource.

  """
  GET_OPCODE = opcodes.OpGroupQuery
  POST_OPCODE = opcodes.OpGroupAdd
  POST_RENAME = {
    "name": "group_name",
    }

  def GetPostOpInput(self):
    """Create a node group.

    """
    assert not self.items
    return (self.request_body, {
      "dry_run": self.dryRun(),
      })

  def GET(self):
    """Returns a list of all node groups.

    """
    client = self.GetClient(query=True)

    if self.useBulk():
      bulkdata = client.QueryGroups([], G_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, G_FIELDS)
    else:
      data = client.QueryGroups([], ["name"], False)
      groupnames = [row[0] for row in data]
      return baserlib.BuildUriList(groupnames, "/2/groups/%s",
                                   uri_fields=("name", "uri"))


class R_2_groups_name(baserlib.OpcodeResource):
  """/2/groups/[group_name] resource.

  """
  DELETE_OPCODE = opcodes.OpGroupRemove

  def GET(self):
    """Send information about a node group.

    """
    group_name = self.items[0]
    client = self.GetClient(query=True)

    result = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                            names=[group_name], fields=G_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(G_FIELDS, result[0])

  def GetDeleteOpInput(self):
    """Delete a node group.

    """
    assert len(self.items) == 1
    return ({}, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_groups_name_modify(baserlib.OpcodeResource):
  """/2/groups/[group_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpGroupSetParams

  def GetPutOpInput(self):
    """Changes some parameters of a node group.

    """
    assert self.items
    return (self.request_body, {
      "group_name": self.items[0],
      })


class R_2_groups_name_rename(baserlib.OpcodeResource):
  """/2/groups/[group_name]/rename resource.

  """
  PUT_OPCODE = opcodes.OpGroupRename

  def GetPutOpInput(self):
    """Changes the name of a node group.

    """
    assert len(self.items) == 1
    return (self.request_body, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_groups_name_assign_nodes(baserlib.OpcodeResource):
  """/2/groups/[group_name]/assign-nodes resource.

  """
  PUT_OPCODE = opcodes.OpGroupAssignNodes

  def GetPutOpInput(self):
    """Assigns nodes to a group.

    """
    assert len(self.items) == 1
    return (self.request_body, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      "force": self.useForce(),
      })


def _ConvertUsbDevices(data):
  """Convert in place the usb_devices string to the proper format.

  In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
  comma to space because commas cannot be accepted on the command line
  (they already act as the separator between different hvparams). RAPI
  should be able to accept commas for backwards compatibility, but we want
  it to also accept the new space separator. Therefore, we convert
  spaces into commas here and keep the old parsing logic elsewhere.

  """
  try:
    hvparams = data["hvparams"]
    usb_devices = hvparams[constants.HV_USB_DEVICES]
    hvparams[constants.HV_USB_DEVICES] = usb_devices.replace(" ", ",")
    data["hvparams"] = hvparams
  except KeyError:
    # No usb_devices, no modification required
    pass
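
# Illustrative example (hypothetical device IDs):
#
#   >>> data = {"hvparams": {constants.HV_USB_DEVICES: "0x046b:ff10 0x046b:ff20"}}
#   >>> _ConvertUsbDevices(data)
#   >>> data["hvparams"][constants.HV_USB_DEVICES]
#   '0x046b:ff10,0x046b:ff20'
#
# Requests without a usb_devices hvparam are left untouched.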


class R_2_instances(baserlib.OpcodeResource):
  """/2/instances resource.

  """
  GET_OPCODE = opcodes.OpInstanceQuery
  POST_OPCODE = opcodes.OpInstanceCreate
  POST_RENAME = {
    "os": "os_type",
    "name": "instance_name",
    }

  def GET(self):
    """Returns a list of all available instances.

    """
    client = self.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return map(_UpdateBeparams, baserlib.MapBulkFields(bulkdata, I_FIELDS))
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def GetPostOpInput(self):
    """Create an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      raise http.HttpBadRequest("Instance creation request version 0 is no"
                                " longer supported")
    elif data_version != 1:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    data = self.request_body.copy()
    # Remove "__version__"
    data.pop(_REQ_DATA_VERSION, None)

    _ConvertUsbDevices(data)

    return (data, {
      "dry_run": self.dryRun(),
      })


class R_2_instances_multi_alloc(baserlib.OpcodeResource):
  """/2/instances-multi-alloc resource.

  """
  POST_OPCODE = opcodes.OpInstanceMultiAlloc

  def GetPostOpInput(self):
    """Try to allocate multiple instances.

    @return: A dict with submitted jobs, allocatable instances and failed
             allocations

    """
    if "instances" not in self.request_body:
      raise http.HttpBadRequest("Request is missing required 'instances' field"
                                " in body")

    # Unlike most other RAPI calls, this one is composed of individual opcodes,
    # and we have to do the filling ourselves
    OPCODE_RENAME = {
      "os": "os_type",
      "name": "instance_name",
      }

    body = objects.FillDict(self.request_body, {
      "instances": [
        baserlib.FillOpcode(opcodes.OpInstanceCreate, inst, {},
                            rename=OPCODE_RENAME)
        for inst in self.request_body["instances"]
        ],
      })

    return (body, {
      "dry_run": self.dryRun(),
      })
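
# Illustrative request body fragment (hypothetical names, remaining
# OpInstanceCreate parameters omitted): each "instances" entry is filled into
# an OpInstanceCreate opcode, with "name" and "os" renamed to "instance_name"
# and "os_type" as per OPCODE_RENAME above:
#   {"instances": [{"name": "inst1.example.com", "os": "debian-etch", ...}]}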


class R_2_instances_name(baserlib.OpcodeResource):
  """/2/instances/[instance_name] resource.

  """
  GET_OPCODE = opcodes.OpInstanceQuery
  DELETE_OPCODE = opcodes.OpInstanceRemove

  def GET(self):
    """Send information about an instance.

    """
    client = self.GetClient()
    instance_name = self.items[0]

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    return _UpdateBeparams(baserlib.MapFields(I_FIELDS, result[0]))

  def GetDeleteOpInput(self):
    """Delete an instance.

    """
    assert len(self.items) == 1
    return ({}, {
      "instance_name": self.items[0],
      "ignore_failures": False,
      "dry_run": self.dryRun(),
      })


class R_2_instances_name_info(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/info resource.

  """
  GET_OPCODE = opcodes.OpInstanceQueryData

  def GetGetOpInput(self):
    """Request detailed instance information.

    """
    assert len(self.items) == 1
    return ({}, {
      "instances": [self.items[0]],
      "static": bool(self._checkIntVariable("static", default=0)),
      })


class R_2_instances_name_reboot(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  POST_OPCODE = opcodes.OpInstanceReboot

  def GetPostOpInput(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    return ({}, {
      "instance_name": self.items[0],
      "reboot_type":
        self.queryargs.get("type", [constants.INSTANCE_REBOOT_HARD])[0],
      "ignore_secondaries": bool(self._checkIntVariable("ignore_secondaries")),
      "dry_run": self.dryRun(),
      })
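
# Illustrative example: POST /2/instances/[instance_name]/reboot?type=soft
# requests a soft reboot; without a "type" query argument the default,
# constants.INSTANCE_REBOOT_HARD, is used.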


class R_2_instances_name_startup(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  PUT_OPCODE = opcodes.OpInstanceStartup

  def GetPutOpInput(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    return ({}, {
      "instance_name": self.items[0],
      "force": self.useForce(),
      "dry_run": self.dryRun(),
      "no_remember": bool(self._checkIntVariable("no_remember")),
      })


class R_2_instances_name_shutdown(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  PUT_OPCODE = opcodes.OpInstanceShutdown

  def GetPutOpInput(self):
    """Shutdown an instance.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      "no_remember": bool(self._checkIntVariable("no_remember")),
      "dry_run": self.dryRun(),
      })


def _ParseInstanceReinstallRequest(name, data):
  """Parses a request for reinstalling an instance.

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  ostype = baserlib.CheckParameter(data, "os", default=None)
  start = baserlib.CheckParameter(data, "start", exptype=bool,
                                  default=True)
  osparams = baserlib.CheckParameter(data, "osparams", default=None)

  ops = [
    opcodes.OpInstanceShutdown(instance_name=name),
    opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
                                osparams=osparams),
    ]

  if start:
    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))

  return ops


class R_2_instances_name_reinstall(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  POST_OPCODE = opcodes.OpInstanceReinstall

  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    if self.request_body:
      if self.queryargs:
        raise http.HttpBadRequest("Can't combine query and body parameters")

      body = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      body = {
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),
        }
    else:
      body = {}

    ops = _ParseInstanceReinstallRequest(self.items[0], body)

    return self.SubmitJob(ops)


class R_2_instances_name_replace_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceReplaceDisks

  def GetPostOpInput(self):
    """Replaces disks on an instance.

    """
    static = {
      "instance_name": self.items[0],
      }

    if self.request_body:
      data = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      data = {
        "remote_node": self._checkStringVariable("remote_node", default=None),
        "mode": self._checkStringVariable("mode", default=None),
        "disks": self._checkStringVariable("disks", default=None),
        "iallocator": self._checkStringVariable("iallocator", default=None),
        }
    else:
      data = {}

    # Parse disks
    try:
      raw_disks = data.pop("disks")
    except KeyError:
      pass
    else:
      if raw_disks:
        if ht.TListOf(ht.TInt)(raw_disks): # pylint: disable=E1102
          data["disks"] = raw_disks
        else:
          # Backwards compatibility for strings of the format "1, 2, 3"
          try:
            data["disks"] = [int(part) for part in raw_disks.split(",")]
          except (TypeError, ValueError), err:
            raise http.HttpBadRequest("Invalid disk index passed: %s" % err)

    return (data, static)
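
# Illustrative example: a legacy-style string value "disks": "0, 2" is
# converted to [0, 2], an already-typed list such as [0, 2] is passed through
# unchanged, and a malformed string such as "0,x" raises HttpBadRequest.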


class R_2_instances_name_activate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  PUT_OPCODE = opcodes.OpInstanceActivateDisks

  def GetPutOpInput(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    return ({}, {
      "instance_name": self.items[0],
      "ignore_size": bool(self._checkIntVariable("ignore_size")),
      })


class R_2_instances_name_deactivate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  PUT_OPCODE = opcodes.OpInstanceDeactivateDisks


class R_2_instances_name_recreate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/recreate-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceRecreateDisks


class R_2_instances_name_prepare_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  PUT_OPCODE = opcodes.OpBackupPrepare

  def GetPutOpInput(self):
    """Prepares an export for an instance.

    """
    return ({}, {
      "instance_name": self.items[0],
      "mode": self._checkStringVariable("mode"),
      })


class R_2_instances_name_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/export resource.

  """
  PUT_OPCODE = opcodes.OpBackupExport
  PUT_RENAME = {
    "destination": "target_node",
    }

  def GetPutOpInput(self):
    """Exports an instance.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_migrate(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/migrate resource.

  """
  PUT_OPCODE = opcodes.OpInstanceMigrate

  def GetPutOpInput(self):
    """Migrates an instance.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_failover(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/failover resource.

  """
  PUT_OPCODE = opcodes.OpInstanceFailover

  def GetPutOpInput(self):
    """Does a failover of an instance.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_rename(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/rename resource.

  """
  PUT_OPCODE = opcodes.OpInstanceRename

  def GetPutOpInput(self):
    """Changes the name of an instance.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_modify(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpInstanceSetParams

  def GetPutOpInput(self):
    """Changes parameters of an instance.

    """
    data = self.request_body.copy()
    _ConvertUsbDevices(data)

    return (data, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_disk_grow(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  POST_OPCODE = opcodes.OpInstanceGrowDisk

  def GetPostOpInput(self):
    """Increases the size of an instance disk.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      "disk": int(self.items[1]),
      })


class R_2_instances_name_console(baserlib.ResourceBase):
  """/2/instances/[instance_name]/console resource.

  """
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  GET_OPCODE = opcodes.OpInstanceConsole

  def GET(self):
    """Request information for connecting to instance's console.

    @return: Serialized instance console description, see
             L{objects.InstanceConsole}

    """
    client = self.GetClient()

    ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)

    if console is None:
      raise http.HttpServiceUnavailable("Instance console unavailable")

    assert isinstance(console, dict)
    return console


def _GetQueryFields(args):
  """Tries to extract C{fields} query parameter.

  @type args: dictionary
  @rtype: list of string
  @raise http.HttpBadRequest: When parameter can't be found

  """
  try:
    fields = args["fields"]
  except KeyError:
    raise http.HttpBadRequest("Missing 'fields' query argument")

  return _SplitQueryFields(fields[0])


def _SplitQueryFields(fields):
  """Splits fields as given for a query request.

  @type fields: string
  @rtype: list of string

  """
  return [i.strip() for i in fields.split(",")]
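
# Illustrative examples:
#
#   >>> _SplitQueryFields("name, ip ,mtotal")
#   ['name', 'ip', 'mtotal']
#   >>> _GetQueryFields({"fields": ["name,ip"]})
#   ['name', 'ip']
#
# _GetQueryFields raises HttpBadRequest when the "fields" argument is missing.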


class R_2_query(baserlib.ResourceBase):
  """/2/query/[resource] resource.

  """
  # Results might contain sensitive information
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  PUT_ACCESS = GET_ACCESS
  GET_OPCODE = opcodes.OpQuery
  PUT_OPCODE = opcodes.OpQuery

  def _Query(self, fields, qfilter):
    return self.GetClient().Query(self.items[0], fields, qfilter).ToDict()

  def GET(self):
    """Returns resource information.

    @return: Query result, see L{objects.QueryResponse}

    """
    return self._Query(_GetQueryFields(self.queryargs), None)

  def PUT(self):
    """Submits job querying for resources.

    @return: Query result, see L{objects.QueryResponse}

    """
    body = self.request_body

    baserlib.CheckType(body, dict, "Body contents")

    try:
      fields = body["fields"]
    except KeyError:
      fields = _GetQueryFields(self.queryargs)

    qfilter = body.get("qfilter", None)
    # TODO: remove this after 2.7
    if qfilter is None:
      qfilter = body.get("filter", None)

    return self._Query(fields, qfilter)


class R_2_query_fields(baserlib.ResourceBase):
  """/2/query/[resource]/fields resource.

  """
  GET_OPCODE = opcodes.OpQueryFields

  def GET(self):
    """Retrieves list of available fields for a resource.

    @return: List of serialized L{objects.QueryFieldDefinition}

    """
    try:
      raw_fields = self.queryargs["fields"]
    except KeyError:
      fields = None
    else:
      fields = _SplitQueryFields(raw_fields[0])

    return self.GetClient().QueryFields(self.items[0], fields).ToDict()


class _R_Tags(baserlib.OpcodeResource):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None
  GET_OPCODE = opcodes.OpTagsGet
  PUT_OPCODE = opcodes.OpTagsSet
  DELETE_OPCODE = opcodes.OpTagsDel

  def __init__(self, items, queryargs, req, **kwargs):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.OpcodeResource.__init__(self, items, queryargs, req, **kwargs)

    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    kind = self.TAG_LEVEL

    if kind in (constants.TAG_INSTANCE,
                constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK):
      if not self.name:
        raise http.HttpBadRequest("Missing name on tag request")

      cl = self.GetClient(query=True)
      tags = list(cl.QueryTags(kind, self.name))

    elif kind == constants.TAG_CLUSTER:
      assert not self.name
      # TODO: Use query API?
      ssc = ssconf.SimpleStore()
      tags = ssc.GetClusterTags()

    else:
      raise http.HttpBadRequest("Unhandled tag type!")

    return list(tags)

  def GetPutOpInput(self):
    """Add a set of tags.

    The request as a list of strings should be PUT to this URI. A job id
    will be returned.

    """
    return ({}, {
      "kind": self.TAG_LEVEL,
      "name": self.name,
      "tags": self.queryargs.get("tag", []),
      "dry_run": self.dryRun(),
      })

  def GetDeleteOpInput(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE request should be
    addressed to a URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # Re-use code
    return self.GetPutOpInput()
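
# Illustrative example (hypothetical tag values): for a subclass with
# TAG_LEVEL = constants.TAG_INSTANCE, e.g. an instance tag resource at
# /2/instances/[instance_name]/tags, a PUT to .../tags?tag=web&tag=db adds the
# tags "web" and "db", and a DELETE to the same URI removes them.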