| Trees | Indices | Help |
|
|---|
|
|
1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc.
5 #
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 # 02110-1301, USA.
20
21
22 """Remote API resource implementations.
23
24 PUT or POST?
25 ============
26
27 According to RFC2616 the main difference between PUT and POST is that
28 POST can create new resources but PUT can only create the resource the
29 URI was pointing to on the PUT request.
30
31 In the context of this module POST on ``/2/instances`` to change an existing
32 entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
33 new instance) with a name specified in the request.
34
35 Quoting from RFC2616, section 9.6::
36
37 The fundamental difference between the POST and PUT requests is reflected in
38 the different meaning of the Request-URI. The URI in a POST request
39 identifies the resource that will handle the enclosed entity. That resource
40 might be a data-accepting process, a gateway to some other protocol, or a
41 separate entity that accepts annotations. In contrast, the URI in a PUT
42 request identifies the entity enclosed with the request -- the user agent
43 knows what URI is intended and the server MUST NOT attempt to apply the
44 request to some other resource. If the server desires that the request be
45 applied to a different URI, it MUST send a 301 (Moved Permanently) response;
46 the user agent MAY then make its own decision regarding whether or not to
47 redirect the request.
48
49 So when adding new methods, if they are operating on the URI entity itself,
50 PUT should be preferred over POST.
51
52 """
53
54 # pylint: disable=C0103
55
56 # C0103: Invalid name, since the R_* names are not conforming
57
58 from ganeti import opcodes
59 from ganeti import http
60 from ganeti import constants
61 from ganeti import cli
62 from ganeti import rapi
63 from ganeti import ht
64 from ganeti import compat
65 from ganeti import ssconf
66 from ganeti.rapi import baserlib
67
68
69 _COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
70 I_FIELDS = ["name", "admin_state", "os",
71 "pnode", "snodes",
72 "disk_template",
73 "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
74 "network_port",
75 "disk.sizes", "disk_usage",
76 "beparams", "hvparams",
77 "oper_state", "oper_ram", "oper_vcpus", "status",
78 "custom_hvparams", "custom_beparams", "custom_nicparams",
79 ] + _COMMON_FIELDS
80
81 N_FIELDS = ["name", "offline", "master_candidate", "drained",
82 "dtotal", "dfree",
83 "mtotal", "mnode", "mfree",
84 "pinst_cnt", "sinst_cnt",
85 "ctotal", "cnodes", "csockets",
86 "pip", "sip", "role",
87 "pinst_list", "sinst_list",
88 "master_capable", "vm_capable",
89 "ndparams",
90 "group.uuid",
91 ] + _COMMON_FIELDS
92
93 G_FIELDS = [
94 "alloc_policy",
95 "name",
96 "node_cnt",
97 "node_list",
98 "ipolicy",
99 "custom_ipolicy",
100 "diskparams",
101 "custom_diskparams",
102 "ndparams",
103 "custom_ndparams",
104 ] + _COMMON_FIELDS
105
106 J_FIELDS_BULK = [
107 "id", "ops", "status", "summary",
108 "opstatus",
109 "received_ts", "start_ts", "end_ts",
110 ]
111
112 J_FIELDS = J_FIELDS_BULK + [
113 "oplog",
114 "opresult",
115 ]
116
117 _NR_DRAINED = "drained"
118 _NR_MASTER_CANDIDATE = "master-candidate"
119 _NR_MASTER = "master"
120 _NR_OFFLINE = "offline"
121 _NR_REGULAR = "regular"
122
123 _NR_MAP = {
124 constants.NR_MASTER: _NR_MASTER,
125 constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
126 constants.NR_DRAINED: _NR_DRAINED,
127 constants.NR_OFFLINE: _NR_OFFLINE,
128 constants.NR_REGULAR: _NR_REGULAR,
129 }
130
131 assert frozenset(_NR_MAP.keys()) == constants.NR_ALL
132
133 # Request data version field
134 _REQ_DATA_VERSION = "__version__"
135
136 # Feature string for instance creation request data version 1
137 _INST_CREATE_REQV1 = "instance-create-reqv1"
138
139 # Feature string for instance reinstall request version 1
140 _INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
141
142 # Feature string for node migration version 1
143 _NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
144
145 # Feature string for node evacuation with LU-generated jobs
146 _NODE_EVAC_RES1 = "node-evac-res1"
147
148 ALL_FEATURES = frozenset([
149 _INST_CREATE_REQV1,
150 _INST_REINSTALL_REQV1,
151 _NODE_MIGRATE_REQV1,
152 _NODE_EVAC_RES1,
153 ])
154
155 # Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
156 _WFJC_TIMEOUT = 10
157
158
159 # FIXME: For compatibility we update the beparams/memory field. Needs to be
160 # removed in Ganeti 2.7
def _UpdateBeparams(inst):
  """Add the legacy "memory" backend parameter to an instance dict.

  Mirrors the value of the "maxmem" backend parameter into the old
  "memory" field so that clients written against the pre-split API
  keep working.

  @param inst: instance dict containing a "beparams" sub-dict
  @return: the same instance dict, modified in place

  """
  be = inst["beparams"]
  be[constants.BE_MEMORY] = be[constants.BE_MAXMEM]
  return inst
172
184
190
193 """/version resource.
194
195 This resource should be used to determine the remote API version and
196 to adapt clients accordingly.
197
198 """
199 @staticmethod
205
208 """/2/info resource.
209
210 """
211 GET_OPCODE = opcodes.OpClusterQuery
212
214 """Returns cluster information.
215
216 """
217 client = self.GetClient()
218 return client.QueryClusterInfo()
219
222 """/2/features resource.
223
224 """
225 @staticmethod
227 """Returns list of optional RAPI features implemented.
228
229 """
230 return list(ALL_FEATURES)
231
234 """/2/os resource.
235
236 """
237 GET_OPCODE = opcodes.OpOsDiagnose
238
240 """Return a list of all OSes.
241
242 Can return error 500 in case of a problem.
243
244 Example: ["debian-etch"]
245
246 """
247 cl = self.GetClient()
248 op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
249 job_id = self.SubmitJob([op], cl=cl)
250 # we use custom feedback function, instead of print we log the status
251 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
252 diagnose_data = result[0]
253
254 if not isinstance(diagnose_data, list):
255 raise http.HttpBadGateway(message="Can't get OS list")
256
257 os_names = []
258 for (name, variants) in diagnose_data:
259 os_names.extend(cli.CalculateOSNames(name, variants))
260
261 return os_names
262
269
276
279 """/2/jobs resource.
280
281 """
283 """Returns a dictionary of jobs.
284
285 @return: a dictionary with jobs id and uri.
286
287 """
288 client = self.GetClient()
289
290 if self.useBulk():
291 bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
292 return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
293 else:
294 jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
295 return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
296 uri_fields=("id", "uri"))
297
300 """/2/jobs/[job_id] resource.
301
302 """
304 """Returns a job status.
305
306 @return: a dictionary with job parameters.
307 The result includes:
308 - id: job ID as a number
309 - status: current job status as a string
310 - ops: involved OpCodes as a list of dictionaries for each
311 opcodes in the job
312 - opstatus: OpCodes status as a list
313 - opresult: OpCodes results as a list of lists
314
315 """
316 job_id = self.items[0]
317 result = self.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
318 if result is None:
319 raise http.HttpNotFound()
320 return baserlib.MapFields(J_FIELDS, result)
321
329
332 """/2/jobs/[job_id]/wait resource.
333
334 """
335 # WaitForJobChange provides access to sensitive information and blocks
336 # machine resources (it's a blocking RAPI call), hence restricting access.
337 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
338
340 """Waits for job changes.
341
342 """
343 job_id = self.items[0]
344
345 fields = self.getBodyParameter("fields")
346 prev_job_info = self.getBodyParameter("previous_job_info", None)
347 prev_log_serial = self.getBodyParameter("previous_log_serial", None)
348
349 if not isinstance(fields, list):
350 raise http.HttpBadRequest("The 'fields' parameter should be a list")
351
352 if not (prev_job_info is None or isinstance(prev_job_info, list)):
353 raise http.HttpBadRequest("The 'previous_job_info' parameter should"
354 " be a list")
355
356 if not (prev_log_serial is None or
357 isinstance(prev_log_serial, (int, long))):
358 raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
359 " be a number")
360
361 client = self.GetClient()
362 result = client.WaitForJobChangeOnce(job_id, fields,
363 prev_job_info, prev_log_serial,
364 timeout=_WFJC_TIMEOUT)
365 if not result:
366 raise http.HttpNotFound()
367
368 if result == constants.JOB_NOTCHANGED:
369 # No changes
370 return None
371
372 (job_info, log_entries) = result
373
374 return {
375 "job_info": job_info,
376 "log_entries": log_entries,
377 }
378
381 """/2/nodes resource.
382
383 """
384 GET_OPCODE = opcodes.OpNodeQuery
385
387 """Returns a list of all nodes.
388
389 """
390 client = self.GetClient()
391
392 if self.useBulk():
393 bulkdata = client.QueryNodes([], N_FIELDS, False)
394 return baserlib.MapBulkFields(bulkdata, N_FIELDS)
395 else:
396 nodesdata = client.QueryNodes([], ["name"], False)
397 nodeslist = [row[0] for row in nodesdata]
398 return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
399 uri_fields=("id", "uri"))
400
403 """/2/nodes/[node_name] resource.
404
405 """
406 GET_OPCODE = opcodes.OpNodeQuery
407
409 """Send information about a node.
410
411 """
412 node_name = self.items[0]
413 client = self.GetClient()
414
415 result = baserlib.HandleItemQueryErrors(client.QueryNodes,
416 names=[node_name], fields=N_FIELDS,
417 use_locking=self.useLocking())
418
419 return baserlib.MapFields(N_FIELDS, result[0])
420
423 """/2/nodes/[node_name]/powercycle resource.
424
425 """
426 POST_OPCODE = opcodes.OpNodePowercycle
427
429 """Tries to powercycle a node.
430
431 """
432 return (self.request_body, {
433 "node_name": self.items[0],
434 "force": self.useForce(),
435 })
436
439 """/2/nodes/[node_name]/role resource.
440
441 """
442 PUT_OPCODE = opcodes.OpNodeSetParams
443
445 """Returns the current node role.
446
447 @return: Node role
448
449 """
450 node_name = self.items[0]
451 client = self.GetClient()
452 result = client.QueryNodes(names=[node_name], fields=["role"],
453 use_locking=self.useLocking())
454
455 return _NR_MAP[result[0][0]]
456
458 """Sets the node role.
459
460 """
461 baserlib.CheckType(self.request_body, basestring, "Body contents")
462
463 role = self.request_body
464
465 if role == _NR_REGULAR:
466 candidate = False
467 offline = False
468 drained = False
469
470 elif role == _NR_MASTER_CANDIDATE:
471 candidate = True
472 offline = drained = None
473
474 elif role == _NR_DRAINED:
475 drained = True
476 candidate = offline = None
477
478 elif role == _NR_OFFLINE:
479 offline = True
480 candidate = drained = None
481
482 else:
483 raise http.HttpBadRequest("Can't set '%s' role" % role)
484
485 assert len(self.items) == 1
486
487 return ({}, {
488 "node_name": self.items[0],
489 "master_candidate": candidate,
490 "offline": offline,
491 "drained": drained,
492 "force": self.useForce(),
493 "auto_promote": bool(self._checkIntVariable("auto-promote", default=0)),
494 })
495
498 """/2/nodes/[node_name]/evacuate resource.
499
500 """
501 POST_OPCODE = opcodes.OpNodeEvacuate
502
504 """Evacuate all instances off a node.
505
506 """
507 return (self.request_body, {
508 "node_name": self.items[0],
509 "dry_run": self.dryRun(),
510 })
511
514 """/2/nodes/[node_name]/migrate resource.
515
516 """
517 POST_OPCODE = opcodes.OpNodeMigrate
518
520 """Migrate all primary instances from a node.
521
522 """
523 if self.queryargs:
524 # Support old-style requests
525 if "live" in self.queryargs and "mode" in self.queryargs:
526 raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
527 " be passed")
528
529 if "live" in self.queryargs:
530 if self._checkIntVariable("live", default=1):
531 mode = constants.HT_MIGRATION_LIVE
532 else:
533 mode = constants.HT_MIGRATION_NONLIVE
534 else:
535 mode = self._checkStringVariable("mode", default=None)
536
537 data = {
538 "mode": mode,
539 }
540 else:
541 data = self.request_body
542
543 return (data, {
544 "node_name": self.items[0],
545 })
546
549 """/2/nodes/[node_name]/modify resource.
550
551 """
552 POST_OPCODE = opcodes.OpNodeSetParams
553
555 """Changes parameters of a node.
556
557 """
558 assert len(self.items) == 1
559
560 return (self.request_body, {
561 "node_name": self.items[0],
562 })
563
566 """/2/nodes/[node_name]/storage resource.
567
568 """
569 # LUNodeQueryStorage acquires locks, hence restricting access to GET
570 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
571 GET_OPCODE = opcodes.OpNodeQueryStorage
572
574 """List storage available on a node.
575
576 """
577 storage_type = self._checkStringVariable("storage_type", None)
578 output_fields = self._checkStringVariable("output_fields", None)
579
580 if not output_fields:
581 raise http.HttpBadRequest("Missing the required 'output_fields'"
582 " parameter")
583
584 return ({}, {
585 "nodes": [self.items[0]],
586 "storage_type": storage_type,
587 "output_fields": output_fields.split(","),
588 })
589
592 """/2/nodes/[node_name]/storage/modify resource.
593
594 """
595 PUT_OPCODE = opcodes.OpNodeModifyStorage
596
598 """Modifies a storage volume on a node.
599
600 """
601 storage_type = self._checkStringVariable("storage_type", None)
602 name = self._checkStringVariable("name", None)
603
604 if not name:
605 raise http.HttpBadRequest("Missing the required 'name'"
606 " parameter")
607
608 changes = {}
609
610 if "allocatable" in self.queryargs:
611 changes[constants.SF_ALLOCATABLE] = \
612 bool(self._checkIntVariable("allocatable", default=1))
613
614 return ({}, {
615 "node_name": self.items[0],
616 "storage_type": storage_type,
617 "name": name,
618 "changes": changes,
619 })
620
623 """/2/nodes/[node_name]/storage/repair resource.
624
625 """
626 PUT_OPCODE = opcodes.OpRepairNodeStorage
627
629 """Repairs a storage volume on a node.
630
631 """
632 storage_type = self._checkStringVariable("storage_type", None)
633 name = self._checkStringVariable("name", None)
634 if not name:
635 raise http.HttpBadRequest("Missing the required 'name'"
636 " parameter")
637
638 return ({}, {
639 "node_name": self.items[0],
640 "storage_type": storage_type,
641 "name": name,
642 })
643
646 """/2/groups resource.
647
648 """
649 GET_OPCODE = opcodes.OpGroupQuery
650 POST_OPCODE = opcodes.OpGroupAdd
651 POST_RENAME = {
652 "name": "group_name",
653 }
654
656 """Create a node group.
657
658 """
659 assert not self.items
660 return (self.request_body, {
661 "dry_run": self.dryRun(),
662 })
663
665 """Returns a list of all node groups.
666
667 """
668 client = self.GetClient()
669
670 if self.useBulk():
671 bulkdata = client.QueryGroups([], G_FIELDS, False)
672 return baserlib.MapBulkFields(bulkdata, G_FIELDS)
673 else:
674 data = client.QueryGroups([], ["name"], False)
675 groupnames = [row[0] for row in data]
676 return baserlib.BuildUriList(groupnames, "/2/groups/%s",
677 uri_fields=("name", "uri"))
678
681 """/2/groups/[group_name] resource.
682
683 """
684 DELETE_OPCODE = opcodes.OpGroupRemove
685
687 """Send information about a node group.
688
689 """
690 group_name = self.items[0]
691 client = self.GetClient()
692
693 result = baserlib.HandleItemQueryErrors(client.QueryGroups,
694 names=[group_name], fields=G_FIELDS,
695 use_locking=self.useLocking())
696
697 return baserlib.MapFields(G_FIELDS, result[0])
698
700 """Delete a node group.
701
702 """
703 assert len(self.items) == 1
704 return ({}, {
705 "group_name": self.items[0],
706 "dry_run": self.dryRun(),
707 })
708
711 """/2/groups/[group_name]/modify resource.
712
713 """
714 PUT_OPCODE = opcodes.OpGroupSetParams
715
717 """Changes some parameters of node group.
718
719 """
720 assert self.items
721 return (self.request_body, {
722 "group_name": self.items[0],
723 })
724
727 """/2/groups/[group_name]/rename resource.
728
729 """
730 PUT_OPCODE = opcodes.OpGroupRename
731
733 """Changes the name of a node group.
734
735 """
736 assert len(self.items) == 1
737 return (self.request_body, {
738 "group_name": self.items[0],
739 "dry_run": self.dryRun(),
740 })
741
744 """/2/groups/[group_name]/assign-nodes resource.
745
746 """
747 PUT_OPCODE = opcodes.OpGroupAssignNodes
748
750 """Assigns nodes to a group.
751
752 """
753 assert len(self.items) == 1
754 return (self.request_body, {
755 "group_name": self.items[0],
756 "dry_run": self.dryRun(),
757 "force": self.useForce(),
758 })
759
762 """/2/instances resource.
763
764 """
765 GET_OPCODE = opcodes.OpInstanceQuery
766 POST_OPCODE = opcodes.OpInstanceCreate
767 POST_RENAME = {
768 "os": "os_type",
769 "name": "instance_name",
770 }
771
773 """Returns a list of all available instances.
774
775 """
776 client = self.GetClient()
777
778 use_locking = self.useLocking()
779 if self.useBulk():
780 bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
781 return map(_UpdateBeparams, baserlib.MapBulkFields(bulkdata, I_FIELDS))
782 else:
783 instancesdata = client.QueryInstances([], ["name"], use_locking)
784 instanceslist = [row[0] for row in instancesdata]
785 return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
786 uri_fields=("id", "uri"))
787
789 """Create an instance.
790
791 @return: a job id
792
793 """
794 baserlib.CheckType(self.request_body, dict, "Body contents")
795
796 # Default to request data version 0
797 data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
798
799 if data_version == 0:
800 raise http.HttpBadRequest("Instance creation request version 0 is no"
801 " longer supported")
802 elif data_version != 1:
803 raise http.HttpBadRequest("Unsupported request data version %s" %
804 data_version)
805
806 data = self.request_body.copy()
807 # Remove "__version__"
808 data.pop(_REQ_DATA_VERSION, None)
809
810 return (data, {
811 "dry_run": self.dryRun(),
812 })
813
816 """/2/instances/[instance_name] resource.
817
818 """
819 GET_OPCODE = opcodes.OpInstanceQuery
820 DELETE_OPCODE = opcodes.OpInstanceRemove
821
823 """Send information about an instance.
824
825 """
826 client = self.GetClient()
827 instance_name = self.items[0]
828
829 result = baserlib.HandleItemQueryErrors(client.QueryInstances,
830 names=[instance_name],
831 fields=I_FIELDS,
832 use_locking=self.useLocking())
833
834 return _UpdateBeparams(baserlib.MapFields(I_FIELDS, result[0]))
835
837 """Delete an instance.
838
839 """
840 assert len(self.items) == 1
841 return ({}, {
842 "instance_name": self.items[0],
843 "ignore_failures": False,
844 "dry_run": self.dryRun(),
845 })
846
849 """/2/instances/[instance_name]/info resource.
850
851 """
852 GET_OPCODE = opcodes.OpInstanceQueryData
853
855 """Request detailed instance information.
856
857 """
858 assert len(self.items) == 1
859 return ({}, {
860 "instances": [self.items[0]],
861 "static": bool(self._checkIntVariable("static", default=0)),
862 })
863
866 """/2/instances/[instance_name]/reboot resource.
867
868 Implements an instance reboot.
869
870 """
871 POST_OPCODE = opcodes.OpInstanceReboot
872
874 """Reboot an instance.
875
876 The URI takes type=[hard|soft|full] and
877 ignore_secondaries=[False|True] parameters.
878
879 """
880 return ({}, {
881 "instance_name": self.items[0],
882 "reboot_type":
883 self.queryargs.get("type", [constants.INSTANCE_REBOOT_HARD])[0],
884 "ignore_secondaries": bool(self._checkIntVariable("ignore_secondaries")),
885 "dry_run": self.dryRun(),
886 })
887
890 """/2/instances/[instance_name]/startup resource.
891
892 Implements an instance startup.
893
894 """
895 PUT_OPCODE = opcodes.OpInstanceStartup
896
898 """Startup an instance.
899
900 The URI takes force=[False|True] parameter to start the instance
901 even if secondary disks are failing.
902
903 """
904 return ({}, {
905 "instance_name": self.items[0],
906 "force": self.useForce(),
907 "dry_run": self.dryRun(),
908 "no_remember": bool(self._checkIntVariable("no_remember")),
909 })
910
913 """/2/instances/[instance_name]/shutdown resource.
914
915 Implements an instance shutdown.
916
917 """
918 PUT_OPCODE = opcodes.OpInstanceShutdown
919
921 """Shutdown an instance.
922
923 """
924 return (self.request_body, {
925 "instance_name": self.items[0],
926 "no_remember": bool(self._checkIntVariable("no_remember")),
927 "dry_run": self.dryRun(),
928 })
929
932 """Parses a request for reinstalling an instance.
933
934 """
935 if not isinstance(data, dict):
936 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
937
938 ostype = baserlib.CheckParameter(data, "os", default=None)
939 start = baserlib.CheckParameter(data, "start", exptype=bool,
940 default=True)
941 osparams = baserlib.CheckParameter(data, "osparams", default=None)
942
943 ops = [
944 opcodes.OpInstanceShutdown(instance_name=name),
945 opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
946 osparams=osparams),
947 ]
948
949 if start:
950 ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
951
952 return ops
953
956 """/2/instances/[instance_name]/reinstall resource.
957
958 Implements an instance reinstall.
959
960 """
961 POST_OPCODE = opcodes.OpInstanceReinstall
962
964 """Reinstall an instance.
965
966 The URI takes os=name and nostartup=[0|1] optional
967 parameters. By default, the instance will be started
968 automatically.
969
970 """
971 if self.request_body:
972 if self.queryargs:
973 raise http.HttpBadRequest("Can't combine query and body parameters")
974
975 body = self.request_body
976 elif self.queryargs:
977 # Legacy interface, do not modify/extend
978 body = {
979 "os": self._checkStringVariable("os"),
980 "start": not self._checkIntVariable("nostartup"),
981 }
982 else:
983 body = {}
984
985 ops = _ParseInstanceReinstallRequest(self.items[0], body)
986
987 return self.SubmitJob(ops)
988
991 """/2/instances/[instance_name]/replace-disks resource.
992
993 """
994 POST_OPCODE = opcodes.OpInstanceReplaceDisks
995
997 """Replaces disks on an instance.
998
999 """
1000 static = {
1001 "instance_name": self.items[0],
1002 }
1003
1004 if self.request_body:
1005 data = self.request_body
1006 elif self.queryargs:
1007 # Legacy interface, do not modify/extend
1008 data = {
1009 "remote_node": self._checkStringVariable("remote_node", default=None),
1010 "mode": self._checkStringVariable("mode", default=None),
1011 "disks": self._checkStringVariable("disks", default=None),
1012 "iallocator": self._checkStringVariable("iallocator", default=None),
1013 }
1014 else:
1015 data = {}
1016
1017 # Parse disks
1018 try:
1019 raw_disks = data.pop("disks")
1020 except KeyError:
1021 pass
1022 else:
1023 if raw_disks:
1024 if ht.TListOf(ht.TInt)(raw_disks): # pylint: disable=E1102
1025 data["disks"] = raw_disks
1026 else:
1027 # Backwards compatibility for strings of the format "1, 2, 3"
1028 try:
1029 data["disks"] = [int(part) for part in raw_disks.split(",")]
1030 except (TypeError, ValueError), err:
1031 raise http.HttpBadRequest("Invalid disk index passed: %s" % err)
1032
1033 return (data, static)
1034
1037 """/2/instances/[instance_name]/activate-disks resource.
1038
1039 """
1040 PUT_OPCODE = opcodes.OpInstanceActivateDisks
1041
1043 """Activate disks for an instance.
1044
1045 The URI might contain ignore_size to ignore current recorded size.
1046
1047 """
1048 return ({}, {
1049 "instance_name": self.items[0],
1050 "ignore_size": bool(self._checkIntVariable("ignore_size")),
1051 })
1052
1055 """/2/instances/[instance_name]/deactivate-disks resource.
1056
1057 """
1058 PUT_OPCODE = opcodes.OpInstanceDeactivateDisks
1059
1067
1070 """/2/instances/[instance_name]/recreate-disks resource.
1071
1072 """
1073 POST_OPCODE = opcodes.OpInstanceRecreateDisks
1074
1082
1085 """/2/instances/[instance_name]/prepare-export resource.
1086
1087 """
1088 PUT_OPCODE = opcodes.OpBackupPrepare
1089
1091 """Prepares an export for an instance.
1092
1093 """
1094 return ({}, {
1095 "instance_name": self.items[0],
1096 "mode": self._checkStringVariable("mode"),
1097 })
1098
1101 """/2/instances/[instance_name]/export resource.
1102
1103 """
1104 PUT_OPCODE = opcodes.OpBackupExport
1105 PUT_RENAME = {
1106 "destination": "target_node",
1107 }
1108
1110 """Exports an instance.
1111
1112 """
1113 return (self.request_body, {
1114 "instance_name": self.items[0],
1115 })
1116
1119 """/2/instances/[instance_name]/migrate resource.
1120
1121 """
1122 PUT_OPCODE = opcodes.OpInstanceMigrate
1123
1125 """Migrates an instance.
1126
1127 """
1128 return (self.request_body, {
1129 "instance_name": self.items[0],
1130 })
1131
1134 """/2/instances/[instance_name]/failover resource.
1135
1136 """
1137 PUT_OPCODE = opcodes.OpInstanceFailover
1138
1140 """Does a failover of an instance.
1141
1142 """
1143 return (self.request_body, {
1144 "instance_name": self.items[0],
1145 })
1146
1149 """/2/instances/[instance_name]/rename resource.
1150
1151 """
1152 PUT_OPCODE = opcodes.OpInstanceRename
1153
1155 """Changes the name of an instance.
1156
1157 """
1158 return (self.request_body, {
1159 "instance_name": self.items[0],
1160 })
1161
1164 """/2/instances/[instance_name]/modify resource.
1165
1166 """
1167 PUT_OPCODE = opcodes.OpInstanceSetParams
1168
1170 """Changes parameters of an instance.
1171
1172 """
1173 return (self.request_body, {
1174 "instance_name": self.items[0],
1175 })
1176
1179 """/2/instances/[instance_name]/disk/[disk_index]/grow resource.
1180
1181 """
1182 POST_OPCODE = opcodes.OpInstanceGrowDisk
1183
1185 """Increases the size of an instance disk.
1186
1187 """
1188 return (self.request_body, {
1189 "instance_name": self.items[0],
1190 "disk": int(self.items[1]),
1191 })
1192
1195 """/2/instances/[instance_name]/console resource.
1196
1197 """
1198 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
1199 GET_OPCODE = opcodes.OpInstanceConsole
1200
1202 """Request information for connecting to instance's console.
1203
1204 @return: Serialized instance console description, see
1205 L{objects.InstanceConsole}
1206
1207 """
1208 client = self.GetClient()
1209
1210 ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)
1211
1212 if console is None:
1213 raise http.HttpServiceUnavailable("Instance console unavailable")
1214
1215 assert isinstance(console, dict)
1216 return console
1217
1220 """
1221
1222 """
1223 try:
1224 fields = args["fields"]
1225 except KeyError:
1226 raise http.HttpBadRequest("Missing 'fields' query argument")
1227
1228 return _SplitQueryFields(fields[0])
1229
1236
1239 """/2/query/[resource] resource.
1240
1241 """
1242 # Results might contain sensitive information
1243 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
1244 GET_OPCODE = opcodes.OpQuery
1245 PUT_OPCODE = opcodes.OpQuery
1246
1249
1251 """Returns resource information.
1252
1253 @return: Query result, see L{objects.QueryResponse}
1254
1255 """
1256 return self._Query(_GetQueryFields(self.queryargs), None)
1257
1259 """Submits job querying for resources.
1260
1261 @return: Query result, see L{objects.QueryResponse}
1262
1263 """
1264 body = self.request_body
1265
1266 baserlib.CheckType(body, dict, "Body contents")
1267
1268 try:
1269 fields = body["fields"]
1270 except KeyError:
1271 fields = _GetQueryFields(self.queryargs)
1272
1273 qfilter = body.get("qfilter", None)
1274 # TODO: remove this after 2.7
1275 if qfilter is None:
1276 qfilter = body.get("filter", None)
1277
1278 return self._Query(fields, qfilter)
1279
1282 """/2/query/[resource]/fields resource.
1283
1284 """
1285 GET_OPCODE = opcodes.OpQueryFields
1286
1288 """Retrieves list of available fields for a resource.
1289
1290 @return: List of serialized L{objects.QueryFieldDefinition}
1291
1292 """
1293 try:
1294 raw_fields = self.queryargs["fields"]
1295 except KeyError:
1296 fields = None
1297 else:
1298 fields = _SplitQueryFields(raw_fields[0])
1299
1300 return self.GetClient().QueryFields(self.items[0], fields).ToDict()
1301
1304 """Quasiclass for tagging resources.
1305
1306 Manages tags. When inheriting this class you must define the
1307 TAG_LEVEL for it.
1308
1309 """
1310 TAG_LEVEL = None
1311 GET_OPCODE = opcodes.OpTagsGet
1312 PUT_OPCODE = opcodes.OpTagsSet
1313 DELETE_OPCODE = opcodes.OpTagsDel
1314
1316 """A tag resource constructor.
1317
1318 We have to override the default to sort out cluster naming case.
1319
1320 """
1321 baserlib.OpcodeResource.__init__(self, items, queryargs, req, **kwargs)
1322
1323 if self.TAG_LEVEL == constants.TAG_CLUSTER:
1324 self.name = None
1325 else:
1326 self.name = items[0]
1327
1329 """Returns a list of tags.
1330
1331 Example: ["tag1", "tag2", "tag3"]
1332
1333 """
1334 kind = self.TAG_LEVEL
1335
1336 if kind in (constants.TAG_INSTANCE,
1337 constants.TAG_NODEGROUP,
1338 constants.TAG_NODE):
1339 if not self.name:
1340 raise http.HttpBadRequest("Missing name on tag request")
1341
1342 cl = self.GetClient()
1343 if kind == constants.TAG_INSTANCE:
1344 fn = cl.QueryInstances
1345 elif kind == constants.TAG_NODEGROUP:
1346 fn = cl.QueryGroups
1347 else:
1348 fn = cl.QueryNodes
1349 result = fn(names=[self.name], fields=["tags"], use_locking=False)
1350 if not result or not result[0]:
1351 raise http.HttpBadGateway("Invalid response from tag query")
1352 tags = result[0][0]
1353
1354 elif kind == constants.TAG_CLUSTER:
1355 assert not self.name
1356 # TODO: Use query API?
1357 ssc = ssconf.SimpleStore()
1358 tags = ssc.GetClusterTags()
1359
1360 return list(tags)
1361
1363 """Add a set of tags.
1364
1365 The request as a list of strings should be PUT to this URI. And
1366 you'll have back a job id.
1367
1368 """
1369 return ({}, {
1370 "kind": self.TAG_LEVEL,
1371 "name": self.name,
1372 "tags": self.queryargs.get("tag", []),
1373 "dry_run": self.dryRun(),
1374 })
1375
1377 """Delete a tag.
1378
1379 In order to delete a set of tags, the DELETE
1380 request should be addressed to URI like:
1381 /tags?tag=[tag]&tag=[tag]
1382
1383 """
1384 # Re-use code
1385 return self.GetPutOpInput()
1386
1395
1404
1413
1422
| Trees | Indices | Help |
|
|---|
| Generated by Epydoc 3.0.1 on Fri Jan 18 10:10:12 2013 | http://epydoc.sourceforge.net |