#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


"""Remote API resource implementations.

PUT or POST?
============

According to RFC2616 the main difference between PUT and POST is that
POST can create new resources but PUT can only create the resource the
URI was pointing to on the PUT request.

In the context of this module POST on ``/2/instances`` to change an existing
entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a
new instance) with a name specified in the request.

Quoting from RFC2616, section 9.6::

  The fundamental difference between the POST and PUT requests is reflected in
  the different meaning of the Request-URI. The URI in a POST request
  identifies the resource that will handle the enclosed entity. That resource
  might be a data-accepting process, a gateway to some other protocol, or a
  separate entity that accepts annotations. In contrast, the URI in a PUT
  request identifies the entity enclosed with the request -- the user agent
  knows what URI is intended and the server MUST NOT attempt to apply the
  request to some other resource. If the server desires that the request be
  applied to a different URI, it MUST send a 301 (Moved Permanently) response;
  the user agent MAY then make its own decision regarding whether or not to
  redirect the request.

So when adding new methods, if they are operating on the URI entity itself,
PUT should be preferred over POST.

"""
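
# For illustration, the convention above maps onto resources defined in this
# module as follows: creating an instance is a POST on ``/2/instances``
# (OpInstanceCreate), while operations on a single named entity -- for example
# ``/2/instances/[instance_name]/rename`` or ``/2/groups/[group_name]/modify``
# -- are exposed as PUT and act on the entity identified by the URI.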

# pylint: disable=C0103

# C0103: Invalid name, since the R_* names are not conforming

from ganeti import opcodes
from ganeti import objects
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import rapi
from ganeti import ht
from ganeti import compat
from ganeti import ssconf
from ganeti.rapi import baserlib


_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]

I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.uuids", "nic.names",
            "nic.links", "nic.networks", "nic.networks.names", "nic.bridges",
            "network_port",
            "disk.sizes", "disk_usage", "disk.uuids", "disk.names",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            "ndparams",
            "group.uuid",
            ] + _COMMON_FIELDS

NET_FIELDS = ["name", "network", "gateway",
              "network6", "gateway6",
              "mac_prefix",
              "free_count", "reserved_count",
              "map", "group_list", "inst_list",
              "external_reservations",
              ] + _COMMON_FIELDS

G_FIELDS = [
  "alloc_policy",
  "name",
  "node_cnt",
  "node_list",
  "ipolicy",
  "custom_ipolicy",
  "diskparams",
  "custom_diskparams",
  "ndparams",
  "custom_ndparams",
  ] + _COMMON_FIELDS

J_FIELDS_BULK = [
  "id", "ops", "status", "summary",
  "opstatus",
  "received_ts", "start_ts", "end_ts",
  ]

J_FIELDS = J_FIELDS_BULK + [
  "oplog",
  "opresult",
  ]

_NR_DRAINED = "drained"
_NR_MASTER_CANDIDATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

_NR_MAP = {
  constants.NR_MASTER: _NR_MASTER,
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
  constants.NR_DRAINED: _NR_DRAINED,
  constants.NR_OFFLINE: _NR_OFFLINE,
  constants.NR_REGULAR: _NR_REGULAR,
  }

assert frozenset(_NR_MAP.keys()) == constants.NR_ALL

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Feature string for node migration version 1
_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"

# Feature string for node evacuation with LU-generated jobs
_NODE_EVAC_RES1 = "node-evac-res1"

ALL_FEATURES = compat.UniqueFrozenset([
  _INST_CREATE_REQV1,
  _INST_REINSTALL_REQV1,
  _NODE_MIGRATE_REQV1,
  _NODE_EVAC_RES1,
  ])

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10

# FIXME: For compatibility we update the beparams/memory field. Needs to be
# removed in Ganeti 2.8
def _UpdateBeparams(inst):
  """Updates the beparams dict of inst to support the memory field.

  @param inst: Inst dict
  @return: Updated inst dict

  """
  beparams = inst["beparams"]
  beparams[constants.BE_MEMORY] = beparams[constants.BE_MAXMEM]

  return inst
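
# Illustrative example (not part of the original module): given an instance
# dict such as
#   {"beparams": {constants.BE_MAXMEM: 512}}
# _UpdateBeparams() copies the maximum memory value into the legacy key,
# returning
#   {"beparams": {constants.BE_MAXMEM: 512, constants.BE_MEMORY: 512}}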


class R_version(baserlib.ResourceBase):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION


class R_2_info(baserlib.OpcodeResource):
  """/2/info resource.

  """
  GET_OPCODE = opcodes.OpClusterQuery

  def GET(self):
    """Returns cluster information.

    """
    client = self.GetClient(query=True)
    return client.QueryClusterInfo()


class R_2_features(baserlib.ResourceBase):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns list of optional RAPI features implemented.

    """
    return list(ALL_FEATURES)


class R_2_os(baserlib.OpcodeResource):
  """/2/os resource.

  """
  GET_OPCODE = opcodes.OpOsDiagnose

  def GET(self):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = self.GetClient()
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    job_id = self.SubmitJob([op], cl=cl)
    # we use a custom feedback function, instead of print we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, variants) in diagnose_data:
      os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names
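
# Illustrative example (an assumption about cli.CalculateOSNames, which is
# expected to expand an OS with variants into "name+variant" entries): a
# diagnose row of ("debian-etch", ["default", "minimal"]) would contribute
# "debian-etch+default" and "debian-etch+minimal" to the returned list.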


class R_2_jobs(baserlib.ResourceBase):
  """/2/jobs resource.

  """
  def GET(self):
    """Returns a dictionary of jobs.

    @return: a dictionary with job ids and URIs.

    """
    client = self.GetClient(query=True)

    if self.useBulk():
      bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
      return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
    else:
      jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
      return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
                                   uri_fields=("id", "uri"))


class R_2_jobs_id(baserlib.ResourceBase):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries for each
              opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    job_id = self.items[0]
    result = self.GetClient(query=True).QueryJobs([job_id, ], J_FIELDS)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(J_FIELDS, result)


class R_2_jobs_id_wait(baserlib.ResourceBase):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = self.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }
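
# Illustrative example (not part of the original module): a request to the
# wait resource carries a JSON body such as
#   {"fields": ["status"], "previous_job_info": null,
#    "previous_log_serial": null}
# where both "previous_*" keys are optional; the handler returns None if the
# job did not change within _WFJC_TIMEOUT seconds, otherwise the
# job_info/log_entries dictionary built above.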


class R_2_nodes(baserlib.OpcodeResource):
  """/2/nodes resource.

  """
  GET_OPCODE = opcodes.OpNodeQuery

  def GET(self):
    """Returns a list of all nodes.

    """
    client = self.GetClient(query=True)

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))


class R_2_nodes_name(baserlib.OpcodeResource):
  """/2/nodes/[node_name] resource.

  """
  GET_OPCODE = opcodes.OpNodeQuery

  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = self.GetClient(query=True)

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])


class R_2_nodes_name_powercycle(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/powercycle resource.

  """
  POST_OPCODE = opcodes.OpNodePowercycle

  def GetPostOpInput(self):
    """Tries to powercycle a node.

    """
    return (self.request_body, {
      "node_name": self.items[0],
      "force": self.useForce(),
      })


class R_2_nodes_name_role(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/role resource.

  """
  PUT_OPCODE = opcodes.OpNodeSetParams

  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = self.GetClient(query=True)
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def GetPutOpInput(self):
    """Sets the node role.

    """
    baserlib.CheckType(self.request_body, basestring, "Body contents")

    role = self.request_body

    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIDATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    assert len(self.items) == 1

    return ({}, {
      "node_name": self.items[0],
      "master_candidate": candidate,
      "offline": offline,
      "drained": drained,
      "force": self.useForce(),
      "auto_promote": bool(self._checkIntVariable("auto-promote", default=0)),
      })
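
# Illustrative example (not part of the original module): the body of a PUT
# to /2/nodes/[node_name]/role is a single string, one of "regular",
# "master-candidate", "drained" or "offline"; any other value is rejected
# with HttpBadRequest by the handler above.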


class R_2_nodes_name_evacuate(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/evacuate resource.

  """
  POST_OPCODE = opcodes.OpNodeEvacuate

  def GetPostOpInput(self):
    """Evacuate all instances off a node.

    """
    return (self.request_body, {
      "node_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_nodes_name_migrate(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/migrate resource.

  """
  POST_OPCODE = opcodes.OpNodeMigrate

  def GetPostOpInput(self):
    """Migrate all primary instances from a node.

    """
    if self.queryargs:
      # Support old-style requests
      if "live" in self.queryargs and "mode" in self.queryargs:
        raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                  " be passed")

      if "live" in self.queryargs:
        if self._checkIntVariable("live", default=1):
          mode = constants.HT_MIGRATION_LIVE
        else:
          mode = constants.HT_MIGRATION_NONLIVE
      else:
        mode = self._checkStringVariable("mode", default=None)

      data = {
        "mode": mode,
        }
    else:
      data = self.request_body

    return (data, {
      "node_name": self.items[0],
      })


class R_2_nodes_name_modify(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/modify resource.

  """
  POST_OPCODE = opcodes.OpNodeSetParams

  def GetPostOpInput(self):
    """Changes parameters of a node.

    """
    assert len(self.items) == 1

    return (self.request_body, {
      "node_name": self.items[0],
      })


class R_2_nodes_name_storage(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUNodeQueryStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
  GET_OPCODE = opcodes.OpNodeQueryStorage

  def GetGetOpInput(self):
    """List storage available on a node.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    output_fields = self._checkStringVariable("output_fields", None)

    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    return ({}, {
      "nodes": [self.items[0]],
      "storage_type": storage_type,
      "output_fields": output_fields.split(","),
      })


class R_2_nodes_name_storage_modify(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  PUT_OPCODE = opcodes.OpNodeModifyStorage

  def GetPutOpInput(self):
    """Modifies a storage volume on a node.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    name = self._checkStringVariable("name", None)

    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    return ({}, {
      "node_name": self.items[0],
      "storage_type": storage_type,
      "name": name,
      "changes": changes,
      })


class R_2_nodes_name_storage_repair(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  PUT_OPCODE = opcodes.OpRepairNodeStorage

  def GetPutOpInput(self):
    """Repairs a storage volume on a node.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    return ({}, {
      "node_name": self.items[0],
      "storage_type": storage_type,
      "name": name,
      })


class R_2_networks(baserlib.OpcodeResource):
  """/2/networks resource.

  """
  GET_OPCODE = opcodes.OpNetworkQuery
  POST_OPCODE = opcodes.OpNetworkAdd
  POST_RENAME = {
    "name": "network_name",
    }

  def GetPostOpInput(self):
    """Create a network.

    """
    assert not self.items
    return (self.request_body, {
      "dry_run": self.dryRun(),
      })

  def GET(self):
    """Returns a list of all networks.

    """
    client = self.GetClient(query=True)

    if self.useBulk():
      bulkdata = client.QueryNetworks([], NET_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, NET_FIELDS)
    else:
      data = client.QueryNetworks([], ["name"], False)
      networknames = [row[0] for row in data]
      return baserlib.BuildUriList(networknames, "/2/networks/%s",
                                   uri_fields=("name", "uri"))


class R_2_networks_name(baserlib.OpcodeResource):
  """/2/networks/[network_name] resource.

  """
  DELETE_OPCODE = opcodes.OpNetworkRemove

  def GET(self):
    """Send information about a network.

    """
    network_name = self.items[0]
    client = self.GetClient(query=True)

    result = baserlib.HandleItemQueryErrors(client.QueryNetworks,
                                            names=[network_name],
                                            fields=NET_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(NET_FIELDS, result[0])

  def GetDeleteOpInput(self):
    """Delete a network.

    """
    assert len(self.items) == 1
    return (self.request_body, {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_networks_name_connect(baserlib.OpcodeResource):
  """/2/networks/[network_name]/connect resource.

  """
  PUT_OPCODE = opcodes.OpNetworkConnect

  def GetPutOpInput(self):
    """Connects a network to a node group.

    """
    assert self.items
    return (self.request_body, {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_networks_name_disconnect(baserlib.OpcodeResource):
  """/2/networks/[network_name]/disconnect resource.

  """
  PUT_OPCODE = opcodes.OpNetworkDisconnect

  def GetPutOpInput(self):
    """Disconnects a network from a node group.

    """
    assert self.items
    return (self.request_body, {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_networks_name_modify(baserlib.OpcodeResource):
  """/2/networks/[network_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpNetworkSetParams

  def GetPutOpInput(self):
    """Changes some parameters of a network.

    """
    assert self.items
    return (self.request_body, {
      "network_name": self.items[0],
      })


class R_2_groups(baserlib.OpcodeResource):
  """/2/groups resource.

  """
  GET_OPCODE = opcodes.OpGroupQuery
  POST_OPCODE = opcodes.OpGroupAdd
  POST_RENAME = {
    "name": "group_name",
    }

  def GetPostOpInput(self):
    """Create a node group.

    """
    assert not self.items
    return (self.request_body, {
      "dry_run": self.dryRun(),
      })

  def GET(self):
    """Returns a list of all node groups.

    """
    client = self.GetClient(query=True)

    if self.useBulk():
      bulkdata = client.QueryGroups([], G_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, G_FIELDS)
    else:
      data = client.QueryGroups([], ["name"], False)
      groupnames = [row[0] for row in data]
      return baserlib.BuildUriList(groupnames, "/2/groups/%s",
                                   uri_fields=("name", "uri"))


class R_2_groups_name(baserlib.OpcodeResource):
  """/2/groups/[group_name] resource.

  """
  DELETE_OPCODE = opcodes.OpGroupRemove

  def GET(self):
    """Send information about a node group.

    """
    group_name = self.items[0]
    client = self.GetClient(query=True)

    result = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                            names=[group_name], fields=G_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(G_FIELDS, result[0])

  def GetDeleteOpInput(self):
    """Delete a node group.

    """
    assert len(self.items) == 1
    return ({}, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_groups_name_modify(baserlib.OpcodeResource):
  """/2/groups/[group_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpGroupSetParams

  def GetPutOpInput(self):
    """Changes some parameters of a node group.

    """
    assert self.items
    return (self.request_body, {
      "group_name": self.items[0],
      })


class R_2_groups_name_rename(baserlib.OpcodeResource):
  """/2/groups/[group_name]/rename resource.

  """
  PUT_OPCODE = opcodes.OpGroupRename

  def GetPutOpInput(self):
    """Changes the name of a node group.

    """
    assert len(self.items) == 1
    return (self.request_body, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_groups_name_assign_nodes(baserlib.OpcodeResource):
  """/2/groups/[group_name]/assign-nodes resource.

  """
  PUT_OPCODE = opcodes.OpGroupAssignNodes

  def GetPutOpInput(self):
    """Assigns nodes to a group.

    """
    assert len(self.items) == 1
    return (self.request_body, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      "force": self.useForce(),
      })


def _ConvertUsbDevices(data):
  """Convert in place the usb_devices string to the proper format.

  In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
  comma to space because commas cannot be accepted on the command line
  (they already act as the separator between different hvparams). RAPI
  should be able to accept commas for backwards compatibility, but we want
  it to also accept the new space separator. Therefore, we convert
  spaces into commas here and keep the old parsing logic elsewhere.

  """
  try:
    hvparams = data["hvparams"]
    usb_devices = hvparams[constants.HV_USB_DEVICES]
    hvparams[constants.HV_USB_DEVICES] = usb_devices.replace(" ", ",")
    data["hvparams"] = hvparams
  except KeyError:
    # No usb_devices, no modification required
    pass
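
# Illustrative example (not part of the original module): a request body
# containing
#   {"hvparams": {constants.HV_USB_DEVICES: "dev1 dev2"}}
# is rewritten in place by _ConvertUsbDevices() to
#   {"hvparams": {constants.HV_USB_DEVICES: "dev1,dev2"}}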


class R_2_instances(baserlib.OpcodeResource):
  """/2/instances resource.

  """
  GET_OPCODE = opcodes.OpInstanceQuery
  POST_OPCODE = opcodes.OpInstanceCreate
  POST_RENAME = {
    "os": "os_type",
    "name": "instance_name",
    }

  def GET(self):
    """Returns a list of all available instances.

    """
    client = self.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return map(_UpdateBeparams, baserlib.MapBulkFields(bulkdata, I_FIELDS))
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def GetPostOpInput(self):
    """Create an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      raise http.HttpBadRequest("Instance creation request version 0 is no"
                                " longer supported")
    elif data_version != 1:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    data = self.request_body.copy()
    # Remove "__version__"
    data.pop(_REQ_DATA_VERSION, None)

    _ConvertUsbDevices(data)

    return (data, {
      "dry_run": self.dryRun(),
      })


class R_2_instances_multi_alloc(baserlib.OpcodeResource):
  """/2/instances-multi-alloc resource.

  """
  POST_OPCODE = opcodes.OpInstanceMultiAlloc

  def GetPostOpInput(self):
    """Try to allocate multiple instances.

    @return: A dict with submitted jobs, allocatable instances and failed
             allocations

    """
    if "instances" not in self.request_body:
      raise http.HttpBadRequest("Request is missing required 'instances' field"
                                " in body")

    op_id = {
      "OP_ID": self.POST_OPCODE.OP_ID,  # pylint: disable=E1101
      }
    body = objects.FillDict(self.request_body, {
      "instances": [objects.FillDict(inst, op_id)
                    for inst in self.request_body["instances"]],
      })

    return (body, {
      "dry_run": self.dryRun(),
      })


class R_2_instances_name(baserlib.OpcodeResource):
  """/2/instances/[instance_name] resource.

  """
  GET_OPCODE = opcodes.OpInstanceQuery
  DELETE_OPCODE = opcodes.OpInstanceRemove

  def GET(self):
    """Send information about an instance.

    """
    client = self.GetClient()
    instance_name = self.items[0]

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    return _UpdateBeparams(baserlib.MapFields(I_FIELDS, result[0]))

  def GetDeleteOpInput(self):
    """Delete an instance.

    """
    assert len(self.items) == 1
    return ({}, {
      "instance_name": self.items[0],
      "ignore_failures": False,
      "dry_run": self.dryRun(),
      })


class R_2_instances_name_info(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/info resource.

  """
  GET_OPCODE = opcodes.OpInstanceQueryData

  def GetGetOpInput(self):
    """Request detailed instance information.

    """
    assert len(self.items) == 1
    return ({}, {
      "instances": [self.items[0]],
      "static": bool(self._checkIntVariable("static", default=0)),
      })


class R_2_instances_name_reboot(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  POST_OPCODE = opcodes.OpInstanceReboot

  def GetPostOpInput(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    return ({}, {
      "instance_name": self.items[0],
      "reboot_type":
        self.queryargs.get("type", [constants.INSTANCE_REBOOT_HARD])[0],
      "ignore_secondaries": bool(self._checkIntVariable("ignore_secondaries")),
      "dry_run": self.dryRun(),
      })


class R_2_instances_name_startup(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  PUT_OPCODE = opcodes.OpInstanceStartup

  def GetPutOpInput(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    return ({}, {
      "instance_name": self.items[0],
      "force": self.useForce(),
      "dry_run": self.dryRun(),
      "no_remember": bool(self._checkIntVariable("no_remember")),
      })


class R_2_instances_name_shutdown(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  PUT_OPCODE = opcodes.OpInstanceShutdown

  def GetPutOpInput(self):
    """Shutdown an instance.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      "no_remember": bool(self._checkIntVariable("no_remember")),
      "dry_run": self.dryRun(),
      })


def _ParseInstanceReinstallRequest(name, data):
  """Parses a request for reinstalling an instance.

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  ostype = baserlib.CheckParameter(data, "os", default=None)
  start = baserlib.CheckParameter(data, "start", exptype=bool,
                                  default=True)
  osparams = baserlib.CheckParameter(data, "osparams", default=None)

  ops = [
    opcodes.OpInstanceShutdown(instance_name=name),
    opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
                                osparams=osparams),
    ]

  if start:
    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))

  return ops
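
# Illustrative example (not part of the original module; the instance name is
# hypothetical): _ParseInstanceReinstallRequest("inst1.example.com",
# {"os": "debian-etch"}) yields OpInstanceShutdown, OpInstanceReinstall with
# os_type="debian-etch" and, because "start" defaults to True,
# OpInstanceStartup for that instance.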


class R_2_instances_name_reinstall(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  POST_OPCODE = opcodes.OpInstanceReinstall

  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    if self.request_body:
      if self.queryargs:
        raise http.HttpBadRequest("Can't combine query and body parameters")

      body = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      body = {
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),
        }
    else:
      body = {}

    ops = _ParseInstanceReinstallRequest(self.items[0], body)

    return self.SubmitJob(ops)


class R_2_instances_name_replace_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceReplaceDisks

  def GetPostOpInput(self):
    """Replaces disks on an instance.

    """
    static = {
      "instance_name": self.items[0],
      }

    if self.request_body:
      data = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      data = {
        "remote_node": self._checkStringVariable("remote_node", default=None),
        "mode": self._checkStringVariable("mode", default=None),
        "disks": self._checkStringVariable("disks", default=None),
        "iallocator": self._checkStringVariable("iallocator", default=None),
        }
    else:
      data = {}

    # Parse disks
    try:
      raw_disks = data.pop("disks")
    except KeyError:
      pass
    else:
      if raw_disks:
        if ht.TListOf(ht.TInt)(raw_disks):  # pylint: disable=E1102
          data["disks"] = raw_disks
        else:
          # Backwards compatibility for strings of the format "1, 2, 3"
          try:
            data["disks"] = [int(part) for part in raw_disks.split(",")]
          except (TypeError, ValueError), err:
            raise http.HttpBadRequest("Invalid disk index passed: %s" % err)

    return (data, static)
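
# Illustrative example (not part of the original module): with the legacy
# query interface, disks="0,1" is converted by the code above into
# data["disks"] = [0, 1]; the current interface passes a list of integer
# disk indices in the request body directly.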


class R_2_instances_name_activate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  PUT_OPCODE = opcodes.OpInstanceActivateDisks

  def GetPutOpInput(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    return ({}, {
      "instance_name": self.items[0],
      "ignore_size": bool(self._checkIntVariable("ignore_size")),
      })


class R_2_instances_name_deactivate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  PUT_OPCODE = opcodes.OpInstanceDeactivateDisks


class R_2_instances_name_recreate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/recreate-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceRecreateDisks


class R_2_instances_name_prepare_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  PUT_OPCODE = opcodes.OpBackupPrepare

  def GetPutOpInput(self):
    """Prepares an export for an instance.

    """
    return ({}, {
      "instance_name": self.items[0],
      "mode": self._checkStringVariable("mode"),
      })


class R_2_instances_name_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/export resource.

  """
  PUT_OPCODE = opcodes.OpBackupExport
  PUT_RENAME = {
    "destination": "target_node",
    }

  def GetPutOpInput(self):
    """Exports an instance.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_migrate(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/migrate resource.

  """
  PUT_OPCODE = opcodes.OpInstanceMigrate

  def GetPutOpInput(self):
    """Migrates an instance.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_failover(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/failover resource.

  """
  PUT_OPCODE = opcodes.OpInstanceFailover

  def GetPutOpInput(self):
    """Does a failover of an instance.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_rename(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/rename resource.

  """
  PUT_OPCODE = opcodes.OpInstanceRename

  def GetPutOpInput(self):
    """Changes the name of an instance.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_modify(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpInstanceSetParams

  def GetPutOpInput(self):
    """Changes parameters of an instance.

    """
    data = self.request_body.copy()
    _ConvertUsbDevices(data)

    return (data, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_disk_grow(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  POST_OPCODE = opcodes.OpInstanceGrowDisk

  def GetPostOpInput(self):
    """Increases the size of an instance disk.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      "disk": int(self.items[1]),
      })


class R_2_instances_name_console(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/console resource.

  """
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  GET_OPCODE = opcodes.OpInstanceConsole

  def GET(self):
    """Request information for connecting to instance's console.

    @return: Serialized instance console description, see
             L{objects.InstanceConsole}

    """
    client = self.GetClient()

    ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)

    if console is None:
      raise http.HttpServiceUnavailable("Instance console unavailable")

    assert isinstance(console, dict)
    return console


def _GetQueryFields(args):
  """Tries to extract C{fields} query parameter.

  @type args: dictionary
  @rtype: list of string
  @raise http.HttpBadRequest: When parameter can't be found

  """
  try:
    fields = args["fields"]
  except KeyError:
    raise http.HttpBadRequest("Missing 'fields' query argument")

  return _SplitQueryFields(fields[0])


def _SplitQueryFields(fields):
  """Splits fields as given for a query request.

  @type fields: string
  @rtype: list of string

  """
  return [i.strip() for i in fields.split(",")]
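
# Illustrative example (not part of the original module):
#   _SplitQueryFields("name, status ,oper_state")
# returns ["name", "status", "oper_state"].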


class R_2_query(baserlib.ResourceBase):
  """/2/query/[resource] resource.

  """
  # Results might contain sensitive information
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  PUT_ACCESS = GET_ACCESS
  GET_OPCODE = opcodes.OpQuery
  PUT_OPCODE = opcodes.OpQuery

  def _Query(self, fields, qfilter):
    return self.GetClient().Query(self.items[0], fields, qfilter).ToDict()

  def GET(self):
    """Returns resource information.

    @return: Query result, see L{objects.QueryResponse}

    """
    return self._Query(_GetQueryFields(self.queryargs), None)

  def PUT(self):
    """Submits job querying for resources.

    @return: Query result, see L{objects.QueryResponse}

    """
    body = self.request_body

    baserlib.CheckType(body, dict, "Body contents")

    try:
      fields = body["fields"]
    except KeyError:
      fields = _GetQueryFields(self.queryargs)

    qfilter = body.get("qfilter", None)
    # TODO: remove this after 2.7
    if qfilter is None:
      qfilter = body.get("filter", None)

    return self._Query(fields, qfilter)


class R_2_query_fields(baserlib.ResourceBase):
  """/2/query/[resource]/fields resource.

  """
  GET_OPCODE = opcodes.OpQueryFields

  def GET(self):
    """Retrieves list of available fields for a resource.

    @return: List of serialized L{objects.QueryFieldDefinition}

    """
    try:
      raw_fields = self.queryargs["fields"]
    except KeyError:
      fields = None
    else:
      fields = _SplitQueryFields(raw_fields[0])

    return self.GetClient().QueryFields(self.items[0], fields).ToDict()


class _R_Tags(baserlib.OpcodeResource):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None
  GET_OPCODE = opcodes.OpTagsGet
  PUT_OPCODE = opcodes.OpTagsSet
  DELETE_OPCODE = opcodes.OpTagsDel

  def __init__(self, items, queryargs, req, **kwargs):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.OpcodeResource.__init__(self, items, queryargs, req, **kwargs)

    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    kind = self.TAG_LEVEL

    if kind in (constants.TAG_INSTANCE,
                constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK):
      if not self.name:
        raise http.HttpBadRequest("Missing name on tag request")

      cl = self.GetClient(query=True)
      tags = list(cl.QueryTags(kind, self.name))

    elif kind == constants.TAG_CLUSTER:
      assert not self.name
      # TODO: Use query API?
      ssc = ssconf.SimpleStore()
      tags = ssc.GetClusterTags()

    else:
      raise http.HttpBadRequest("Unhandled tag type!")

    return list(tags)

  def GetPutOpInput(self):
    """Add a set of tags.

    The request, as a list of strings, should be PUT to this URI. A job id
    will be returned.

    """
    return ({}, {
      "kind": self.TAG_LEVEL,
      "name": self.name,
      "tags": self.queryargs.get("tag", []),
      "dry_run": self.dryRun(),
      })

  def GetDeleteOpInput(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE request should be
    addressed to a URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # Re-use code
    return self.GetPutOpInput()