1 #
2 #
3
4 # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc.
5 #
6 # This program is free software; you can redistribute it and/or modify
7 # it under the terms of the GNU General Public License as published by
8 # the Free Software Foundation; either version 2 of the License, or
9 # (at your option) any later version.
10 #
11 # This program is distributed in the hope that it will be useful, but
12 # WITHOUT ANY WARRANTY; without even the implied warranty of
13 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 # General Public License for more details.
15 #
16 # You should have received a copy of the GNU General Public License
17 # along with this program; if not, write to the Free Software
18 # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 # 02110-1301, USA.
20
21
22 """Remote API resource implementations.
23
24 PUT or POST?
25 ============
26
27 According to RFC2616, the main difference between PUT and POST is that
28 POST can create new resources, while PUT can only create the resource
29 identified by the URI of the PUT request.
30
31 In the context of this module, POST on ``/2/instances`` to change an existing
32 entity is legitimate, while PUT would not be: PUT creates a new entity (e.g. a
33 new instance) with the name specified in the request.
34
35 Quoting from RFC2616, section 9.6::
36
37 The fundamental difference between the POST and PUT requests is reflected in
38 the different meaning of the Request-URI. The URI in a POST request
39 identifies the resource that will handle the enclosed entity. That resource
40 might be a data-accepting process, a gateway to some other protocol, or a
41 separate entity that accepts annotations. In contrast, the URI in a PUT
42 request identifies the entity enclosed with the request -- the user agent
43 knows what URI is intended and the server MUST NOT attempt to apply the
44 request to some other resource. If the server desires that the request be
45 applied to a different URI, it MUST send a 301 (Moved Permanently) response;
46 the user agent MAY then make its own decision regarding whether or not to
47 redirect the request.
48
49 So when adding new methods, if they operate on the entity identified by the
50 URI itself, PUT should be preferred over POST.
51
52 """
53
54 # pylint: disable=C0103
55
56 # C0103: Invalid name, since the R_* names are not conforming
57
58 from ganeti import opcodes
59 from ganeti import http
60 from ganeti import constants
61 from ganeti import cli
62 from ganeti import rapi
63 from ganeti import ht
64 from ganeti import compat
65 from ganeti.rapi import baserlib
66
67
68 _COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
69 I_FIELDS = ["name", "admin_state", "os",
70 "pnode", "snodes",
71 "disk_template",
72 "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
73 "network_port",
74 "disk.sizes", "disk_usage",
75 "beparams", "hvparams",
76 "oper_state", "oper_ram", "oper_vcpus", "status",
77 "custom_hvparams", "custom_beparams", "custom_nicparams",
78 ] + _COMMON_FIELDS
79
80 N_FIELDS = ["name", "offline", "master_candidate", "drained",
81 "dtotal", "dfree",
82 "mtotal", "mnode", "mfree",
83 "pinst_cnt", "sinst_cnt",
84 "ctotal", "cnodes", "csockets",
85 "pip", "sip", "role",
86 "pinst_list", "sinst_list",
87 "master_capable", "vm_capable",
88 "group.uuid",
89 ] + _COMMON_FIELDS
90
91 G_FIELDS = [
92 "alloc_policy",
93 "name",
94 "node_cnt",
95 "node_list",
96 ] + _COMMON_FIELDS
97
98 J_FIELDS_BULK = [
99 "id", "ops", "status", "summary",
100 "opstatus",
101 "received_ts", "start_ts", "end_ts",
102 ]
103
104 J_FIELDS = J_FIELDS_BULK + [
105 "oplog",
106 "opresult",
107 ]
108
109 _NR_DRAINED = "drained"
110 _NR_MASTER_CANDIDATE = "master-candidate"
111 _NR_MASTER = "master"
112 _NR_OFFLINE = "offline"
113 _NR_REGULAR = "regular"
114
115 _NR_MAP = {
116 constants.NR_MASTER: _NR_MASTER,
117 constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
118 constants.NR_DRAINED: _NR_DRAINED,
119 constants.NR_OFFLINE: _NR_OFFLINE,
120 constants.NR_REGULAR: _NR_REGULAR,
121 }
122
123 assert frozenset(_NR_MAP.keys()) == constants.NR_ALL
124
125 # Request data version field
126 _REQ_DATA_VERSION = "__version__"
127
128 # Feature string for instance creation request data version 1
129 _INST_CREATE_REQV1 = "instance-create-reqv1"
130
131 # Feature string for instance reinstall request version 1
132 _INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"
133
134 # Feature string for node migration version 1
135 _NODE_MIGRATE_REQV1 = "node-migrate-reqv1"
136
137 # Feature string for node evacuation with LU-generated jobs
138 _NODE_EVAC_RES1 = "node-evac-res1"
139
140 ALL_FEATURES = frozenset([
141 _INST_CREATE_REQV1,
142 _INST_REINSTALL_REQV1,
143 _NODE_MIGRATE_REQV1,
144 _NODE_EVAC_RES1,
145 ])
146
147 # Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
148 _WFJC_TIMEOUT = 10
152 """/version resource.
153
154 This resource should be used to determine the remote API version and
155 to adapt clients accordingly.
156
157 """
158 @staticmethod
164
167 """/2/info resource.
168
169 """
170 @staticmethod
172 """Returns cluster information.
173
174 """
175 client = baserlib.GetClient()
176 return client.QueryClusterInfo()
177
180 """/2/features resource.
181
182 """
183 @staticmethod
185 """Returns list of optional RAPI features implemented.
186
187 """
188 return list(ALL_FEATURES)
189
192 """/2/os resource.
193
194 """
195 @staticmethod
197 """Return a list of all OSes.
198
199 Can return error 500 in case of a problem.
200
201 Example: ["debian-etch"]
202
203 """
204 cl = baserlib.GetClient()
205 op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
206 job_id = baserlib.SubmitJob([op], cl)
207 # we use a custom feedback function; instead of printing, we log the status
208 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
209 diagnose_data = result[0]
210
211 if not isinstance(diagnose_data, list):
212 raise http.HttpBadGateway(message="Can't get OS list")
213
214 os_names = []
215 for (name, variants) in diagnose_data:
216 os_names.extend(cli.CalculateOSNames(name, variants))
217
218 return os_names
219
222 """/2/redistribute-config resource.
223
224 """
225 @staticmethod
227 """Redistribute configuration to all nodes.
228
229 """
230 return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])
231
234 """/2/modify resource.
235
236 """
238 """Modifies cluster parameters.
239
240 @return: a job id
241
242 """
243 op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body,
244 None)
245
246 return baserlib.SubmitJob([op])
247
250 """/2/jobs resource.
251
252 """
254 """Returns a dictionary of jobs.
255
256 @return: a dictionary with jobs id and uri.
257
258 """
259 client = baserlib.GetClient()
260
261 if self.useBulk():
262 bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
263 return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
264 else:
265 jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
266 return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
267 uri_fields=("id", "uri"))
268
271 """/2/jobs/[job_id] resource.
272
273 """
275 """Returns a job status.
276
277 @return: a dictionary with job parameters.
278 The result includes:
279 - id: job ID as a number
280 - status: current job status as a string
281 - ops: involved OpCodes as a list of dictionaries, one for each
282 opcode in the job
283 - opstatus: OpCodes status as a list
284 - opresult: OpCodes results as a list of lists
285
286 """
287 job_id = self.items[0]
288 result = baserlib.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
289 if result is None:
290 raise http.HttpNotFound()
291 return baserlib.MapFields(J_FIELDS, result)
292
300
303 """/2/jobs/[job_id]/wait resource.
304
305 """
306 # WaitForJobChange provides access to sensitive information and blocks
307 # machine resources (it's a blocking RAPI call), hence restricting access.
308 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
309
311 """Waits for job changes.
312
313 """
314 job_id = self.items[0]
315
316 fields = self.getBodyParameter("fields")
317 prev_job_info = self.getBodyParameter("previous_job_info", None)
318 prev_log_serial = self.getBodyParameter("previous_log_serial", None)
319
320 if not isinstance(fields, list):
321 raise http.HttpBadRequest("The 'fields' parameter should be a list")
322
323 if not (prev_job_info is None or isinstance(prev_job_info, list)):
324 raise http.HttpBadRequest("The 'previous_job_info' parameter should"
325 " be a list")
326
327 if not (prev_log_serial is None or
328 isinstance(prev_log_serial, (int, long))):
329 raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
330 " be a number")
331
332 client = baserlib.GetClient()
333 result = client.WaitForJobChangeOnce(job_id, fields,
334 prev_job_info, prev_log_serial,
335 timeout=_WFJC_TIMEOUT)
336 if not result:
337 raise http.HttpNotFound()
338
339 if result == constants.JOB_NOTCHANGED:
340 # No changes
341 return None
342
343 (job_info, log_entries) = result
344
345 return {
346 "job_info": job_info,
347 "log_entries": log_entries,
348 }
349
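# A sketch of a request body for the wait resource above (hypothetical values;
# the keys are the body parameters read by the handler, and "status" and
# "opstatus" are job fields listed in J_FIELDS_BULK above):
#
#   {
#     "fields": ["status", "opstatus"],
#     "previous_job_info": null,
#     "previous_log_serial": null
#   }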
352 """/2/nodes resource.
353
354 """
356 """Returns a list of all nodes.
357
358 """
359 client = baserlib.GetClient()
360
361 if self.useBulk():
362 bulkdata = client.QueryNodes([], N_FIELDS, False)
363 return baserlib.MapBulkFields(bulkdata, N_FIELDS)
364 else:
365 nodesdata = client.QueryNodes([], ["name"], False)
366 nodeslist = [row[0] for row in nodesdata]
367 return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
368 uri_fields=("id", "uri"))
369
372 """/2/nodes/[node_name] resource.
373
374 """
376 """Send information about a node.
377
378 """
379 node_name = self.items[0]
380 client = baserlib.GetClient()
381
382 result = baserlib.HandleItemQueryErrors(client.QueryNodes,
383 names=[node_name], fields=N_FIELDS,
384 use_locking=self.useLocking())
385
386 return baserlib.MapFields(N_FIELDS, result[0])
387
390 """ /2/nodes/[node_name]/role resource.
391
392 """
394 """Returns the current node role.
395
396 @return: Node role
397
398 """
399 node_name = self.items[0]
400 client = baserlib.GetClient()
401 result = client.QueryNodes(names=[node_name], fields=["role"],
402 use_locking=self.useLocking())
403
404 return _NR_MAP[result[0][0]]
405
407 """Sets the node role.
408
409 @return: a job id
410
411 """
412 if not isinstance(self.request_body, basestring):
413 raise http.HttpBadRequest("Invalid body contents, not a string")
414
415 node_name = self.items[0]
416 role = self.request_body
417
418 if role == _NR_REGULAR:
419 candidate = False
420 offline = False
421 drained = False
422
423 elif role == _NR_MASTER_CANDIDATE:
424 candidate = True
425 offline = drained = None
426
427 elif role == _NR_DRAINED:
428 drained = True
429 candidate = offline = None
430
431 elif role == _NR_OFFLINE:
432 offline = True
433 candidate = drained = None
434
435 else:
436 raise http.HttpBadRequest("Can't set '%s' role" % role)
437
438 op = opcodes.OpNodeSetParams(node_name=node_name,
439 master_candidate=candidate,
440 offline=offline,
441 drained=drained,
442 force=bool(self.useForce()))
443
444 return baserlib.SubmitJob([op])
445
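# A sketch of how the role resource above is used (hypothetical node name; the
# body is a plain string, as checked by the handler):
#
#   PUT /2/nodes/node1.example.com/role    with body    "drained"
#
# Accepted roles are "regular", "master-candidate", "drained" and "offline";
# "master" cannot be assigned through this resource.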
448 """/2/nodes/[node_name]/evacuate resource.
449
450 """
452 """Evacuate all instances off a node.
453
454 """
455 op = baserlib.FillOpcode(opcodes.OpNodeEvacuate, self.request_body, {
456 "node_name": self.items[0],
457 "dry_run": self.dryRun(),
458 })
459
460 return baserlib.SubmitJob([op])
461
464 """/2/nodes/[node_name]/migrate resource.
465
466 """
468 """Migrate all primary instances from a node.
469
470 """
471 node_name = self.items[0]
472
473 if self.queryargs:
474 # Support old-style requests
475 if "live" in self.queryargs and "mode" in self.queryargs:
476 raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
477 " be passed")
478
479 if "live" in self.queryargs:
480 if self._checkIntVariable("live", default=1):
481 mode = constants.HT_MIGRATION_LIVE
482 else:
483 mode = constants.HT_MIGRATION_NONLIVE
484 else:
485 mode = self._checkStringVariable("mode", default=None)
486
487 data = {
488 "mode": mode,
489 }
490 else:
491 data = self.request_body
492
493 op = baserlib.FillOpcode(opcodes.OpNodeMigrate, data, {
494 "node_name": node_name,
495 })
496
497 return baserlib.SubmitJob([op])
498
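# A sketch of the two request styles accepted by the migrate resource above
# (hypothetical node name and values):
#
#   Legacy query-argument style:
#     /2/nodes/node1.example.com/migrate?live=0
#
#   Body style, where "mode" is one of the hypervisor migration modes
#   (e.g. the value of constants.HT_MIGRATION_LIVE):
#     /2/nodes/node1.example.com/migrate    with body    {"mode": "live"}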
501 """/2/nodes/[node_name]/storage resource.
502
503 """
504 # LUNodeQueryStorage acquires locks, hence restricting access to GET
505 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
506
508 node_name = self.items[0]
509
510 storage_type = self._checkStringVariable("storage_type", None)
511 if not storage_type:
512 raise http.HttpBadRequest("Missing the required 'storage_type'"
513 " parameter")
514
515 output_fields = self._checkStringVariable("output_fields", None)
516 if not output_fields:
517 raise http.HttpBadRequest("Missing the required 'output_fields'"
518 " parameter")
519
520 op = opcodes.OpNodeQueryStorage(nodes=[node_name],
521 storage_type=storage_type,
522 output_fields=output_fields.split(","))
523 return baserlib.SubmitJob([op])
524
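# A sketch of a query to the storage resource above (hypothetical node name,
# storage type and output fields; both query parameters are required, as
# checked by the handler):
#
#   /2/nodes/node1.example.com/storage?storage_type=lvm-vg&output_fields=name,size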
527 """/2/nodes/[node_name]/storage/modify resource.
528
529 """
531 node_name = self.items[0]
532
533 storage_type = self._checkStringVariable("storage_type", None)
534 if not storage_type:
535 raise http.HttpBadRequest("Missing the required 'storage_type'"
536 " parameter")
537
538 name = self._checkStringVariable("name", None)
539 if not name:
540 raise http.HttpBadRequest("Missing the required 'name'"
541 " parameter")
542
543 changes = {}
544
545 if "allocatable" in self.queryargs:
546 changes[constants.SF_ALLOCATABLE] = \
547 bool(self._checkIntVariable("allocatable", default=1))
548
549 op = opcodes.OpNodeModifyStorage(node_name=node_name,
550 storage_type=storage_type,
551 name=name,
552 changes=changes)
553 return baserlib.SubmitJob([op])
554
557 """/2/nodes/[node_name]/storage/repair resource.
558
559 """
561 node_name = self.items[0]
562
563 storage_type = self._checkStringVariable("storage_type", None)
564 if not storage_type:
565 raise http.HttpBadRequest("Missing the required 'storage_type'"
566 " parameter")
567
568 name = self._checkStringVariable("name", None)
569 if not name:
570 raise http.HttpBadRequest("Missing the required 'name'"
571 " parameter")
572
573 op = opcodes.OpRepairNodeStorage(node_name=node_name,
574 storage_type=storage_type,
575 name=name)
576 return baserlib.SubmitJob([op])
577
580 """Parses a request for creating a node group.
581
582 @rtype: L{opcodes.OpGroupAdd}
583 @return: Group creation opcode
584
585 """
586 override = {
587 "dry_run": dry_run,
588 }
589
590 rename = {
591 "name": "group_name",
592 }
593
594 return baserlib.FillOpcode(opcodes.OpGroupAdd, data, override,
595 rename=rename)
596
599 """/2/groups resource.
600
601 """
603 """Returns a list of all node groups.
604
605 """
606 client = baserlib.GetClient()
607
608 if self.useBulk():
609 bulkdata = client.QueryGroups([], G_FIELDS, False)
610 return baserlib.MapBulkFields(bulkdata, G_FIELDS)
611 else:
612 data = client.QueryGroups([], ["name"], False)
613 groupnames = [row[0] for row in data]
614 return baserlib.BuildUriList(groupnames, "/2/groups/%s",
615 uri_fields=("name", "uri"))
616
618 """Create a node group.
619
620 @return: a job id
621
622 """
623 baserlib.CheckType(self.request_body, dict, "Body contents")
624 op = _ParseCreateGroupRequest(self.request_body, self.dryRun())
625 return baserlib.SubmitJob([op])
626
629 """/2/groups/[group_name] resource.
630
631 """
633 """Send information about a node group.
634
635 """
636 group_name = self.items[0]
637 client = baserlib.GetClient()
638
639 result = baserlib.HandleItemQueryErrors(client.QueryGroups,
640 names=[group_name], fields=G_FIELDS,
641 use_locking=self.useLocking())
642
643 return baserlib.MapFields(G_FIELDS, result[0])
644
646 """Delete a node group.
647
648 """
649 op = opcodes.OpGroupRemove(group_name=self.items[0],
650 dry_run=bool(self.dryRun()))
651
652 return baserlib.SubmitJob([op])
653
656 """Parses a request for modifying a node group.
657
658 @rtype: L{opcodes.OpGroupSetParams}
659 @return: Group modify opcode
660
661 """
662 return baserlib.FillOpcode(opcodes.OpGroupSetParams, data, {
663 "group_name": name,
664 })
665
668 """/2/groups/[group_name]/modify resource.
669
670 """
672 """Changes some parameters of node group.
673
674 @return: a job id
675
676 """
677 baserlib.CheckType(self.request_body, dict, "Body contents")
678
679 op = _ParseModifyGroupRequest(self.items[0], self.request_body)
680
681 return baserlib.SubmitJob([op])
682
685 """Parses a request for renaming a node group.
686
687 @type name: string
688 @param name: name of the node group to rename
689 @type data: dict
690 @param data: the body received by the rename request
691 @type dry_run: bool
692 @param dry_run: whether to perform a dry run
693
694 @rtype: L{opcodes.OpGroupRename}
695 @return: Node group rename opcode
696
697 """
698 return baserlib.FillOpcode(opcodes.OpGroupRename, data, {
699 "group_name": name,
700 "dry_run": dry_run,
701 })
702
705 """/2/groups/[group_name]/rename resource.
706
707 """
709 """Changes the name of a node group.
710
711 @return: a job id
712
713 """
714 baserlib.CheckType(self.request_body, dict, "Body contents")
715 op = _ParseRenameGroupRequest(self.items[0], self.request_body,
716 self.dryRun())
717 return baserlib.SubmitJob([op])
718
721 """/2/groups/[group_name]/assign-nodes resource.
722
723 """
725 """Assigns nodes to a group.
726
727 @return: a job id
728
729 """
730 op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body, {
731 "group_name": self.items[0],
732 "dry_run": self.dryRun(),
733 "force": self.useForce(),
734 })
735
736 return baserlib.SubmitJob([op])
737
740 """Parses an instance creation request version 1.
741
742 @rtype: L{opcodes.OpInstanceCreate}
743 @return: Instance creation opcode
744
745 """
746 override = {
747 "dry_run": dry_run,
748 }
749
750 rename = {
751 "os": "os_type",
752 "name": "instance_name",
753 }
754
755 return baserlib.FillOpcode(opcodes.OpInstanceCreate, data, override,
756 rename=rename)
757
760 """/2/instances resource.
761
762 """
764 """Returns a list of all available instances.
765
766 """
767 client = baserlib.GetClient()
768
769 use_locking = self.useLocking()
770 if self.useBulk():
771 bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
772 return baserlib.MapBulkFields(bulkdata, I_FIELDS)
773 else:
774 instancesdata = client.QueryInstances([], ["name"], use_locking)
775 instanceslist = [row[0] for row in instancesdata]
776 return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
777 uri_fields=("id", "uri"))
778
780 """Create an instance.
781
782 @return: a job id
783
784 """
785 if not isinstance(self.request_body, dict):
786 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
787
788 # Default to request data version 0
789 data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)
790
791 if data_version == 0:
792 raise http.HttpBadRequest("Instance creation request version 0 is no"
793 " longer supported")
794 elif data_version == 1:
795 data = self.request_body.copy()
796 # Remove "__version__"
797 data.pop(_REQ_DATA_VERSION, None)
798 op = _ParseInstanceCreateRequestVersion1(data, self.dryRun())
799 else:
800 raise http.HttpBadRequest("Unsupported request data version %s" %
801 data_version)
802
803 return baserlib.SubmitJob([op])
804
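# A sketch of a version-1 instance creation body (hypothetical values; apart
# from "__version__", "name" and "os", which are handled explicitly above, all
# remaining keys are passed through to OpInstanceCreate and are omitted here):
#
#   {
#     "__version__": 1,
#     "name": "inst1.example.com",
#     "os": "debian-etch",
#     ...
#   }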
807 """/2/instances/[instance_name] resource.
808
809 """
811 """Send information about an instance.
812
813 """
814 client = baserlib.GetClient()
815 instance_name = self.items[0]
816
817 result = baserlib.HandleItemQueryErrors(client.QueryInstances,
818 names=[instance_name],
819 fields=I_FIELDS,
820 use_locking=self.useLocking())
821
822 return baserlib.MapFields(I_FIELDS, result[0])
823
825 """Delete an instance.
826
827 """
828 op = opcodes.OpInstanceRemove(instance_name=self.items[0],
829 ignore_failures=False,
830 dry_run=bool(self.dryRun()))
831 return baserlib.SubmitJob([op])
832
835 """/2/instances/[instance_name]/info resource.
836
837 """
839 """Request detailed instance information.
840
841 """
842 instance_name = self.items[0]
843 static = bool(self._checkIntVariable("static", default=0))
844
845 op = opcodes.OpInstanceQueryData(instances=[instance_name],
846 static=static)
847 return baserlib.SubmitJob([op])
848
851 """/2/instances/[instance_name]/reboot resource.
852
853 Implements an instance reboot.
854
855 """
857 """Reboot an instance.
858
859 The URI takes type=[hard|soft|full] and
860 ignore_secondaries=[False|True] parameters.
861
862 """
863 instance_name = self.items[0]
864 reboot_type = self.queryargs.get("type",
865 [constants.INSTANCE_REBOOT_HARD])[0]
866 ignore_secondaries = bool(self._checkIntVariable("ignore_secondaries"))
867 op = opcodes.OpInstanceReboot(instance_name=instance_name,
868 reboot_type=reboot_type,
869 ignore_secondaries=ignore_secondaries,
870 dry_run=bool(self.dryRun()))
871
872 return baserlib.SubmitJob([op])
873
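# A sketch of a request to the reboot resource above (hypothetical instance
# name; the query parameters are the ones documented in the handler):
#
#   /2/instances/inst1.example.com/reboot?type=soft&ignore_secondaries=0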
876 """/2/instances/[instance_name]/startup resource.
877
878 Implements an instance startup.
879
880 """
882 """Startup an instance.
883
884 The URI takes force=[False|True] parameter to start the instance
885 if even if secondary disks are failing.
886
887 """
888 instance_name = self.items[0]
889 force_startup = bool(self._checkIntVariable("force"))
890 no_remember = bool(self._checkIntVariable("no_remember"))
891 op = opcodes.OpInstanceStartup(instance_name=instance_name,
892 force=force_startup,
893 dry_run=bool(self.dryRun()),
894 no_remember=no_remember)
895
896 return baserlib.SubmitJob([op])
897
900 """Parses a request for an instance shutdown.
901
902 @rtype: L{opcodes.OpInstanceShutdown}
903 @return: Instance shutdown opcode
904
905 """
906 return baserlib.FillOpcode(opcodes.OpInstanceShutdown, data, {
907 "instance_name": name,
908 "dry_run": dry_run,
909 "no_remember": no_remember,
910 })
911
914 """/2/instances/[instance_name]/shutdown resource.
915
916 Implements an instance shutdown.
917
918 """
920 """Shutdown an instance.
921
922 @return: a job id
923
924 """
925 no_remember = bool(self._checkIntVariable("no_remember"))
926 op = _ParseShutdownInstanceRequest(self.items[0], self.request_body,
927 bool(self.dryRun()), no_remember)
928
929 return baserlib.SubmitJob([op])
930
933 """Parses a request for reinstalling an instance.
934
935 """
936 if not isinstance(data, dict):
937 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
938
939 ostype = baserlib.CheckParameter(data, "os", default=None)
940 start = baserlib.CheckParameter(data, "start", exptype=bool,
941 default=True)
942 osparams = baserlib.CheckParameter(data, "osparams", default=None)
943
944 ops = [
945 opcodes.OpInstanceShutdown(instance_name=name),
946 opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
947 osparams=osparams),
948 ]
949
950 if start:
951 ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
952
953 return ops
954
957 """/2/instances/[instance_name]/reinstall resource.
958
959 Implements an instance reinstall.
960
961 """
963 """Reinstall an instance.
964
965 The URI takes os=name and nostartup=[0|1] optional
966 parameters. By default, the instance will be started
967 automatically.
968
969 """
970 if self.request_body:
971 if self.queryargs:
972 raise http.HttpBadRequest("Can't combine query and body parameters")
973
974 body = self.request_body
975 elif self.queryargs:
976 # Legacy interface, do not modify/extend
977 body = {
978 "os": self._checkStringVariable("os"),
979 "start": not self._checkIntVariable("nostartup"),
980 }
981 else:
982 body = {}
983
984 ops = _ParseInstanceReinstallRequest(self.items[0], body)
985
986 return baserlib.SubmitJob(ops)
987
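# A sketch of the two request styles accepted by the reinstall resource above
# (hypothetical values):
#
#   Legacy query-argument style (do not extend):
#     /2/instances/inst1.example.com/reinstall?os=debian-etch&nostartup=1
#
#   Body style:
#     {"os": "debian-etch", "start": false, "osparams": {}}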
990 """Parses a request for an instance export.
991
992 @rtype: L{opcodes.OpInstanceReplaceDisks}
993 @return: Instance export opcode
994
995 """
996 override = {
997 "instance_name": name,
998 }
999
1000 # Parse disks
1001 try:
1002 raw_disks = data.pop("disks")
1003 except KeyError:
1004 pass
1005 else:
1006 if raw_disks:
1007 if ht.TListOf(ht.TInt)(raw_disks): # pylint: disable=E1102
1008 data["disks"] = raw_disks
1009 else:
1010 # Backwards compatibility for strings of the format "1, 2, 3"
1011 try:
1012 data["disks"] = [int(part) for part in raw_disks.split(",")]
1013 except (TypeError, ValueError), err:
1014 raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
1015
1016 return baserlib.FillOpcode(opcodes.OpInstanceReplaceDisks, data, override)
1017
1020 """/2/instances/[instance_name]/replace-disks resource.
1021
1022 """
1024 """Replaces disks on an instance.
1025
1026 """
1027 if self.request_body:
1028 body = self.request_body
1029 elif self.queryargs:
1030 # Legacy interface, do not modify/extend
1031 body = {
1032 "remote_node": self._checkStringVariable("remote_node", default=None),
1033 "mode": self._checkStringVariable("mode", default=None),
1034 "disks": self._checkStringVariable("disks", default=None),
1035 "iallocator": self._checkStringVariable("iallocator", default=None),
1036 }
1037 else:
1038 body = {}
1039
1040 op = _ParseInstanceReplaceDisksRequest(self.items[0], body)
1041
1042 return baserlib.SubmitJob([op])
1043
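# A sketch of the two encodings accepted for the "disks" value by the parser
# above (hypothetical bodies; any other keys are passed through to
# OpInstanceReplaceDisks):
#
#   {"disks": [0, 2]}     # list of disk indices
#   {"disks": "0,2"}      # legacy comma-separated string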
1046 """/2/instances/[instance_name]/activate-disks resource.
1047
1048 """
1050 """Activate disks for an instance.
1051
1052 The URI may contain ignore_size to ignore the currently recorded size.
1053
1054 """
1055 instance_name = self.items[0]
1056 ignore_size = bool(self._checkIntVariable("ignore_size"))
1057
1058 op = opcodes.OpInstanceActivateDisks(instance_name=instance_name,
1059 ignore_size=ignore_size)
1060
1061 return baserlib.SubmitJob([op])
1062
1065 """/2/instances/[instance_name]/deactivate-disks resource.
1066
1067 """
1069 """Deactivate disks for an instance.
1070
1071 """
1072 instance_name = self.items[0]
1073
1074 op = opcodes.OpInstanceDeactivateDisks(instance_name=instance_name)
1075
1076 return baserlib.SubmitJob([op])
1077
1080 """/2/instances/[instance_name]/prepare-export resource.
1081
1082 """
1084 """Prepares an export for an instance.
1085
1086 @return: a job id
1087
1088 """
1089 instance_name = self.items[0]
1090 mode = self._checkStringVariable("mode")
1091
1092 op = opcodes.OpBackupPrepare(instance_name=instance_name,
1093 mode=mode)
1094
1095 return baserlib.SubmitJob([op])
1096
1099 """Parses a request for an instance export.
1100
1101 @rtype: L{opcodes.OpBackupExport}
1102 @return: Instance export opcode
1103
1104 """
1105 # Rename "destination" to "target_node"
1106 try:
1107 data["target_node"] = data.pop("destination")
1108 except KeyError:
1109 pass
1110
1111 return baserlib.FillOpcode(opcodes.OpBackupExport, data, {
1112 "instance_name": name,
1113 })
1114
1117 """/2/instances/[instance_name]/export resource.
1118
1119 """
1121 """Exports an instance.
1122
1123 @return: a job id
1124
1125 """
1126 if not isinstance(self.request_body, dict):
1127 raise http.HttpBadRequest("Invalid body contents, not a dictionary")
1128
1129 op = _ParseExportInstanceRequest(self.items[0], self.request_body)
1130
1131 return baserlib.SubmitJob([op])
1132
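# A sketch of an export request body (hypothetical node name; "destination" is
# renamed to "target_node" by the parser above, and any remaining keys are
# passed through to OpBackupExport):
#
#   {"destination": "node2.example.com"}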
1135 """Parses a request for an instance migration.
1136
1137 @rtype: L{opcodes.OpInstanceMigrate}
1138 @return: Instance migration opcode
1139
1140 """
1141 return baserlib.FillOpcode(opcodes.OpInstanceMigrate, data, {
1142 "instance_name": name,
1143 })
1144
1147 """/2/instances/[instance_name]/migrate resource.
1148
1149 """
1151 """Migrates an instance.
1152
1153 @return: a job id
1154
1155 """
1156 baserlib.CheckType(self.request_body, dict, "Body contents")
1157
1158 op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)
1159
1160 return baserlib.SubmitJob([op])
1161
1164 """/2/instances/[instance_name]/failover resource.
1165
1166 """
1168 """Does a failover of an instance.
1169
1170 @return: a job id
1171
1172 """
1173 baserlib.CheckType(self.request_body, dict, "Body contents")
1174
1175 op = baserlib.FillOpcode(opcodes.OpInstanceFailover, self.request_body, {
1176 "instance_name": self.items[0],
1177 })
1178
1179 return baserlib.SubmitJob([op])
1180
1183 """Parses a request for renaming an instance.
1184
1185 @rtype: L{opcodes.OpInstanceRename}
1186 @return: Instance rename opcode
1187
1188 """
1189 return baserlib.FillOpcode(opcodes.OpInstanceRename, data, {
1190 "instance_name": name,
1191 })
1192
1195 """/2/instances/[instance_name]/rename resource.
1196
1197 """
1199 """Changes the name of an instance.
1200
1201 @return: a job id
1202
1203 """
1204 baserlib.CheckType(self.request_body, dict, "Body contents")
1205
1206 op = _ParseRenameInstanceRequest(self.items[0], self.request_body)
1207
1208 return baserlib.SubmitJob([op])
1209
1212 """Parses a request for modifying an instance.
1213
1214 @rtype: L{opcodes.OpInstanceSetParams}
1215 @return: Instance modify opcode
1216
1217 """
1218 return baserlib.FillOpcode(opcodes.OpInstanceSetParams, data, {
1219 "instance_name": name,
1220 })
1221
1224 """/2/instances/[instance_name]/modify resource.
1225
1226 """
1228 """Changes some parameters of an instance.
1229
1230 @return: a job id
1231
1232 """
1233 baserlib.CheckType(self.request_body, dict, "Body contents")
1234
1235 op = _ParseModifyInstanceRequest(self.items[0], self.request_body)
1236
1237 return baserlib.SubmitJob([op])
1238
1241 """/2/instances/[instance_name]/disk/[disk_index]/grow resource.
1242
1243 """
1245 """Increases the size of an instance disk.
1246
1247 @return: a job id
1248
1249 """
1250 op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
1251 "instance_name": self.items[0],
1252 "disk": int(self.items[1]),
1253 })
1254
1255 return baserlib.SubmitJob([op])
1256
1259 """/2/instances/[instance_name]/console resource.
1260
1261 """
1262 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
1263
1265 """Request information for connecting to instance's console.
1266
1267 @return: Serialized instance console description, see
1268 L{objects.InstanceConsole}
1269
1270 """
1271 client = baserlib.GetClient()
1272
1273 ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False)
1274
1275 if console is None:
1276 raise http.HttpServiceUnavailable("Instance console unavailable")
1277
1278 assert isinstance(console, dict)
1279 return console
1280
1283 """
1284
1285 """
1286 try:
1287 fields = args["fields"]
1288 except KeyError:
1289 raise http.HttpBadRequest("Missing 'fields' query argument")
1290
1291 return _SplitQueryFields(fields[0])
1292
1299
1302 """/2/query/[resource] resource.
1303
1304 """
1305 # Results might contain sensitive information
1306 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
1307
1310
1312 """Returns resource information.
1313
1314 @return: Query result, see L{objects.QueryResponse}
1315
1316 """
1317 return self._Query(_GetQueryFields(self.queryargs), None)
1318
1320 """Submits job querying for resources.
1321
1322 @return: Query result, see L{objects.QueryResponse}
1323
1324 """
1325 body = self.request_body
1326
1327 baserlib.CheckType(body, dict, "Body contents")
1328
1329 try:
1330 fields = body["fields"]
1331 except KeyError:
1332 fields = _GetQueryFields(self.queryargs)
1333
1334 return self._Query(fields, self.request_body.get("filter", None))
1335
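# A sketch of how the query resource above is used (hypothetical resource and
# field names; "filter" is optional, as shown in the handler):
#
#   GET /2/query/instance?fields=name,status
#   or a body of the form {"fields": ["name", "status"], "filter": ...}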
1338 """/2/query/[resource]/fields resource.
1339
1340 """
1342 """Retrieves list of available fields for a resource.
1343
1344 @return: List of serialized L{objects.QueryFieldDefinition}
1345
1346 """
1347 try:
1348 raw_fields = self.queryargs["fields"]
1349 except KeyError:
1350 fields = None
1351 else:
1352 fields = _SplitQueryFields(raw_fields[0])
1353
1354 return baserlib.GetClient().QueryFields(self.items[0], fields).ToDict()
1355
1358 """ Quasiclass for tagging resources
1359
1360 Manages tags. When inheriting this class you must define the
1361 TAG_LEVEL for it.
1362
1363 """
1364 TAG_LEVEL = None
1365
1367 """A tag resource constructor.
1368
1369 We have to override the default to sort out the cluster naming case.
1370
1371 """
1372 baserlib.R_Generic.__init__(self, items, queryargs, req)
1373
1374 if self.TAG_LEVEL == constants.TAG_CLUSTER:
1375 self.name = None
1376 else:
1377 self.name = items[0]
1378
1380 """Returns a list of tags.
1381
1382 Example: ["tag1", "tag2", "tag3"]
1383
1384 """
1385 # pylint: disable=W0212
1386 return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)
1387
1389 """Add a set of tags.
1390
1391 The tags to add must be specified as one or more 'tag' query
1392 parameters. A job id is returned.
1393
1394 """
1395 # pylint: disable=W0212
1396 if "tag" not in self.queryargs:
1397 raise http.HttpBadRequest("Please specify tag(s) to add using the"
1398 " the 'tag' parameter")
1399 return baserlib._Tags_PUT(self.TAG_LEVEL,
1400 self.queryargs["tag"], name=self.name,
1401 dry_run=bool(self.dryRun()))
1402
1404 """Delete a tag.
1405
1406 In order to delete a set of tags, the DELETE
1407 request should be addressed to a URI like:
1408 /tags?tag=[tag]&tag=[tag]
1409
1410 """
1411 # pylint: disable=W0212
1412 if "tag" not in self.queryargs:
1413 # refuse to delete all tags when none are specified
1414 raise http.HttpBadRequest("Cannot delete all tags - please specify"
1415 " tag(s) using the 'tag' parameter")
1416 return baserlib._Tags_DELETE(self.TAG_LEVEL,
1417 self.queryargs["tag"],
1418 name=self.name,
1419 dry_run=bool(self.dryRun()))
1420
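# A sketch of how the tag resources are used (hypothetical URI and tag names;
# tags are always passed via the repeatable 'tag' query parameter):
#
#   PUT    /2/instances/inst1.example.com/tags?tag=foo&tag=bar
#   DELETE /2/instances/inst1.example.com/tags?tag=foo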
1429
1438
1447
1456