Package ganeti :: Package rapi :: Module rlib2
[hide private]
[frames] | [no frames]

Source Code for Module ganeti.rapi.rlib2

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30   
  31  """Remote API resource implementations. 
  32   
  33  PUT or POST? 
  34  ============ 
  35   
  36  According to RFC2616 the main difference between PUT and POST is that 
  37  POST can create new resources but PUT can only create the resource the 
  38  URI was pointing to on the PUT request. 
  39   
  40  In the context of this module POST on ``/2/instances`` to change an existing 
  41  entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a 
  42  new instance) with a name specified in the request. 
  43   
  44  Quoting from RFC2616, section 9.6:: 
  45   
  46    The fundamental difference between the POST and PUT requests is reflected in 
  47    the different meaning of the Request-URI. The URI in a POST request 
  48    identifies the resource that will handle the enclosed entity. That resource 
  49    might be a data-accepting process, a gateway to some other protocol, or a 
  50    separate entity that accepts annotations. In contrast, the URI in a PUT 
  51    request identifies the entity enclosed with the request -- the user agent 
  52    knows what URI is intended and the server MUST NOT attempt to apply the 
  53    request to some other resource. If the server desires that the request be 
  54    applied to a different URI, it MUST send a 301 (Moved Permanently) response; 
  55    the user agent MAY then make its own decision regarding whether or not to 
  56    redirect the request. 
  57   
  58  So when adding new methods, if they are operating on the URI entity itself, 
  59  PUT should be preferred over POST. 
  60   
  61  """ 
  62   
  63  # pylint: disable=C0103 
  64   
  65  # C0103: Invalid name, since the R_* names are not conforming 
  66   
  67  from ganeti import opcodes 
  68  from ganeti import objects 
  69  from ganeti import http 
  70  from ganeti import constants 
  71  from ganeti import cli 
  72  from ganeti import rapi 
  73  from ganeti import ht 
  74  from ganeti import compat 
  75  from ganeti.rapi import baserlib 
  76   
  77   
# Fields available on every resource type that supports querying
_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]

# Query fields returned for instances (/2/instances)
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.uuids", "nic.names",
            "nic.links", "nic.networks", "nic.networks.names", "nic.bridges",
            "network_port",
            "disk.sizes", "disk.spindles", "disk_usage", "disk.uuids",
            "disk.names",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

# Query fields returned for nodes (/2/nodes)
N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree", "sptotal", "spfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnos", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            "ndparams",
            "group.uuid",
            ] + _COMMON_FIELDS

# Query fields returned for networks (/2/networks)
NET_FIELDS = ["name", "network", "gateway",
              "network6", "gateway6",
              "mac_prefix",
              "free_count", "reserved_count",
              "map", "group_list", "inst_list",
              "external_reservations",
              ] + _COMMON_FIELDS

# Query fields returned for node groups (/2/groups)
G_FIELDS = [
  "alloc_policy",
  "name",
  "node_cnt",
  "node_list",
  "ipolicy",
  "custom_ipolicy",
  "diskparams",
  "custom_diskparams",
  "ndparams",
  "custom_ndparams",
  ] + _COMMON_FIELDS

# Job fields returned in bulk job listings (/2/jobs?bulk=1)
J_FIELDS_BULK = [
  "id", "ops", "status", "summary",
  "opstatus",
  "received_ts", "start_ts", "end_ts",
  ]

# Full set of job fields (/2/jobs/[job_id]); includes the potentially
# large per-opcode log and result fields omitted from bulk listings
J_FIELDS = J_FIELDS_BULK + [
  "oplog",
  "opresult",
  ]

# External (RAPI-visible) names for node roles
_NR_DRAINED = "drained"
_NR_MASTER_CANDIDATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

# Maps internal node role constants to their RAPI representation
_NR_MAP = {
  constants.NR_MASTER: _NR_MASTER,
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
  constants.NR_DRAINED: _NR_DRAINED,
  constants.NR_OFFLINE: _NR_OFFLINE,
  constants.NR_REGULAR: _NR_REGULAR,
  }

# Ensure every known node role has an external name
assert frozenset(_NR_MAP.keys()) == constants.NR_ALL

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Feature string for node migration version 1
_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"

# Feature string for node evacuation with LU-generated jobs
_NODE_EVAC_RES1 = "node-evac-res1"

# All optional features advertised via /2/features
ALL_FEATURES = compat.UniqueFrozenset([
  _INST_CREATE_REQV1,
  _INST_REINSTALL_REQV1,
  _NODE_MIGRATE_REQV1,
  _NODE_EVAC_RES1,
  ])

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10
# FIXME: For compatibility we update the beparams/memory field. Needs to be
# removed in Ganeti 2.8
def _UpdateBeparams(inst):
  """Mirror the C{maxmem} backend parameter into the legacy C{memory} field.

  @param inst: Inst dict
  @return: Updated inst dict

  """
  be = inst["beparams"]
  be[constants.BE_MEMORY] = be[constants.BE_MAXMEM]

  return inst
class R_root(baserlib.ResourceBase):
  """/ resource.

  """
  @staticmethod
  def GET():
    """No-op handler, kept only for backwards compatibility.

    """
    return None
class R_2(R_root):
  """/2 resource.

  Root of the version-2 API; behaves exactly like "/".

  """
class R_version(baserlib.ResourceBase):
  """/version resource.

  Clients should query this resource to determine the remote API
  version and adapt themselves accordingly.

  """
  @staticmethod
  def GET():
    """Return the remote API version.

    """
    return constants.RAPI_VERSION
class R_2_info(baserlib.OpcodeResource):
  """/2/info resource.

  """
  GET_OPCODE = opcodes.OpClusterQuery
  GET_ALIASES = {
    "volume_group_name": "vg_name",
    "drbd_usermode_helper": "drbd_helper",
    }

  def GET(self):
    """Return general cluster information.

    """
    return self.GetClient().QueryClusterInfo()
class R_2_features(baserlib.ResourceBase):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Return the list of optional RAPI features implemented by this server.

    """
    return list(ALL_FEATURES)
class R_2_os(baserlib.OpcodeResource):
  """/2/os resource.

  """
  GET_OPCODE = opcodes.OpOsDiagnose

  def GET(self):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = self.GetClient()
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    job_id = self.SubmitJob([op], cl=cl)
    # Use a custom feedback function that logs status instead of printing it
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    names = []
    for (os_name, os_variants) in diagnose_data:
      names.extend(cli.CalculateOSNames(os_name, os_variants))

    return names
class R_2_redist_config(baserlib.OpcodeResource):
  """/2/redistribute-config resource.

  PUT triggers a cluster configuration redistribution.

  """
  PUT_OPCODE = opcodes.OpClusterRedistConf
class R_2_cluster_modify(baserlib.OpcodeResource):
  """/2/modify resource.

  """
  PUT_OPCODE = opcodes.OpClusterSetParams
  # Parameters that must not be changed through this resource
  PUT_FORBIDDEN = [
    "compression_tools",
    ]
class R_2_jobs(baserlib.ResourceBase):
  """/2/jobs resource.

  """
  def GET(self):
    """Return a dictionary of jobs.

    @return: a dictionary with jobs id and uri.

    """
    client = self.GetClient()

    if self.useBulk():
      bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
      return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)

    job_ids = [row[0] for row in client.QueryJobs(None, ["id"])]
    return baserlib.BuildUriList(job_ids, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))
class R_2_jobs_id(baserlib.ResourceBase):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Return a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries for each
              opcodes in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    job_id = self.items[0]
    result = self.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
    if result is None:
      # Unknown job ID
      raise http.HttpNotFound()
    return baserlib.MapFields(J_FIELDS, result)

  def DELETE(self):
    """Cancel a not-yet-started job.

    """
    return self.GetClient().CancelJob(self.items[0])
class R_2_jobs_id_wait(baserlib.ResourceBase):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    Reads "fields", "previous_job_info" and "previous_log_serial" from the
    request body, validates their types, then blocks (up to _WFJC_TIMEOUT
    seconds) until the job changes relative to the previous state supplied
    by the client.

    @return: None if the job did not change within the timeout, otherwise a
        dict with "job_info" and "log_entries" keys
    @raise http.HttpBadRequest: if any body parameter has the wrong type
    @raise http.HttpNotFound: if the job does not exist

    """
    job_id = self.items[0]

    # "fields" is mandatory; the other two describe the client's last known
    # state and default to None (meaning "no previous state")
    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    # Python 2: accept both int and long serial numbers
    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = self.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      # Job not found
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }
class R_2_nodes(baserlib.OpcodeResource):
  """/2/nodes resource.

  """

  def GET(self):
    """Return a list of all nodes.

    """
    client = self.GetClient()

    if self.useBulk():
      return baserlib.MapBulkFields(client.QueryNodes([], N_FIELDS, False),
                                    N_FIELDS)

    names = [row[0] for row in client.QueryNodes([], ["name"], False)]
    return baserlib.BuildUriList(names, "/2/nodes/%s",
                                 uri_fields=("id", "uri"))
class R_2_nodes_name(baserlib.OpcodeResource):
  """/2/nodes/[node_name] resource.

  """
  GET_ALIASES = {
    "sip": "secondary_ip",
    }

  def GET(self):
    """Return information about a node.

    """
    client = self.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[self.items[0]],
                                            fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])
class R_2_nodes_name_powercycle(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/powercycle resource.

  """
  POST_OPCODE = opcodes.OpNodePowercycle

  def GetPostOpInput(self):
    """Powercycle a node.

    """
    static = {
      "node_name": self.items[0],
      "force": self.useForce(),
      }
    return (self.request_body, static)
class R_2_nodes_name_role(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/role resource.

  """
  PUT_OPCODE = opcodes.OpNodeSetParams

  def GET(self):
    """Return the current node role.

    @return: Node role

    """
    client = self.GetClient()
    result = client.QueryNodes(names=[self.items[0]], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def GetPutOpInput(self):
    """Set the node role.

    """
    baserlib.CheckType(self.request_body, basestring, "Body contents")

    role = self.request_body

    # Maps the external role name to the (master_candidate, offline, drained)
    # flag triple passed to OpNodeSetParams; None means "leave unchanged"
    flags_by_role = {
      _NR_REGULAR: (False, False, False),
      _NR_MASTER_CANDIDATE: (True, None, None),
      _NR_DRAINED: (None, None, True),
      _NR_OFFLINE: (None, True, None),
      }

    try:
      (candidate, offline, drained) = flags_by_role[role]
    except KeyError:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    assert len(self.items) == 1

    return ({}, {
      "node_name": self.items[0],
      "master_candidate": candidate,
      "offline": offline,
      "drained": drained,
      "force": self.useForce(),
      "auto_promote": bool(self._checkIntVariable("auto-promote", default=0)),
      })
class R_2_nodes_name_evacuate(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/evacuate resource.

  """
  POST_OPCODE = opcodes.OpNodeEvacuate

  def GetPostOpInput(self):
    """Evacuate all instances off a node.

    """
    static = {
      "node_name": self.items[0],
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)
class R_2_nodes_name_migrate(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/migrate resource.

  """
  POST_OPCODE = opcodes.OpNodeMigrate

  def GetPostOpInput(self):
    """Migrate all primary instances from a node.

    """
    if not self.queryargs:
      data = self.request_body
    else:
      # Support old-style requests
      if "live" in self.queryargs and "mode" in self.queryargs:
        raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                  " be passed")

      if "live" in self.queryargs:
        # "live" defaults to true when present but empty
        mode = (constants.HT_MIGRATION_LIVE
                if self._checkIntVariable("live", default=1)
                else constants.HT_MIGRATION_NONLIVE)
      else:
        mode = self._checkStringVariable("mode", default=None)

      data = {
        "mode": mode,
        }

    return (data, {
      "node_name": self.items[0],
      })
class R_2_nodes_name_modify(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/modify resource.

  """
  POST_OPCODE = opcodes.OpNodeSetParams

  def GetPostOpInput(self):
    """Change parameters of a node.

    """
    assert len(self.items) == 1

    static = {
      "node_name": self.items[0],
      }
    return (self.request_body, static)
class R_2_nodes_name_storage(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUNodeQueryStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
  GET_OPCODE = opcodes.OpNodeQueryStorage

  def GetGetOpInput(self):
    """List storage available on a node.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    output_fields = self._checkStringVariable("output_fields", None)

    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    # "output_fields" is a comma-separated list of field names
    return ({}, {
      "nodes": [self.items[0]],
      "storage_type": storage_type,
      "output_fields": output_fields.split(","),
      })
class R_2_nodes_name_storage_modify(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  PUT_OPCODE = opcodes.OpNodeModifyStorage

  def GetPutOpInput(self):
    """Modify a storage volume on a node.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    volume_name = self._checkStringVariable("name", None)

    if not volume_name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}
    if "allocatable" in self.queryargs:
      # "allocatable" defaults to true when present but empty
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    return ({}, {
      "node_name": self.items[0],
      "storage_type": storage_type,
      "name": volume_name,
      "changes": changes,
      })
class R_2_nodes_name_storage_repair(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  PUT_OPCODE = opcodes.OpRepairNodeStorage

  def GetPutOpInput(self):
    """Repair a storage volume on a node.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    volume_name = self._checkStringVariable("name", None)
    if not volume_name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    return ({}, {
      "node_name": self.items[0],
      "storage_type": storage_type,
      "name": volume_name,
      })
class R_2_networks(baserlib.OpcodeResource):
  """/2/networks resource.

  """
  POST_OPCODE = opcodes.OpNetworkAdd
  POST_RENAME = {
    "name": "network_name",
    }

  def GetPostOpInput(self):
    """Create a network.

    """
    assert not self.items
    static = {
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)

  def GET(self):
    """Return a list of all networks.

    """
    client = self.GetClient()

    if self.useBulk():
      return baserlib.MapBulkFields(client.QueryNetworks([], NET_FIELDS,
                                                         False),
                                    NET_FIELDS)

    names = [row[0] for row in client.QueryNetworks([], ["name"], False)]
    return baserlib.BuildUriList(names, "/2/networks/%s",
                                 uri_fields=("name", "uri"))
class R_2_networks_name(baserlib.OpcodeResource):
  """/2/networks/[network_name] resource.

  """
  DELETE_OPCODE = opcodes.OpNetworkRemove

  def GET(self):
    """Return information about a network.

    """
    client = self.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryNetworks,
                                            names=[self.items[0]],
                                            fields=NET_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(NET_FIELDS, result[0])

  def GetDeleteOpInput(self):
    """Delete a network.

    """
    assert len(self.items) == 1
    static = {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)
class R_2_networks_name_connect(baserlib.OpcodeResource):
  """/2/networks/[network_name]/connect resource.

  """
  PUT_OPCODE = opcodes.OpNetworkConnect

  def GetPutOpInput(self):
    """Connect a network to a node group.

    """
    assert self.items
    static = {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)
class R_2_networks_name_disconnect(baserlib.OpcodeResource):
  """/2/networks/[network_name]/disconnect resource.

  """
  PUT_OPCODE = opcodes.OpNetworkDisconnect

  def GetPutOpInput(self):
    """Disconnect a network from a node group.

    """
    assert self.items
    static = {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)
class R_2_networks_name_modify(baserlib.OpcodeResource):
  """/2/networks/[network_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpNetworkSetParams

  def GetPutOpInput(self):
    """Change some parameters of a network.

    """
    assert self.items
    static = {
      "network_name": self.items[0],
      }
    return (self.request_body, static)
class R_2_groups(baserlib.OpcodeResource):
  """/2/groups resource.

  """
  POST_OPCODE = opcodes.OpGroupAdd
  POST_RENAME = {
    "name": "group_name",
    }

  def GetPostOpInput(self):
    """Create a node group.

    """
    assert not self.items
    static = {
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)

  def GET(self):
    """Return a list of all node groups.

    """
    client = self.GetClient()

    if self.useBulk():
      return baserlib.MapBulkFields(client.QueryGroups([], G_FIELDS, False),
                                    G_FIELDS)

    names = [row[0] for row in client.QueryGroups([], ["name"], False)]
    return baserlib.BuildUriList(names, "/2/groups/%s",
                                 uri_fields=("name", "uri"))
class R_2_groups_name(baserlib.OpcodeResource):
  """/2/groups/[group_name] resource.

  """
  DELETE_OPCODE = opcodes.OpGroupRemove

  def GET(self):
    """Return information about a node group.

    """
    client = self.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                            names=[self.items[0]],
                                            fields=G_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(G_FIELDS, result[0])

  def GetDeleteOpInput(self):
    """Delete a node group.

    """
    assert len(self.items) == 1
    return ({}, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      })
class R_2_groups_name_modify(baserlib.OpcodeResource):
  """/2/groups/[group_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpGroupSetParams
  # Map RAPI field names to the opcode's parameter names
  PUT_RENAME = {
    "custom_ndparams": "ndparams",
    "custom_ipolicy": "ipolicy",
    "custom_diskparams": "diskparams",
    }

  def GetPutOpInput(self):
    """Change some parameters of a node group.

    """
    assert self.items
    static = {
      "group_name": self.items[0],
      }
    return (self.request_body, static)
class R_2_groups_name_rename(baserlib.OpcodeResource):
  """/2/groups/[group_name]/rename resource.

  """
  PUT_OPCODE = opcodes.OpGroupRename

  def GetPutOpInput(self):
    """Change the name of a node group.

    """
    assert len(self.items) == 1
    static = {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)
class R_2_groups_name_assign_nodes(baserlib.OpcodeResource):
  """/2/groups/[group_name]/assign-nodes resource.

  """
  PUT_OPCODE = opcodes.OpGroupAssignNodes

  def GetPutOpInput(self):
    """Assign nodes to a group.

    """
    assert len(self.items) == 1
    static = {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      "force": self.useForce(),
      }
    return (self.request_body, static)
def _ConvertUsbDevices(data):
  """Convert in place the usb_devices string to the proper format.

  In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
  comma to space because commas cannot be accepted on the command line
  (they already act as the separator between different hvparams). RAPI
  should be able to accept commas for backwards compatibility, but we want
  it to also accept the new space separator. Therefore, we convert
  spaces into commas here and keep the old parsing logic elsewhere.

  """
  hvparams = data.get("hvparams", {})
  try:
    usb_devices = hvparams[constants.HV_USB_DEVICES]
  except KeyError:
    # No usb_devices setting, nothing to convert
    return
  hvparams[constants.HV_USB_DEVICES] = usb_devices.replace(" ", ",")
  data["hvparams"] = hvparams
class R_2_instances(baserlib.OpcodeResource):
  """/2/instances resource.

  """
  POST_OPCODE = opcodes.OpInstanceCreate
  POST_RENAME = {
    "os": "os_type",
    "name": "instance_name",
    }

  def GET(self):
    """Return a list of all available instances.

    """
    client = self.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      # Add the legacy beparams/memory field to every instance
      return [_UpdateBeparams(inst)
              for inst in baserlib.MapBulkFields(bulkdata, I_FIELDS)]

    names = [row[0]
             for row in client.QueryInstances([], ["name"], use_locking)]
    return baserlib.BuildUriList(names, "/2/instances/%s",
                                 uri_fields=("id", "uri"))

  def GetPostOpInput(self):
    """Create an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      raise http.HttpBadRequest("Instance creation request version 0 is no"
                                " longer supported")
    if data_version != 1:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    data = self.request_body.copy()
    # Remove "__version__"
    data.pop(_REQ_DATA_VERSION, None)

    _ConvertUsbDevices(data)

    return (data, {
      "dry_run": self.dryRun(),
      })
class R_2_instances_multi_alloc(baserlib.OpcodeResource):
  """/2/instances-multi-alloc resource.

  """
  POST_OPCODE = opcodes.OpInstanceMultiAlloc

  def GetPostOpInput(self):
    """Try to allocate multiple instances.

    @return: A dict with submitted jobs, allocatable instances and failed
             allocations

    """
    if "instances" not in self.request_body:
      raise http.HttpBadRequest("Request is missing required 'instances' field"
                                " in body")

    # Unlike most other RAPI calls, this one is composed of individual opcodes,
    # and we have to do the filling ourselves
    rename = {
      "os": "os_type",
      "name": "instance_name",
      }

    filled = [baserlib.FillOpcode(opcodes.OpInstanceCreate, inst, {},
                                  rename=rename)
              for inst in self.request_body["instances"]]

    body = objects.FillDict(self.request_body, {
      "instances": filled,
      })

    return (body, {
      "dry_run": self.dryRun(),
      })
class R_2_instances_name(baserlib.OpcodeResource):
  """/2/instances/[instance_name] resource.

  """
  DELETE_OPCODE = opcodes.OpInstanceRemove

  def GET(self):
    """Return information about an instance.

    """
    client = self.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[self.items[0]],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    # Add the legacy beparams/memory field before returning
    return _UpdateBeparams(baserlib.MapFields(I_FIELDS, result[0]))

  def GetDeleteOpInput(self):
    """Delete an instance.

    """
    assert len(self.items) == 1
    static = {
      "instance_name": self.items[0],
      "ignore_failures": False,
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)
class R_2_instances_name_info(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/info resource.

  """
  GET_OPCODE = opcodes.OpInstanceQueryData

  def GetGetOpInput(self):
    """Request detailed instance information.

    """
    assert len(self.items) == 1
    # "static" requests configuration-only data (no hypervisor queries)
    return ({}, {
      "instances": [self.items[0]],
      "static": bool(self._checkIntVariable("static", default=0)),
      })
class R_2_instances_name_reboot(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  POST_OPCODE = opcodes.OpInstanceReboot

  def GetPostOpInput(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    # "type" defaults to a hard reboot when not given
    static = {
      "instance_name": self.items[0],
      "reboot_type":
        self.queryargs.get("type", [constants.INSTANCE_REBOOT_HARD])[0],
      "ignore_secondaries": bool(self._checkIntVariable("ignore_secondaries")),
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)
class R_2_instances_name_startup(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  PUT_OPCODE = opcodes.OpInstanceStartup

  def GetPutOpInput(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    return ({}, {
      "instance_name": self.items[0],
      "force": self.useForce(),
      "dry_run": self.dryRun(),
      "no_remember": bool(self._checkIntVariable("no_remember")),
      })
class R_2_instances_name_shutdown(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  PUT_OPCODE = opcodes.OpInstanceShutdown

  def GetPutOpInput(self):
    """Shutdown an instance.

    """
    static = {
      "instance_name": self.items[0],
      "no_remember": bool(self._checkIntVariable("no_remember")),
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)
def _ParseInstanceReinstallRequest(name, data):
  """Parse a request for reinstalling an instance.

  @param name: Instance name
  @param data: Request body (must be a dict)
  @return: List of opcodes implementing the reinstallation
  @raise http.HttpBadRequest: if the body is not a dictionary

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  ostype = baserlib.CheckParameter(data, "os", default=None)
  start = baserlib.CheckParameter(data, "start", exptype=bool,
                                  default=True)
  osparams = baserlib.CheckParameter(data, "osparams", default=None)

  # Shut down, reinstall and (optionally) start again
  ops = [
    opcodes.OpInstanceShutdown(instance_name=name),
    opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
                                osparams=osparams),
    ]
  if start:
    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))
  return ops
class R_2_instances_name_reinstall(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  POST_OPCODE = opcodes.OpInstanceReinstall

  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    # Body and query parameters are mutually exclusive
    if self.request_body and self.queryargs:
      raise http.HttpBadRequest("Can't combine query and body parameters")

    if self.request_body:
      body = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      body = {
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),
        }
    else:
      body = {}

    return self.SubmitJob(_ParseInstanceReinstallRequest(self.items[0], body))
1193
class R_2_instances_name_replace_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceReplaceDisks

  def GetPostOpInput(self):
    """Replaces disks on an instance.

    Parameters are taken from the request body when present, otherwise
    from the (legacy) query arguments.  The "disks" value may be a list
    of integer indices or, for backwards compatibility, a string of the
    format "1,2,3".

    @raise http.HttpBadRequest: If a disk index cannot be parsed

    """
    static = {
      "instance_name": self.items[0],
      }

    if self.request_body:
      data = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      data = {
        "remote_node": self._checkStringVariable("remote_node", default=None),
        "mode": self._checkStringVariable("mode", default=None),
        "disks": self._checkStringVariable("disks", default=None),
        "iallocator": self._checkStringVariable("iallocator", default=None),
        }
    else:
      data = {}

    # Parse disks
    try:
      raw_disks = data.pop("disks")
    except KeyError:
      pass
    else:
      if raw_disks:
        if ht.TListOf(ht.TInt)(raw_disks):  # pylint: disable=E1102
          data["disks"] = raw_disks
        else:
          # Backwards compatibility for strings of the format "1, 2, 3"
          try:
            data["disks"] = [int(part) for part in raw_disks.split(",")]
          # "except X as err" instead of the Python-2-only "except X, err";
          # valid since Python 2.6 and required for Python 3 compatibility
          except (TypeError, ValueError) as err:
            raise http.HttpBadRequest("Invalid disk index passed: %s" % err)

    return (data, static)
1239
class R_2_instances_name_activate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  PUT_OPCODE = opcodes.OpInstanceActivateDisks

  def GetPutOpInput(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    static = {
      "instance_name": self.items[0],
      "ignore_size": bool(self._checkIntVariable("ignore_size")),
      }
    return ({}, static)
1257
class R_2_instances_name_deactivate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  PUT_OPCODE = opcodes.OpInstanceDeactivateDisks

  def GetPutOpInput(self):
    """Deactivate disks for an instance.

    """
    static = {
      "instance_name": self.items[0],
      }
    return ({}, static)
1272
class R_2_instances_name_recreate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/recreate-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceRecreateDisks

  def GetPostOpInput(self):
    """Recreate disks for an instance.

    """
    static = {
      "instance_name": self.items[0],
      }
    return (self.request_body, static)
1287
class R_2_instances_name_prepare_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  PUT_OPCODE = opcodes.OpBackupPrepare

  def GetPutOpInput(self):
    """Prepares an export for an instance.

    """
    static = {
      "instance_name": self.items[0],
      "mode": self._checkStringVariable("mode"),
      }
    return ({}, static)
1303
class R_2_instances_name_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/export resource.

  """
  PUT_OPCODE = opcodes.OpBackupExport
  PUT_RENAME = {
    "destination": "target_node",
    }

  def GetPutOpInput(self):
    """Exports an instance.

    """
    static = {
      "instance_name": self.items[0],
      }
    return (self.request_body, static)
1321
class R_2_instances_name_migrate(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/migrate resource.

  """
  PUT_OPCODE = opcodes.OpInstanceMigrate

  def GetPutOpInput(self):
    """Migrates an instance.

    """
    static = {
      "instance_name": self.items[0],
      }
    return (self.request_body, static)
1336
class R_2_instances_name_failover(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/failover resource.

  """
  PUT_OPCODE = opcodes.OpInstanceFailover

  def GetPutOpInput(self):
    """Does a failover of an instance.

    """
    static = {
      "instance_name": self.items[0],
      }
    return (self.request_body, static)
1351
class R_2_instances_name_rename(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/rename resource.

  """
  PUT_OPCODE = opcodes.OpInstanceRename

  def GetPutOpInput(self):
    """Changes the name of an instance.

    """
    static = {
      "instance_name": self.items[0],
      }
    return (self.request_body, static)
1366
class R_2_instances_name_modify(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpInstanceSetParams
  PUT_RENAME = {
    "custom_beparams": "beparams",
    "custom_hvparams": "hvparams",
    }

  def GetPutOpInput(self):
    """Changes parameters of an instance.

    """
    # Work on a copy so the original request body is left untouched
    body = self.request_body.copy()
    _ConvertUsbDevices(body)

    static = {
      "instance_name": self.items[0],
      }
    return (body, static)
1388
class R_2_instances_name_disk_grow(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  POST_OPCODE = opcodes.OpInstanceGrowDisk

  def GetPostOpInput(self):
    """Increases the size of an instance disk.

    """
    static = {
      "instance_name": self.items[0],
      # The disk index comes from the URI
      "disk": int(self.items[1]),
      }
    return (self.request_body, static)
1404
class R_2_instances_name_console(baserlib.ResourceBase):
  """/2/instances/[instance_name]/console resource.

  """
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  GET_OPCODE = opcodes.OpInstanceConsole

  def GET(self):
    """Request information for connecting to instance's console.

    @return: Serialized instance console description, see
             L{objects.InstanceConsole}
    @raise http.HttpServiceUnavailable: If the instance is not running

    """
    cl = self.GetClient()
    ((console, oper_state), ) = \
      cl.QueryInstances([self.items[0]], ["console", "oper_state"], False)

    if not oper_state:
      raise http.HttpServiceUnavailable("Instance console unavailable")

    assert isinstance(console, dict)
    return console
1431
def _GetQueryFields(args):
  """Tries to extract C{fields} query parameter.

  @type args: dictionary
  @rtype: list of string
  @raise http.HttpBadRequest: When parameter can't be found

  """
  if "fields" not in args:
    raise http.HttpBadRequest("Missing 'fields' query argument")

  # Query arguments are lists; only the first value is used
  return _SplitQueryFields(args["fields"][0])
1447
1448 1449 -def _SplitQueryFields(fields):
1450 """Splits fields as given for a query request. 1451 1452 @type fields: string 1453 @rtype: list of string 1454 1455 """ 1456 return [i.strip() for i in fields.split(",")]
1457
class R_2_query(baserlib.ResourceBase):
  """/2/query/[resource] resource.

  """
  # Results might contain sensitive information
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  PUT_ACCESS = GET_ACCESS
  GET_OPCODE = opcodes.OpQuery
  PUT_OPCODE = opcodes.OpQuery

  def _Query(self, fields, qfilter):
    """Runs a query on the given resource and serializes the result.

    """
    return self.GetClient().Query(self.items[0], fields, qfilter).ToDict()

  def GET(self):
    """Returns resource information.

    @return: Query result, see L{objects.QueryResponse}

    """
    return self._Query(_GetQueryFields(self.queryargs), None)

  def PUT(self):
    """Submits job querying for resources.

    @return: Query result, see L{objects.QueryResponse}

    """
    body = self.request_body
    baserlib.CheckType(body, dict, "Body contents")

    # Fields come from the body if given, otherwise from the query string
    if "fields" in body:
      fields = body["fields"]
    else:
      fields = _GetQueryFields(self.queryargs)

    qfilter = body.get("qfilter", None)
    # TODO: remove this after 2.7
    if qfilter is None:
      qfilter = body.get("filter", None)

    return self._Query(fields, qfilter)
1502
class R_2_query_fields(baserlib.ResourceBase):
  """/2/query/[resource]/fields resource.

  """
  GET_OPCODE = opcodes.OpQueryFields

  def GET(self):
    """Retrieves list of available fields for a resource.

    @return: List of serialized L{objects.QueryFieldDefinition}

    """
    # With no "fields" argument all fields are returned
    if "fields" in self.queryargs:
      fields = _SplitQueryFields(self.queryargs["fields"][0])
    else:
      fields = None

    return self.GetClient().QueryFields(self.items[0], fields).ToDict()
1524
class _R_Tags(baserlib.OpcodeResource):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None
  GET_OPCODE = opcodes.OpTagsGet
  PUT_OPCODE = opcodes.OpTagsSet
  DELETE_OPCODE = opcodes.OpTagsDel

  def __init__(self, items, queryargs, req, **kwargs):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.OpcodeResource.__init__(self, items, queryargs, req, **kwargs)

    # Cluster tags are not tied to a named object
    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    kind = self.TAG_LEVEL

    if kind not in constants.VALID_TAG_TYPES:
      raise http.HttpBadRequest("Unhandled tag type!")

    if kind == constants.TAG_CLUSTER:
      if self.name:
        raise http.HttpBadRequest("Can't specify a name"
                                  " for cluster tag request")
      name = ""
    else:
      if not self.name:
        raise http.HttpBadRequest("Missing name on tag request")
      name = self.name

    return list(self.GetClient().QueryTags(kind, name))

  def GetPutOpInput(self):
    """Add a set of tags.

    The request as a list of strings should be PUT to this URI. And
    you'll have back a job id.

    """
    static = {
      "kind": self.TAG_LEVEL,
      "name": self.name,
      "tags": self.queryargs.get("tag", []),
      "dry_run": self.dryRun(),
      }
    return ({}, static)

  def GetDeleteOpInput(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # Re-use code
    return self.GetPutOpInput()
1600
class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  # All tag operations inherited from _R_Tags act at the instance level
  TAG_LEVEL = constants.TAG_INSTANCE
1609
class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  # All tag operations inherited from _R_Tags act at the node level
  TAG_LEVEL = constants.TAG_NODE
1618
class R_2_groups_name_tags(_R_Tags):
  """ /2/groups/[group_name]/tags resource.

  Manages per-nodegroup tags.

  """
  # All tag operations inherited from _R_Tags act at the node group level
  TAG_LEVEL = constants.TAG_NODEGROUP
1627
class R_2_networks_name_tags(_R_Tags):
  """ /2/networks/[network_name]/tags resource.

  Manages per-network tags.

  """
  # All tag operations inherited from _R_Tags act at the network level
  TAG_LEVEL = constants.TAG_NETWORK
1636
class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  # Cluster-level tags; _R_Tags.__init__ sets self.name to None for this level
  TAG_LEVEL = constants.TAG_CLUSTER
1645