
Source Code for Module ganeti.rapi.rlib2

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


  31  """Remote API resource implementations. 
  32   
  33  PUT or POST? 
  34  ============ 
  35   
  36  According to RFC2616 the main difference between PUT and POST is that 
  37  POST can create new resources but PUT can only create the resource the 
  38  URI was pointing to on the PUT request. 
  39   
  40  In the context of this module POST on ``/2/instances`` to change an existing 
  41  entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a 
  42  new instance) with a name specified in the request. 
  43   
  44  Quoting from RFC2616, section 9.6:: 
  45   
  46    The fundamental difference between the POST and PUT requests is reflected in 
  47    the different meaning of the Request-URI. The URI in a POST request 
  48    identifies the resource that will handle the enclosed entity. That resource 
  49    might be a data-accepting process, a gateway to some other protocol, or a 
  50    separate entity that accepts annotations. In contrast, the URI in a PUT 
  51    request identifies the entity enclosed with the request -- the user agent 
  52    knows what URI is intended and the server MUST NOT attempt to apply the 
  53    request to some other resource. If the server desires that the request be 
  54    applied to a different URI, it MUST send a 301 (Moved Permanently) response; 
  55    the user agent MAY then make its own decision regarding whether or not to 
  56    redirect the request. 
  57   
  58  So when adding new methods, if they are operating on the URI entity itself, 
  59  PUT should be prefered over POST. 
  60   
  61  """ 

# pylint: disable=C0103

# C0103: Invalid name, since the R_* names are not conforming

from ganeti import opcodes
from ganeti import objects
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import rapi
from ganeti import ht
from ganeti import compat
from ganeti import ssconf
from ganeti.rapi import baserlib


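# The helper below is not part of the original module; it is a minimal,
# illustrative sketch of the PUT-or-POST rule from the module docstring above.
# The endpoint "cluster.example.com:5080" and the request bodies are made-up
# placeholders, and real clients should use ganeti.rapi.client rather than
# raw HTTP.
def _ExamplePutVsPost():
  import httplib

  headers = {"Content-Type": "application/json"}
  conn = httplib.HTTPSConnection("cluster.example.com", 5080)

  # POST to the collection: asks the server to create a new, named entity
  # (here: an instance); the body shown is a placeholder, not a complete
  # creation document.
  conn.request("POST", "/2/instances", '{"__version__": 1}', headers)
  conn.getresponse().read()

  # PUT to the entity itself: operates on the resource the URI identifies
  # (here: modifying an existing instance).
  conn.request("PUT", "/2/instances/inst1.example.com/modify", "{}", headers)
  conn.getresponse().read()
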
_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.uuids", "nic.names",
            "nic.links", "nic.networks", "nic.networks.names", "nic.bridges",
            "network_port",
            "disk.sizes", "disk.spindles", "disk_usage", "disk.uuids",
            "disk.names",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree", "sptotal", "spfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnos", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            "ndparams",
            "group.uuid",
            ] + _COMMON_FIELDS

NET_FIELDS = ["name", "network", "gateway",
              "network6", "gateway6",
              "mac_prefix",
              "free_count", "reserved_count",
              "map", "group_list", "inst_list",
              "external_reservations",
              ] + _COMMON_FIELDS

G_FIELDS = [
  "alloc_policy",
  "name",
  "node_cnt",
  "node_list",
  "ipolicy",
  "custom_ipolicy",
  "diskparams",
  "custom_diskparams",
  "ndparams",
  "custom_ndparams",
  ] + _COMMON_FIELDS

J_FIELDS_BULK = [
  "id", "ops", "status", "summary",
  "opstatus",
  "received_ts", "start_ts", "end_ts",
  ]

J_FIELDS = J_FIELDS_BULK + [
  "oplog",
  "opresult",
  ]

_NR_DRAINED = "drained"
_NR_MASTER_CANDIDATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

_NR_MAP = {
  constants.NR_MASTER: _NR_MASTER,
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
  constants.NR_DRAINED: _NR_DRAINED,
  constants.NR_OFFLINE: _NR_OFFLINE,
  constants.NR_REGULAR: _NR_REGULAR,
  }

assert frozenset(_NR_MAP.keys()) == constants.NR_ALL

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Feature string for node migration version 1
_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"

# Feature string for node evacuation with LU-generated jobs
_NODE_EVAC_RES1 = "node-evac-res1"

ALL_FEATURES = compat.UniqueFrozenset([
  _INST_CREATE_REQV1,
  _INST_REINSTALL_REQV1,
  _NODE_MIGRATE_REQV1,
  _NODE_EVAC_RES1,
  ])

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10


# FIXME: For compatibility we update the beparams/memory field. Needs to be
# removed in Ganeti 2.8
def _UpdateBeparams(inst):
  """Updates the beparams dict of inst to support the memory field.

  @param inst: Inst dict
  @return: Updated inst dict

  """
  beparams = inst["beparams"]
  beparams[constants.BE_MEMORY] = beparams[constants.BE_MAXMEM]

  return inst


class R_root(baserlib.ResourceBase):
  """/ resource.

  """
  @staticmethod
  def GET():
    """Supported for legacy reasons.

    """
    return None


class R_2(R_root):
  """/2 resource.

  """


class R_version(baserlib.ResourceBase):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION


class R_2_info(baserlib.OpcodeResource):
  """/2/info resource.

  """
  GET_OPCODE = opcodes.OpClusterQuery
  GET_ALIASES = {
    "volume_group_name": "vg_name",
    "drbd_usermode_helper": "drbd_helper",
    }

  def GET(self):
    """Returns cluster information.

    """
    client = self.GetClient(query=True)
    return client.QueryClusterInfo()


class R_2_features(baserlib.ResourceBase):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns list of optional RAPI features implemented.

    """
    return list(ALL_FEATURES)


class R_2_os(baserlib.OpcodeResource):
  """/2/os resource.

  """
  GET_OPCODE = opcodes.OpOsDiagnose

  def GET(self):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = self.GetClient()
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    job_id = self.SubmitJob([op], cl=cl)
    # we use custom feedback function, instead of print we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, variants) in diagnose_data:
      os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names


class R_2_redist_config(baserlib.OpcodeResource):
  """/2/redistribute-config resource.

  """
  PUT_OPCODE = opcodes.OpClusterRedistConf


class R_2_cluster_modify(baserlib.OpcodeResource):
  """/2/modify resource.

  """
  PUT_OPCODE = opcodes.OpClusterSetParams


class R_2_jobs(baserlib.ResourceBase):
  """/2/jobs resource.

  """
  def GET(self):
    """Returns a dictionary of jobs.

    @return: a dictionary with job ids and uris.

    """
    client = self.GetClient(query=True)

    if self.useBulk():
      bulkdata = client.QueryJobs(None, J_FIELDS_BULK)
      return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)
    else:
      jobdata = map(compat.fst, client.QueryJobs(None, ["id"]))
      return baserlib.BuildUriList(jobdata, "/2/jobs/%s",
                                   uri_fields=("id", "uri"))


class R_2_jobs_id(baserlib.ResourceBase):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries for each
              opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    job_id = self.items[0]
    result = self.GetClient(query=True).QueryJobs([job_id, ], J_FIELDS)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(J_FIELDS, result)

  def DELETE(self):
    """Cancel not-yet-started job.

    """
    job_id = self.items[0]
    result = self.GetClient().CancelJob(job_id)
    return result


class R_2_jobs_id_wait(baserlib.ResourceBase):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = self.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }

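# Note on the resource above (illustrative values only): a client waiting on
# job 1234 might send a body such as
#   {"fields": ["status"], "previous_job_info": null,
#    "previous_log_serial": null}
# with its GET request to /2/jobs/1234/wait, and receives either null when
# nothing changed within _WFJC_TIMEOUT seconds, or an object of the form
# {"job_info": ..., "log_entries": ...}.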

class R_2_nodes(baserlib.OpcodeResource):
  """/2/nodes resource.

  """

  def GET(self):
    """Returns a list of all nodes.

    """
    client = self.GetClient(query=True)

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))


class R_2_nodes_name(baserlib.OpcodeResource):
  """/2/nodes/[node_name] resource.

  """
  GET_ALIASES = {
    "sip": "secondary_ip",
    }

  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = self.GetClient(query=True)

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])


class R_2_nodes_name_powercycle(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/powercycle resource.

  """
  POST_OPCODE = opcodes.OpNodePowercycle

  def GetPostOpInput(self):
    """Tries to powercycle a node.

    """
    return (self.request_body, {
      "node_name": self.items[0],
      "force": self.useForce(),
      })


class R_2_nodes_name_role(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/role resource.

  """
  PUT_OPCODE = opcodes.OpNodeSetParams

  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = self.GetClient(query=True)
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def GetPutOpInput(self):
    """Sets the node role.

    """
    baserlib.CheckType(self.request_body, basestring, "Body contents")

    role = self.request_body

    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIDATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    assert len(self.items) == 1

    return ({}, {
      "node_name": self.items[0],
      "master_candidate": candidate,
      "offline": offline,
      "drained": drained,
      "force": self.useForce(),
      "auto_promote": bool(self._checkIntVariable("auto-promote", default=0)),
      })


class R_2_nodes_name_evacuate(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/evacuate resource.

  """
  POST_OPCODE = opcodes.OpNodeEvacuate

  def GetPostOpInput(self):
    """Evacuate all instances off a node.

    """
    return (self.request_body, {
      "node_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_nodes_name_migrate(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/migrate resource.

  """
  POST_OPCODE = opcodes.OpNodeMigrate

  def GetPostOpInput(self):
    """Migrate all primary instances from a node.

    """
    if self.queryargs:
      # Support old-style requests
      if "live" in self.queryargs and "mode" in self.queryargs:
        raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                  " be passed")

      if "live" in self.queryargs:
        if self._checkIntVariable("live", default=1):
          mode = constants.HT_MIGRATION_LIVE
        else:
          mode = constants.HT_MIGRATION_NONLIVE
      else:
        mode = self._checkStringVariable("mode", default=None)

      data = {
        "mode": mode,
        }
    else:
      data = self.request_body

    return (data, {
      "node_name": self.items[0],
      })


class R_2_nodes_name_modify(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/modify resource.

  """
  POST_OPCODE = opcodes.OpNodeSetParams

  def GetPostOpInput(self):
    """Changes parameters of a node.

    """
    assert len(self.items) == 1

    return (self.request_body, {
      "node_name": self.items[0],
      })


class R_2_nodes_name_storage(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUNodeQueryStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
  GET_OPCODE = opcodes.OpNodeQueryStorage

  def GetGetOpInput(self):
    """List storage available on a node.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    output_fields = self._checkStringVariable("output_fields", None)

    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    return ({}, {
      "nodes": [self.items[0]],
      "storage_type": storage_type,
      "output_fields": output_fields.split(","),
      })


class R_2_nodes_name_storage_modify(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  PUT_OPCODE = opcodes.OpNodeModifyStorage

  def GetPutOpInput(self):
    """Modifies a storage volume on a node.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    name = self._checkStringVariable("name", None)

    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    return ({}, {
      "node_name": self.items[0],
      "storage_type": storage_type,
      "name": name,
      "changes": changes,
      })


class R_2_nodes_name_storage_repair(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  PUT_OPCODE = opcodes.OpRepairNodeStorage

  def GetPutOpInput(self):
    """Repairs a storage volume on a node.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    return ({}, {
      "node_name": self.items[0],
      "storage_type": storage_type,
      "name": name,
      })


class R_2_networks(baserlib.OpcodeResource):
  """/2/networks resource.

  """
  POST_OPCODE = opcodes.OpNetworkAdd
  POST_RENAME = {
    "name": "network_name",
    }

  def GetPostOpInput(self):
    """Create a network.

    """
    assert not self.items
    return (self.request_body, {
      "dry_run": self.dryRun(),
      })

  def GET(self):
    """Returns a list of all networks.

    """
    client = self.GetClient(query=True)

    if self.useBulk():
      bulkdata = client.QueryNetworks([], NET_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, NET_FIELDS)
    else:
      data = client.QueryNetworks([], ["name"], False)
      networknames = [row[0] for row in data]
      return baserlib.BuildUriList(networknames, "/2/networks/%s",
                                   uri_fields=("name", "uri"))


class R_2_networks_name(baserlib.OpcodeResource):
  """/2/networks/[network_name] resource.

  """
  DELETE_OPCODE = opcodes.OpNetworkRemove

  def GET(self):
    """Send information about a network.

    """
    network_name = self.items[0]
    client = self.GetClient(query=True)

    result = baserlib.HandleItemQueryErrors(client.QueryNetworks,
                                            names=[network_name],
                                            fields=NET_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(NET_FIELDS, result[0])

  def GetDeleteOpInput(self):
    """Delete a network.

    """
    assert len(self.items) == 1
    return (self.request_body, {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_networks_name_connect(baserlib.OpcodeResource):
  """/2/networks/[network_name]/connect resource.

  """
  PUT_OPCODE = opcodes.OpNetworkConnect

  def GetPutOpInput(self):
    """Connects a network to a node group.

    """
    assert self.items
    return (self.request_body, {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_networks_name_disconnect(baserlib.OpcodeResource):
  """/2/networks/[network_name]/disconnect resource.

  """
  PUT_OPCODE = opcodes.OpNetworkDisconnect

  def GetPutOpInput(self):
    """Disconnects a network from a node group.

    """
    assert self.items
    return (self.request_body, {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_networks_name_modify(baserlib.OpcodeResource):
  """/2/networks/[network_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpNetworkSetParams

  def GetPutOpInput(self):
    """Changes some parameters of a network.

    """
    assert self.items
    return (self.request_body, {
      "network_name": self.items[0],
      })


class R_2_groups(baserlib.OpcodeResource):
  """/2/groups resource.

  """
  POST_OPCODE = opcodes.OpGroupAdd
  POST_RENAME = {
    "name": "group_name",
    }

  def GetPostOpInput(self):
    """Create a node group.

    """
    assert not self.items
    return (self.request_body, {
      "dry_run": self.dryRun(),
      })

  def GET(self):
    """Returns a list of all node groups.

    """
    client = self.GetClient(query=True)

    if self.useBulk():
      bulkdata = client.QueryGroups([], G_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, G_FIELDS)
    else:
      data = client.QueryGroups([], ["name"], False)
      groupnames = [row[0] for row in data]
      return baserlib.BuildUriList(groupnames, "/2/groups/%s",
                                   uri_fields=("name", "uri"))


class R_2_groups_name(baserlib.OpcodeResource):
  """/2/groups/[group_name] resource.

  """
  DELETE_OPCODE = opcodes.OpGroupRemove

  def GET(self):
    """Send information about a node group.

    """
    group_name = self.items[0]
    client = self.GetClient(query=True)

    result = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                            names=[group_name], fields=G_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(G_FIELDS, result[0])

  def GetDeleteOpInput(self):
    """Delete a node group.

    """
    assert len(self.items) == 1
    return ({}, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_groups_name_modify(baserlib.OpcodeResource):
  """/2/groups/[group_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpGroupSetParams
  PUT_RENAME = {
    "custom_ndparams": "ndparams",
    "custom_ipolicy": "ipolicy",
    "custom_diskparams": "diskparams",
    }

  def GetPutOpInput(self):
    """Changes some parameters of node group.

    """
    assert self.items
    return (self.request_body, {
      "group_name": self.items[0],
      })


class R_2_groups_name_rename(baserlib.OpcodeResource):
  """/2/groups/[group_name]/rename resource.

  """
  PUT_OPCODE = opcodes.OpGroupRename

  def GetPutOpInput(self):
    """Changes the name of a node group.

    """
    assert len(self.items) == 1
    return (self.request_body, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      })


class R_2_groups_name_assign_nodes(baserlib.OpcodeResource):
  """/2/groups/[group_name]/assign-nodes resource.

  """
  PUT_OPCODE = opcodes.OpGroupAssignNodes

  def GetPutOpInput(self):
    """Assigns nodes to a group.

    """
    assert len(self.items) == 1
    return (self.request_body, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      "force": self.useForce(),
      })


def _ConvertUsbDevices(data):
  """Convert in place the usb_devices string to the proper format.

  In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
  comma to space because commas cannot be accepted on the command line
  (they already act as the separator between different hvparams). RAPI
  should be able to accept commas for backwards compatibility, but we want
  it to also accept the new space separator. Therefore, we convert
  spaces into commas here and keep the old parsing logic elsewhere.

  """
  try:
    hvparams = data["hvparams"]
    usb_devices = hvparams[constants.HV_USB_DEVICES]
    hvparams[constants.HV_USB_DEVICES] = usb_devices.replace(" ", ",")
    data["hvparams"] = hvparams
  except KeyError:
    # No usb_devices, no modification required
    pass

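# For example (hypothetical device IDs), _ConvertUsbDevices rewrites a body of
#   {"hvparams": {"usb_devices": "0x046d:0xc31c 0x046d:0xc31d"}}
# in place to
#   {"hvparams": {"usb_devices": "0x046d:0xc31c,0x046d:0xc31d"}}
# before the opcode is filled.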

class R_2_instances(baserlib.OpcodeResource):
  """/2/instances resource.

  """
  POST_OPCODE = opcodes.OpInstanceCreate
  POST_RENAME = {
    "os": "os_type",
    "name": "instance_name",
    }

  def GET(self):
    """Returns a list of all available instances.

    """
    client = self.GetClient(query=True)

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return map(_UpdateBeparams, baserlib.MapBulkFields(bulkdata, I_FIELDS))
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def GetPostOpInput(self):
    """Create an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      raise http.HttpBadRequest("Instance creation request version 0 is no"
                                " longer supported")
    elif data_version != 1:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    data = self.request_body.copy()
    # Remove "__version__"
    data.pop(_REQ_DATA_VERSION, None)

    _ConvertUsbDevices(data)

    return (data, {
      "dry_run": self.dryRun(),
      })


class R_2_instances_multi_alloc(baserlib.OpcodeResource):
  """/2/instances-multi-alloc resource.

  """
  POST_OPCODE = opcodes.OpInstanceMultiAlloc

  def GetPostOpInput(self):
    """Try to allocate multiple instances.

    @return: A dict with submitted jobs, allocatable instances and failed
        allocations

    """
    if "instances" not in self.request_body:
      raise http.HttpBadRequest("Request is missing required 'instances' field"
                                " in body")

    # Unlike most other RAPI calls, this one is composed of individual opcodes,
    # and we have to do the filling ourselves
    OPCODE_RENAME = {
      "os": "os_type",
      "name": "instance_name",
      }

    body = objects.FillDict(self.request_body, {
      "instances": [
        baserlib.FillOpcode(opcodes.OpInstanceCreate, inst, {},
                            rename=OPCODE_RENAME)
        for inst in self.request_body["instances"]
        ],
      })

    return (body, {
      "dry_run": self.dryRun(),
      })


class R_2_instances_name(baserlib.OpcodeResource):
  """/2/instances/[instance_name] resource.

  """
  DELETE_OPCODE = opcodes.OpInstanceRemove

  def GET(self):
    """Send information about an instance.

    """
    client = self.GetClient(query=True)
    instance_name = self.items[0]

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    return _UpdateBeparams(baserlib.MapFields(I_FIELDS, result[0]))

  def GetDeleteOpInput(self):
    """Delete an instance.

    """
    assert len(self.items) == 1
    return (self.request_body, {
      "instance_name": self.items[0],
      "ignore_failures": False,
      "dry_run": self.dryRun(),
      })


class R_2_instances_name_info(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/info resource.

  """
  GET_OPCODE = opcodes.OpInstanceQueryData

  def GetGetOpInput(self):
    """Request detailed instance information.

    """
    assert len(self.items) == 1
    return ({}, {
      "instances": [self.items[0]],
      "static": bool(self._checkIntVariable("static", default=0)),
      })


class R_2_instances_name_reboot(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  POST_OPCODE = opcodes.OpInstanceReboot

  def GetPostOpInput(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      "reboot_type":
        self.queryargs.get("type", [constants.INSTANCE_REBOOT_HARD])[0],
      "ignore_secondaries": bool(self._checkIntVariable("ignore_secondaries")),
      "dry_run": self.dryRun(),
      })


class R_2_instances_name_startup(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  PUT_OPCODE = opcodes.OpInstanceStartup

  def GetPutOpInput(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    return ({}, {
      "instance_name": self.items[0],
      "force": self.useForce(),
      "dry_run": self.dryRun(),
      "no_remember": bool(self._checkIntVariable("no_remember")),
      })


class R_2_instances_name_shutdown(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  PUT_OPCODE = opcodes.OpInstanceShutdown

  def GetPutOpInput(self):
    """Shutdown an instance.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      "no_remember": bool(self._checkIntVariable("no_remember")),
      "dry_run": self.dryRun(),
      })


def _ParseInstanceReinstallRequest(name, data):
  """Parses a request for reinstalling an instance.

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  ostype = baserlib.CheckParameter(data, "os", default=None)
  start = baserlib.CheckParameter(data, "start", exptype=bool,
                                  default=True)
  osparams = baserlib.CheckParameter(data, "osparams", default=None)

  ops = [
    opcodes.OpInstanceShutdown(instance_name=name),
    opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype,
                                osparams=osparams),
    ]

  if start:
    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))

  return ops


class R_2_instances_name_reinstall(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  POST_OPCODE = opcodes.OpInstanceReinstall

  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    if self.request_body:
      if self.queryargs:
        raise http.HttpBadRequest("Can't combine query and body parameters")

      body = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      body = {
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),
        }
    else:
      body = {}

    ops = _ParseInstanceReinstallRequest(self.items[0], body)

    return self.SubmitJob(ops)


class R_2_instances_name_replace_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceReplaceDisks

  def GetPostOpInput(self):
    """Replaces disks on an instance.

    """
    static = {
      "instance_name": self.items[0],
      }

    if self.request_body:
      data = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      data = {
        "remote_node": self._checkStringVariable("remote_node", default=None),
        "mode": self._checkStringVariable("mode", default=None),
        "disks": self._checkStringVariable("disks", default=None),
        "iallocator": self._checkStringVariable("iallocator", default=None),
        }
    else:
      data = {}

    # Parse disks
    try:
      raw_disks = data.pop("disks")
    except KeyError:
      pass
    else:
      if raw_disks:
        if ht.TListOf(ht.TInt)(raw_disks):  # pylint: disable=E1102
          data["disks"] = raw_disks
        else:
          # Backwards compatibility for strings of the format "1, 2, 3"
          try:
            data["disks"] = [int(part) for part in raw_disks.split(",")]
          except (TypeError, ValueError), err:
            raise http.HttpBadRequest("Invalid disk index passed: %s" % err)

    return (data, static)

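# Note on the resource above: both of the following (illustrative) bodies
# select the first and third disk for replacement, the second form only for
# backwards compatibility with the old string format:
#   {"mode": "replace_auto", "disks": [0, 2]}
#   {"mode": "replace_auto", "disks": "0,2"}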

class R_2_instances_name_activate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  PUT_OPCODE = opcodes.OpInstanceActivateDisks

  def GetPutOpInput(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    return ({}, {
      "instance_name": self.items[0],
      "ignore_size": bool(self._checkIntVariable("ignore_size")),
      })


class R_2_instances_name_deactivate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  PUT_OPCODE = opcodes.OpInstanceDeactivateDisks

  def GetPutOpInput(self):
    """Deactivate disks for an instance.

    """
    return ({}, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_recreate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/recreate-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceRecreateDisks

  def GetPostOpInput(self):
    """Recreate disks for an instance.

    """
    return ({}, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_prepare_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  PUT_OPCODE = opcodes.OpBackupPrepare

  def GetPutOpInput(self):
    """Prepares an export for an instance.

    """
    return ({}, {
      "instance_name": self.items[0],
      "mode": self._checkStringVariable("mode"),
      })


class R_2_instances_name_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/export resource.

  """
  PUT_OPCODE = opcodes.OpBackupExport
  PUT_RENAME = {
    "destination": "target_node",
    }

  def GetPutOpInput(self):
    """Exports an instance.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_migrate(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/migrate resource.

  """
  PUT_OPCODE = opcodes.OpInstanceMigrate

  def GetPutOpInput(self):
    """Migrates an instance.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_failover(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/failover resource.

  """
  PUT_OPCODE = opcodes.OpInstanceFailover

  def GetPutOpInput(self):
    """Does a failover of an instance.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_rename(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/rename resource.

  """
  PUT_OPCODE = opcodes.OpInstanceRename

  def GetPutOpInput(self):
    """Changes the name of an instance.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_modify(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpInstanceSetParams
  PUT_RENAME = {
    "custom_beparams": "beparams",
    "custom_hvparams": "hvparams",
    }

  def GetPutOpInput(self):
    """Changes parameters of an instance.

    """
    data = self.request_body.copy()
    _ConvertUsbDevices(data)

    return (data, {
      "instance_name": self.items[0],
      })


class R_2_instances_name_disk_grow(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  POST_OPCODE = opcodes.OpInstanceGrowDisk

  def GetPostOpInput(self):
    """Increases the size of an instance disk.

    """
    return (self.request_body, {
      "instance_name": self.items[0],
      "disk": int(self.items[1]),
      })


class R_2_instances_name_console(baserlib.ResourceBase):
  """/2/instances/[instance_name]/console resource.

  """
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  GET_OPCODE = opcodes.OpInstanceConsole

  def GET(self):
    """Request information for connecting to instance's console.

    @return: Serialized instance console description, see
        L{objects.InstanceConsole}

    """
    instance_name = self.items[0]
    client = self.GetClient(query=True)

    ((console, oper_state), ) = \
      client.QueryInstances([instance_name], ["console", "oper_state"], False)

    if not oper_state:
      raise http.HttpServiceUnavailable("Instance console unavailable")

    assert isinstance(console, dict)
    return console


def _GetQueryFields(args):
  """Tries to extract C{fields} query parameter.

  @type args: dictionary
  @rtype: list of string
  @raise http.HttpBadRequest: When parameter can't be found

  """
  try:
    fields = args["fields"]
  except KeyError:
    raise http.HttpBadRequest("Missing 'fields' query argument")

  return _SplitQueryFields(fields[0])


def _SplitQueryFields(fields):
  """Splits fields as given for a query request.

  @type fields: string
  @rtype: list of string

  """
  return [i.strip() for i in fields.split(",")]


class R_2_query(baserlib.ResourceBase):
  """/2/query/[resource] resource.

  """
  # Results might contain sensitive information
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  PUT_ACCESS = GET_ACCESS
  GET_OPCODE = opcodes.OpQuery
  PUT_OPCODE = opcodes.OpQuery

  def _Query(self, fields, qfilter):
    client = self.GetClient()
    return client.Query(self.items[0], fields, qfilter).ToDict()

  def GET(self):
    """Returns resource information.

    @return: Query result, see L{objects.QueryResponse}

    """
    return self._Query(_GetQueryFields(self.queryargs), None)

  def PUT(self):
    """Submits job querying for resources.

    @return: Query result, see L{objects.QueryResponse}

    """
    body = self.request_body

    baserlib.CheckType(body, dict, "Body contents")

    try:
      fields = body["fields"]
    except KeyError:
      fields = _GetQueryFields(self.queryargs)

    qfilter = body.get("qfilter", None)
    # TODO: remove this after 2.7
    if qfilter is None:
      qfilter = body.get("filter", None)

    return self._Query(fields, qfilter)


class R_2_query_fields(baserlib.ResourceBase):
  """/2/query/[resource]/fields resource.

  """
  GET_OPCODE = opcodes.OpQueryFields

  def GET(self):
    """Retrieves list of available fields for a resource.

    @return: List of serialized L{objects.QueryFieldDefinition}

    """
    try:
      raw_fields = self.queryargs["fields"]
    except KeyError:
      fields = None
    else:
      fields = _SplitQueryFields(raw_fields[0])

    return self.GetClient().QueryFields(self.items[0], fields).ToDict()


class _R_Tags(baserlib.OpcodeResource):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None
  GET_OPCODE = opcodes.OpTagsGet
  PUT_OPCODE = opcodes.OpTagsSet
  DELETE_OPCODE = opcodes.OpTagsDel

  def __init__(self, items, queryargs, req, **kwargs):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.OpcodeResource.__init__(self, items, queryargs, req, **kwargs)

    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    kind = self.TAG_LEVEL

    if kind in (constants.TAG_INSTANCE,
                constants.TAG_NODEGROUP,
                constants.TAG_NODE,
                constants.TAG_NETWORK):
      if not self.name:
        raise http.HttpBadRequest("Missing name on tag request")

      cl = self.GetClient(query=True)
      tags = list(cl.QueryTags(kind, self.name))

    elif kind == constants.TAG_CLUSTER:
      assert not self.name
      # TODO: Use query API?
      ssc = ssconf.SimpleStore()
      tags = ssc.GetClusterTags()

    else:
      raise http.HttpBadRequest("Unhandled tag type!")

    return list(tags)

  def GetPutOpInput(self):
    """Add a set of tags.

    The request as a list of strings should be PUT to this URI, and
    you'll get back a job id.

    """
    return ({}, {
      "kind": self.TAG_LEVEL,
      "name": self.name,
      "tags": self.queryargs.get("tag", []),
      "dry_run": self.dryRun(),
      })

  def GetDeleteOpInput(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to a URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # Re-use code
    return self.GetPutOpInput()


class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE


class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE


class R_2_groups_name_tags(_R_Tags):
  """ /2/groups/[group_name]/tags resource.

  Manages per-nodegroup tags.

  """
  TAG_LEVEL = constants.TAG_NODEGROUP


class R_2_networks_name_tags(_R_Tags):
  """ /2/networks/[network_name]/tags resource.

  Manages per-network tags.

  """
  TAG_LEVEL = constants.TAG_NETWORK


class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER