
Source Code for Module ganeti.rapi.rlib2

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30   
  31  """Remote API resource implementations. 
  32   
  33  PUT or POST? 
  34  ============ 
  35   
   36  According to RFC2616, the main difference between PUT and POST is that 
   37  POST can create new resources, while PUT can only create the resource 
   38  that the PUT request's URI points to. 
  39   
  40  In the context of this module POST on ``/2/instances`` to change an existing 
  41  entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a 
  42  new instance) with a name specified in the request. 
  43   
  44  Quoting from RFC2616, section 9.6:: 
  45   
  46    The fundamental difference between the POST and PUT requests is reflected in 
  47    the different meaning of the Request-URI. The URI in a POST request 
  48    identifies the resource that will handle the enclosed entity. That resource 
  49    might be a data-accepting process, a gateway to some other protocol, or a 
  50    separate entity that accepts annotations. In contrast, the URI in a PUT 
  51    request identifies the entity enclosed with the request -- the user agent 
  52    knows what URI is intended and the server MUST NOT attempt to apply the 
  53    request to some other resource. If the server desires that the request be 
  54    applied to a different URI, it MUST send a 301 (Moved Permanently) response; 
  55    the user agent MAY then make its own decision regarding whether or not to 
  56    redirect the request. 
  57   
   58  So when adding new methods, if they operate on the URI entity itself, 
   59  PUT should be preferred over POST. 
  60   
  61  """ 
  62   
  63  # pylint: disable=C0103 
  64   
  65  # C0103: Invalid name, since the R_* names are not conforming 
  66   
  67  from ganeti import opcodes 
  68  from ganeti import objects 
  69  from ganeti import http 
  70  from ganeti import constants 
  71  from ganeti import cli 
  72  from ganeti import rapi 
  73  from ganeti import ht 
  74  from ganeti import compat 
  75  from ganeti import ssconf 
  76  from ganeti.rapi import baserlib 
  77   
  78   
  79  _COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"] 
  80  I_FIELDS = ["name", "admin_state", "os", 
  81              "pnode", "snodes", 
  82              "disk_template", 
  83              "nic.ips", "nic.macs", "nic.modes", "nic.uuids", "nic.names", 
  84              "nic.links", "nic.networks", "nic.networks.names", "nic.bridges", 
  85              "network_port", 
  86              "disk.sizes", "disk.spindles", "disk_usage", "disk.uuids", 
  87              "disk.names", 
  88              "beparams", "hvparams", 
  89              "oper_state", "oper_ram", "oper_vcpus", "status", 
  90              "custom_hvparams", "custom_beparams", "custom_nicparams", 
  91              ] + _COMMON_FIELDS 
  92   
  93  N_FIELDS = ["name", "offline", "master_candidate", "drained", 
  94              "dtotal", "dfree", "sptotal", "spfree", 
  95              "mtotal", "mnode", "mfree", 
  96              "pinst_cnt", "sinst_cnt", 
  97              "ctotal", "cnos", "cnodes", "csockets", 
  98              "pip", "sip", "role", 
  99              "pinst_list", "sinst_list", 
 100              "master_capable", "vm_capable", 
 101              "ndparams", 
 102              "group.uuid", 
 103              ] + _COMMON_FIELDS 
 104   
 105  NET_FIELDS = ["name", "network", "gateway", 
 106                "network6", "gateway6", 
 107                "mac_prefix", 
 108                "free_count", "reserved_count", 
 109                "map", "group_list", "inst_list", 
 110                "external_reservations", 
 111                ] + _COMMON_FIELDS 
 112   
 113  G_FIELDS = [ 
 114    "alloc_policy", 
 115    "name", 
 116    "node_cnt", 
 117    "node_list", 
 118    "ipolicy", 
 119    "custom_ipolicy", 
 120    "diskparams", 
 121    "custom_diskparams", 
 122    "ndparams", 
 123    "custom_ndparams", 
 124    ] + _COMMON_FIELDS 
 125   
 126  J_FIELDS_BULK = [ 
 127    "id", "ops", "status", "summary", 
 128    "opstatus", 
 129    "received_ts", "start_ts", "end_ts", 
 130    ] 
 131   
 132  J_FIELDS = J_FIELDS_BULK + [ 
 133    "oplog", 
 134    "opresult", 
 135    ] 
 136   
 137  _NR_DRAINED = "drained" 
 138  _NR_MASTER_CANDIDATE = "master-candidate" 
 139  _NR_MASTER = "master" 
 140  _NR_OFFLINE = "offline" 
 141  _NR_REGULAR = "regular" 
 142   
 143  _NR_MAP = { 
 144    constants.NR_MASTER: _NR_MASTER, 
 145    constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE, 
 146    constants.NR_DRAINED: _NR_DRAINED, 
 147    constants.NR_OFFLINE: _NR_OFFLINE, 
 148    constants.NR_REGULAR: _NR_REGULAR, 
 149    } 
 150   
 151  assert frozenset(_NR_MAP.keys()) == constants.NR_ALL 
 152   
 153  # Request data version field 
 154  _REQ_DATA_VERSION = "__version__" 
 155   
 156  # Feature string for instance creation request data version 1 
 157  _INST_CREATE_REQV1 = "instance-create-reqv1" 
 158   
 159  # Feature string for instance reinstall request version 1 
 160  _INST_REINSTALL_REQV1 = "instance-reinstall-reqv1" 
 161   
 162  # Feature string for node migration version 1 
 163  _NODE_MIGRATE_REQV1 = "node-migrate-reqv1" 
 164   
 165  # Feature string for node evacuation with LU-generated jobs 
 166  _NODE_EVAC_RES1 = "node-evac-res1" 
 167   
 168  ALL_FEATURES = compat.UniqueFrozenset([ 
 169    _INST_CREATE_REQV1, 
 170    _INST_REINSTALL_REQV1, 
 171    _NODE_MIGRATE_REQV1, 
 172    _NODE_EVAC_RES1, 
 173    ]) 
 174   
 175  # Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change. 
 176  _WFJC_TIMEOUT = 10 
177 178 179 # FIXME: For compatibility we update the beparams/memory field. Needs to be 180 # removed in Ganeti 2.8 181 -def _UpdateBeparams(inst):
182 """Updates the beparams dict of inst to support the memory field. 183 184 @param inst: Inst dict 185 @return: Updated inst dict 186 187 """ 188 beparams = inst["beparams"] 189 beparams[constants.BE_MEMORY] = beparams[constants.BE_MAXMEM] 190 191 return inst
192
193 194 -class R_root(baserlib.ResourceBase):
195 """/ resource. 196 197 """ 198 @staticmethod
199 - def GET():
200 """Supported for legacy reasons. 201 202 """ 203 return None
204
205 206 -class R_2(R_root):
207 """/2 resource. 208 209 """
210
211 212 -class R_version(baserlib.ResourceBase):
213 """/version resource. 214 215 This resource should be used to determine the remote API version and 216 to adapt clients accordingly. 217 218 """ 219 @staticmethod
220 - def GET():
221 """Returns the remote API version. 222 223 """ 224 return constants.RAPI_VERSION
225
226 227 -class R_2_info(baserlib.OpcodeResource):
228 """/2/info resource. 229 230 """ 231 GET_OPCODE = opcodes.OpClusterQuery 232 GET_ALIASES = { 233 "volume_group_name": "vg_name", 234 "drbd_usermode_helper": "drbd_helper", 235 } 236
237 - def GET(self):
238 """Returns cluster information. 239 240 """ 241 client = self.GetClient(query=True) 242 return client.QueryClusterInfo()
243
244 245 -class R_2_features(baserlib.ResourceBase):
246 """/2/features resource. 247 248 """ 249 @staticmethod
250 - def GET():
251 """Returns list of optional RAPI features implemented. 252 253 """ 254 return list(ALL_FEATURES)
255
256 257 -class R_2_os(baserlib.OpcodeResource):
258 """/2/os resource. 259 260 """ 261 GET_OPCODE = opcodes.OpOsDiagnose 262
263 - def GET(self):
264 """Return a list of all OSes. 265 266 Can return error 500 in case of a problem. 267 268 Example: ["debian-etch"] 269 270 """ 271 cl = self.GetClient() 272 op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[]) 273 job_id = self.SubmitJob([op], cl=cl) 274 # we use custom feedback function, instead of print we log the status 275 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn) 276 diagnose_data = result[0] 277 278 if not isinstance(diagnose_data, list): 279 raise http.HttpBadGateway(message="Can't get OS list") 280 281 os_names = [] 282 for (name, variants) in diagnose_data: 283 os_names.extend(cli.CalculateOSNames(name, variants)) 284 285 return os_names
286
287 288 -class R_2_redist_config(baserlib.OpcodeResource):
289 """/2/redistribute-config resource. 290 291 """ 292 PUT_OPCODE = opcodes.OpClusterRedistConf
293
294 295 -class R_2_cluster_modify(baserlib.OpcodeResource):
296 """/2/modify resource. 297 298 """ 299 PUT_OPCODE = opcodes.OpClusterSetParams
300
301 302 -class R_2_jobs(baserlib.ResourceBase):
303 """/2/jobs resource. 304 305 """
306 - def GET(self):
307 """Returns a dictionary of jobs. 308 309 @return: a dictionary with jobs id and uri. 310 311 """ 312 client = self.GetClient(query=True) 313 314 if self.useBulk(): 315 bulkdata = client.QueryJobs(None, J_FIELDS_BULK) 316 return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK) 317 else: 318 jobdata = map(compat.fst, client.QueryJobs(None, ["id"])) 319 return baserlib.BuildUriList(jobdata, "/2/jobs/%s", 320 uri_fields=("id", "uri"))
321
322 323 -class R_2_jobs_id(baserlib.ResourceBase):
324 """/2/jobs/[job_id] resource. 325 326 """
327 - def GET(self):
328 """Returns a job status. 329 330 @return: a dictionary with job parameters. 331 The result includes: 332 - id: job ID as a number 333 - status: current job status as a string 334 - ops: involved OpCodes as a list of dictionaries for each 335 opcodes in the job 336 - opstatus: OpCodes status as a list 337 - opresult: OpCodes results as a list of lists 338 339 """ 340 job_id = self.items[0] 341 result = self.GetClient(query=True).QueryJobs([job_id, ], J_FIELDS)[0] 342 if result is None: 343 raise http.HttpNotFound() 344 return baserlib.MapFields(J_FIELDS, result)
345
346 - def DELETE(self):
347 """Cancel not-yet-started job. 348 349 """ 350 job_id = self.items[0] 351 result = self.GetClient().CancelJob(job_id) 352 return result
353
354 355 -class R_2_jobs_id_wait(baserlib.ResourceBase):
356 """/2/jobs/[job_id]/wait resource. 357 358 """ 359 # WaitForJobChange provides access to sensitive information and blocks 360 # machine resources (it's a blocking RAPI call), hence restricting access. 361 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE] 362
363 - def GET(self):
364 """Waits for job changes. 365 366 """ 367 job_id = self.items[0] 368 369 fields = self.getBodyParameter("fields") 370 prev_job_info = self.getBodyParameter("previous_job_info", None) 371 prev_log_serial = self.getBodyParameter("previous_log_serial", None) 372 373 if not isinstance(fields, list): 374 raise http.HttpBadRequest("The 'fields' parameter should be a list") 375 376 if not (prev_job_info is None or isinstance(prev_job_info, list)): 377 raise http.HttpBadRequest("The 'previous_job_info' parameter should" 378 " be a list") 379 380 if not (prev_log_serial is None or 381 isinstance(prev_log_serial, (int, long))): 382 raise http.HttpBadRequest("The 'previous_log_serial' parameter should" 383 " be a number") 384 385 client = self.GetClient() 386 result = client.WaitForJobChangeOnce(job_id, fields, 387 prev_job_info, prev_log_serial, 388 timeout=_WFJC_TIMEOUT) 389 if not result: 390 raise http.HttpNotFound() 391 392 if result == constants.JOB_NOTCHANGED: 393 # No changes 394 return None 395 396 (job_info, log_entries) = result 397 398 return { 399 "job_info": job_info, 400 "log_entries": log_entries, 401 }
402
403 404 -class R_2_nodes(baserlib.OpcodeResource):
405 """/2/nodes resource. 406 407 """ 408 GET_OPCODE = opcodes.OpNodeQuery 409
410 - def GET(self):
411 """Returns a list of all nodes. 412 413 """ 414 client = self.GetClient(query=True) 415 416 if self.useBulk(): 417 bulkdata = client.QueryNodes([], N_FIELDS, False) 418 return baserlib.MapBulkFields(bulkdata, N_FIELDS) 419 else: 420 nodesdata = client.QueryNodes([], ["name"], False) 421 nodeslist = [row[0] for row in nodesdata] 422 return baserlib.BuildUriList(nodeslist, "/2/nodes/%s", 423 uri_fields=("id", "uri"))
424
425 426 -class R_2_nodes_name(baserlib.OpcodeResource):
427 """/2/nodes/[node_name] resource. 428 429 """ 430 GET_OPCODE = opcodes.OpNodeQuery 431 GET_ALIASES = { 432 "sip": "secondary_ip", 433 } 434
435 - def GET(self):
436 """Send information about a node. 437 438 """ 439 node_name = self.items[0] 440 client = self.GetClient(query=True) 441 442 result = baserlib.HandleItemQueryErrors(client.QueryNodes, 443 names=[node_name], fields=N_FIELDS, 444 use_locking=self.useLocking()) 445 446 return baserlib.MapFields(N_FIELDS, result[0])
447
448 449 -class R_2_nodes_name_powercycle(baserlib.OpcodeResource):
450 """/2/nodes/[node_name]/powercycle resource. 451 452 """ 453 POST_OPCODE = opcodes.OpNodePowercycle 454
455 - def GetPostOpInput(self):
456 """Tries to powercycle a node. 457 458 """ 459 return (self.request_body, { 460 "node_name": self.items[0], 461 "force": self.useForce(), 462 })
463
464 465 -class R_2_nodes_name_role(baserlib.OpcodeResource):
466 """/2/nodes/[node_name]/role resource. 467 468 """ 469 PUT_OPCODE = opcodes.OpNodeSetParams 470
471 - def GET(self):
472 """Returns the current node role. 473 474 @return: Node role 475 476 """ 477 node_name = self.items[0] 478 client = self.GetClient(query=True) 479 result = client.QueryNodes(names=[node_name], fields=["role"], 480 use_locking=self.useLocking()) 481 482 return _NR_MAP[result[0][0]]
483
484 - def GetPutOpInput(self):
485 """Sets the node role. 486 487 """ 488 baserlib.CheckType(self.request_body, basestring, "Body contents") 489 490 role = self.request_body 491 492 if role == _NR_REGULAR: 493 candidate = False 494 offline = False 495 drained = False 496 497 elif role == _NR_MASTER_CANDIDATE: 498 candidate = True 499 offline = drained = None 500 501 elif role == _NR_DRAINED: 502 drained = True 503 candidate = offline = None 504 505 elif role == _NR_OFFLINE: 506 offline = True 507 candidate = drained = None 508 509 else: 510 raise http.HttpBadRequest("Can't set '%s' role" % role) 511 512 assert len(self.items) == 1 513 514 return ({}, { 515 "node_name": self.items[0], 516 "master_candidate": candidate, 517 "offline": offline, 518 "drained": drained, 519 "force": self.useForce(), 520 "auto_promote": bool(self._checkIntVariable("auto-promote", default=0)), 521 })
522
523 524 -class R_2_nodes_name_evacuate(baserlib.OpcodeResource):
525 """/2/nodes/[node_name]/evacuate resource. 526 527 """ 528 POST_OPCODE = opcodes.OpNodeEvacuate 529
530 - def GetPostOpInput(self):
531 """Evacuate all instances off a node. 532 533 """ 534 return (self.request_body, { 535 "node_name": self.items[0], 536 "dry_run": self.dryRun(), 537 })
538
539 540 -class R_2_nodes_name_migrate(baserlib.OpcodeResource):
541 """/2/nodes/[node_name]/migrate resource. 542 543 """ 544 POST_OPCODE = opcodes.OpNodeMigrate 545
546 - def GetPostOpInput(self):
547 """Migrate all primary instances from a node. 548 549 """ 550 if self.queryargs: 551 # Support old-style requests 552 if "live" in self.queryargs and "mode" in self.queryargs: 553 raise http.HttpBadRequest("Only one of 'live' and 'mode' should" 554 " be passed") 555 556 if "live" in self.queryargs: 557 if self._checkIntVariable("live", default=1): 558 mode = constants.HT_MIGRATION_LIVE 559 else: 560 mode = constants.HT_MIGRATION_NONLIVE 561 else: 562 mode = self._checkStringVariable("mode", default=None) 563 564 data = { 565 "mode": mode, 566 } 567 else: 568 data = self.request_body 569 570 return (data, { 571 "node_name": self.items[0], 572 })
573
574 575 -class R_2_nodes_name_modify(baserlib.OpcodeResource):
576 """/2/nodes/[node_name]/modify resource. 577 578 """ 579 POST_OPCODE = opcodes.OpNodeSetParams 580
581 - def GetPostOpInput(self):
582 """Changes parameters of a node. 583 584 """ 585 assert len(self.items) == 1 586 587 return (self.request_body, { 588 "node_name": self.items[0], 589 })
590
591 592 -class R_2_nodes_name_storage(baserlib.OpcodeResource):
593 """/2/nodes/[node_name]/storage resource. 594 595 """ 596 # LUNodeQueryStorage acquires locks, hence restricting access to GET 597 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE] 598 GET_OPCODE = opcodes.OpNodeQueryStorage 599
600 - def GetGetOpInput(self):
601 """List storage available on a node. 602 603 """ 604 storage_type = self._checkStringVariable("storage_type", None) 605 output_fields = self._checkStringVariable("output_fields", None) 606 607 if not output_fields: 608 raise http.HttpBadRequest("Missing the required 'output_fields'" 609 " parameter") 610 611 return ({}, { 612 "nodes": [self.items[0]], 613 "storage_type": storage_type, 614 "output_fields": output_fields.split(","), 615 })
616
617 618 -class R_2_nodes_name_storage_modify(baserlib.OpcodeResource):
619 """/2/nodes/[node_name]/storage/modify resource. 620 621 """ 622 PUT_OPCODE = opcodes.OpNodeModifyStorage 623
624 - def GetPutOpInput(self):
625 """Modifies a storage volume on a node. 626 627 """ 628 storage_type = self._checkStringVariable("storage_type", None) 629 name = self._checkStringVariable("name", None) 630 631 if not name: 632 raise http.HttpBadRequest("Missing the required 'name'" 633 " parameter") 634 635 changes = {} 636 637 if "allocatable" in self.queryargs: 638 changes[constants.SF_ALLOCATABLE] = \ 639 bool(self._checkIntVariable("allocatable", default=1)) 640 641 return ({}, { 642 "node_name": self.items[0], 643 "storage_type": storage_type, 644 "name": name, 645 "changes": changes, 646 })
647
648 649 -class R_2_nodes_name_storage_repair(baserlib.OpcodeResource):
650 """/2/nodes/[node_name]/storage/repair resource. 651 652 """ 653 PUT_OPCODE = opcodes.OpRepairNodeStorage 654
655 - def GetPutOpInput(self):
656 """Repairs a storage volume on a node. 657 658 """ 659 storage_type = self._checkStringVariable("storage_type", None) 660 name = self._checkStringVariable("name", None) 661 if not name: 662 raise http.HttpBadRequest("Missing the required 'name'" 663 " parameter") 664 665 return ({}, { 666 "node_name": self.items[0], 667 "storage_type": storage_type, 668 "name": name, 669 })
670
671 672 -class R_2_networks(baserlib.OpcodeResource):
673 """/2/networks resource. 674 675 """ 676 GET_OPCODE = opcodes.OpNetworkQuery 677 POST_OPCODE = opcodes.OpNetworkAdd 678 POST_RENAME = { 679 "name": "network_name", 680 } 681
682 - def GetPostOpInput(self):
683 """Create a network. 684 685 """ 686 assert not self.items 687 return (self.request_body, { 688 "dry_run": self.dryRun(), 689 })
690
691 - def GET(self):
692 """Returns a list of all networks. 693 694 """ 695 client = self.GetClient(query=True) 696 697 if self.useBulk(): 698 bulkdata = client.QueryNetworks([], NET_FIELDS, False) 699 return baserlib.MapBulkFields(bulkdata, NET_FIELDS) 700 else: 701 data = client.QueryNetworks([], ["name"], False) 702 networknames = [row[0] for row in data] 703 return baserlib.BuildUriList(networknames, "/2/networks/%s", 704 uri_fields=("name", "uri"))
705
706 707 -class R_2_networks_name(baserlib.OpcodeResource):
708 """/2/networks/[network_name] resource. 709 710 """ 711 DELETE_OPCODE = opcodes.OpNetworkRemove 712
713 - def GET(self):
714 """Send information about a network. 715 716 """ 717 network_name = self.items[0] 718 client = self.GetClient(query=True) 719 720 result = baserlib.HandleItemQueryErrors(client.QueryNetworks, 721 names=[network_name], 722 fields=NET_FIELDS, 723 use_locking=self.useLocking()) 724 725 return baserlib.MapFields(NET_FIELDS, result[0])
726
727 - def GetDeleteOpInput(self):
728 """Delete a network. 729 730 """ 731 assert len(self.items) == 1 732 return (self.request_body, { 733 "network_name": self.items[0], 734 "dry_run": self.dryRun(), 735 })
736
737 738 -class R_2_networks_name_connect(baserlib.OpcodeResource):
739 """/2/networks/[network_name]/connect resource. 740 741 """ 742 PUT_OPCODE = opcodes.OpNetworkConnect 743
744 - def GetPutOpInput(self):
745 """Changes some parameters of node group. 746 747 """ 748 assert self.items 749 return (self.request_body, { 750 "network_name": self.items[0], 751 "dry_run": self.dryRun(), 752 })
753
754 755 -class R_2_networks_name_disconnect(baserlib.OpcodeResource):
756 """/2/networks/[network_name]/disconnect resource. 757 758 """ 759 PUT_OPCODE = opcodes.OpNetworkDisconnect 760
761 - def GetPutOpInput(self):
762 """Changes some parameters of node group. 763 764 """ 765 assert self.items 766 return (self.request_body, { 767 "network_name": self.items[0], 768 "dry_run": self.dryRun(), 769 })
770
771 772 -class R_2_networks_name_modify(baserlib.OpcodeResource):
773 """/2/networks/[network_name]/modify resource. 774 775 """ 776 PUT_OPCODE = opcodes.OpNetworkSetParams 777
778 - def GetPutOpInput(self):
779 """Changes some parameters of network. 780 781 """ 782 assert self.items 783 return (self.request_body, { 784 "network_name": self.items[0], 785 })
786
787 788 -class R_2_groups(baserlib.OpcodeResource):
789 """/2/groups resource. 790 791 """ 792 GET_OPCODE = opcodes.OpGroupQuery 793 POST_OPCODE = opcodes.OpGroupAdd 794 POST_RENAME = { 795 "name": "group_name", 796 } 797
798 - def GetPostOpInput(self):
799 """Create a node group. 800 801 802 """ 803 assert not self.items 804 return (self.request_body, { 805 "dry_run": self.dryRun(), 806 })
807
808 - def GET(self):
809 """Returns a list of all node groups. 810 811 """ 812 client = self.GetClient(query=True) 813 814 if self.useBulk(): 815 bulkdata = client.QueryGroups([], G_FIELDS, False) 816 return baserlib.MapBulkFields(bulkdata, G_FIELDS) 817 else: 818 data = client.QueryGroups([], ["name"], False) 819 groupnames = [row[0] for row in data] 820 return baserlib.BuildUriList(groupnames, "/2/groups/%s", 821 uri_fields=("name", "uri"))
822
823 824 -class R_2_groups_name(baserlib.OpcodeResource):
825 """/2/groups/[group_name] resource. 826 827 """ 828 DELETE_OPCODE = opcodes.OpGroupRemove 829
830 - def GET(self):
831 """Send information about a node group. 832 833 """ 834 group_name = self.items[0] 835 client = self.GetClient(query=True) 836 837 result = baserlib.HandleItemQueryErrors(client.QueryGroups, 838 names=[group_name], fields=G_FIELDS, 839 use_locking=self.useLocking()) 840 841 return baserlib.MapFields(G_FIELDS, result[0])
842
843 - def GetDeleteOpInput(self):
844 """Delete a node group. 845 846 """ 847 assert len(self.items) == 1 848 return ({}, { 849 "group_name": self.items[0], 850 "dry_run": self.dryRun(), 851 })
852
853 854 -class R_2_groups_name_modify(baserlib.OpcodeResource):
855 """/2/groups/[group_name]/modify resource. 856 857 """ 858 PUT_OPCODE = opcodes.OpGroupSetParams 859 PUT_RENAME = { 860 "custom_ndparams": "ndparams", 861 "custom_ipolicy": "ipolicy", 862 "custom_diskparams": "diskparams", 863 } 864
865 - def GetPutOpInput(self):
866 """Changes some parameters of node group. 867 868 """ 869 assert self.items 870 return (self.request_body, { 871 "group_name": self.items[0], 872 })
873
874 875 -class R_2_groups_name_rename(baserlib.OpcodeResource):
876 """/2/groups/[group_name]/rename resource. 877 878 """ 879 PUT_OPCODE = opcodes.OpGroupRename 880
881 - def GetPutOpInput(self):
882 """Changes the name of a node group. 883 884 """ 885 assert len(self.items) == 1 886 return (self.request_body, { 887 "group_name": self.items[0], 888 "dry_run": self.dryRun(), 889 })
890
891 892 -class R_2_groups_name_assign_nodes(baserlib.OpcodeResource):
893 """/2/groups/[group_name]/assign-nodes resource. 894 895 """ 896 PUT_OPCODE = opcodes.OpGroupAssignNodes 897
898 - def GetPutOpInput(self):
899 """Assigns nodes to a group. 900 901 """ 902 assert len(self.items) == 1 903 return (self.request_body, { 904 "group_name": self.items[0], 905 "dry_run": self.dryRun(), 906 "force": self.useForce(), 907 })
908
909 910 -def _ConvertUsbDevices(data):
911 """Convert in place the usb_devices string to the proper format. 912 913 In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from 914 comma to space because commas cannot be accepted on the command line 915 (they already act as the separator between different hvparams). RAPI 916 should be able to accept commas for backwards compatibility, but we want 917 it to also accept the new space separator. Therefore, we convert 918 spaces into commas here and keep the old parsing logic elsewhere. 919 920 """ 921 try: 922 hvparams = data["hvparams"] 923 usb_devices = hvparams[constants.HV_USB_DEVICES] 924 hvparams[constants.HV_USB_DEVICES] = usb_devices.replace(" ", ",") 925 data["hvparams"] = hvparams 926 except KeyError: 927 #No usb_devices, no modification required 928 pass
929
930 931 -class R_2_instances(baserlib.OpcodeResource):
932 """/2/instances resource. 933 934 """ 935 GET_OPCODE = opcodes.OpInstanceQuery 936 POST_OPCODE = opcodes.OpInstanceCreate 937 POST_RENAME = { 938 "os": "os_type", 939 "name": "instance_name", 940 } 941
942 - def GET(self):
943 """Returns a list of all available instances. 944 945 """ 946 client = self.GetClient() 947 948 use_locking = self.useLocking() 949 if self.useBulk(): 950 bulkdata = client.QueryInstances([], I_FIELDS, use_locking) 951 return map(_UpdateBeparams, baserlib.MapBulkFields(bulkdata, I_FIELDS)) 952 else: 953 instancesdata = client.QueryInstances([], ["name"], use_locking) 954 instanceslist = [row[0] for row in instancesdata] 955 return baserlib.BuildUriList(instanceslist, "/2/instances/%s", 956 uri_fields=("id", "uri"))
957
958 - def GetPostOpInput(self):
959 """Create an instance. 960 961 @return: a job id 962 963 """ 964 baserlib.CheckType(self.request_body, dict, "Body contents") 965 966 # Default to request data version 0 967 data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0) 968 969 if data_version == 0: 970 raise http.HttpBadRequest("Instance creation request version 0 is no" 971 " longer supported") 972 elif data_version != 1: 973 raise http.HttpBadRequest("Unsupported request data version %s" % 974 data_version) 975 976 data = self.request_body.copy() 977 # Remove "__version__" 978 data.pop(_REQ_DATA_VERSION, None) 979 980 _ConvertUsbDevices(data) 981 982 return (data, { 983 "dry_run": self.dryRun(), 984 })
985
986 987 -class R_2_instances_multi_alloc(baserlib.OpcodeResource):
988 """/2/instances-multi-alloc resource. 989 990 """ 991 POST_OPCODE = opcodes.OpInstanceMultiAlloc 992
993 - def GetPostOpInput(self):
994 """Try to allocate multiple instances. 995 996 @return: A dict with submitted jobs, allocatable instances and failed 997 allocations 998 999 """ 1000 if "instances" not in self.request_body: 1001 raise http.HttpBadRequest("Request is missing required 'instances' field" 1002 " in body") 1003 1004 # Unlike most other RAPI calls, this one is composed of individual opcodes, 1005 # and we have to do the filling ourselves 1006 OPCODE_RENAME = { 1007 "os": "os_type", 1008 "name": "instance_name", 1009 } 1010 1011 body = objects.FillDict(self.request_body, { 1012 "instances": [ 1013 baserlib.FillOpcode(opcodes.OpInstanceCreate, inst, {}, 1014 rename=OPCODE_RENAME) 1015 for inst in self.request_body["instances"] 1016 ], 1017 }) 1018 1019 return (body, { 1020 "dry_run": self.dryRun(), 1021 })
1022
1023 1024 -class R_2_instances_name(baserlib.OpcodeResource):
1025 """/2/instances/[instance_name] resource. 1026 1027 """ 1028 GET_OPCODE = opcodes.OpInstanceQuery 1029 DELETE_OPCODE = opcodes.OpInstanceRemove 1030
1031 - def GET(self):
1032 """Send information about an instance. 1033 1034 """ 1035 client = self.GetClient() 1036 instance_name = self.items[0] 1037 1038 result = baserlib.HandleItemQueryErrors(client.QueryInstances, 1039 names=[instance_name], 1040 fields=I_FIELDS, 1041 use_locking=self.useLocking()) 1042 1043 return _UpdateBeparams(baserlib.MapFields(I_FIELDS, result[0]))
1044
1045 - def GetDeleteOpInput(self):
1046 """Delete an instance. 1047 1048 """ 1049 assert len(self.items) == 1 1050 return (self.request_body, { 1051 "instance_name": self.items[0], 1052 "ignore_failures": False, 1053 "dry_run": self.dryRun(), 1054 })
1055
1056 1057 -class R_2_instances_name_info(baserlib.OpcodeResource):
1058 """/2/instances/[instance_name]/info resource. 1059 1060 """ 1061 GET_OPCODE = opcodes.OpInstanceQueryData 1062
1063 - def GetGetOpInput(self):
1064 """Request detailed instance information. 1065 1066 """ 1067 assert len(self.items) == 1 1068 return ({}, { 1069 "instances": [self.items[0]], 1070 "static": bool(self._checkIntVariable("static", default=0)), 1071 })
1072
1073 1074 -class R_2_instances_name_reboot(baserlib.OpcodeResource):
1075 """/2/instances/[instance_name]/reboot resource. 1076 1077 Implements an instance reboot. 1078 1079 """ 1080 POST_OPCODE = opcodes.OpInstanceReboot 1081
1082 - def GetPostOpInput(self):
1083 """Reboot an instance. 1084 1085 The URI takes type=[hard|soft|full] and 1086 ignore_secondaries=[False|True] parameters. 1087 1088 """ 1089 return (self.request_body, { 1090 "instance_name": self.items[0], 1091 "reboot_type": 1092 self.queryargs.get("type", [constants.INSTANCE_REBOOT_HARD])[0], 1093 "ignore_secondaries": bool(self._checkIntVariable("ignore_secondaries")), 1094 "dry_run": self.dryRun(), 1095 })
1096
1097 1098 -class R_2_instances_name_startup(baserlib.OpcodeResource):
1099 """/2/instances/[instance_name]/startup resource. 1100 1101 Implements an instance startup. 1102 1103 """ 1104 PUT_OPCODE = opcodes.OpInstanceStartup 1105
1106 - def GetPutOpInput(self):
1107 """Startup an instance. 1108 1109 The URI takes force=[False|True] parameter to start the instance 1110 if even if secondary disks are failing. 1111 1112 """ 1113 return ({}, { 1114 "instance_name": self.items[0], 1115 "force": self.useForce(), 1116 "dry_run": self.dryRun(), 1117 "no_remember": bool(self._checkIntVariable("no_remember")), 1118 })
1119
1120 1121 -class R_2_instances_name_shutdown(baserlib.OpcodeResource):
1122 """/2/instances/[instance_name]/shutdown resource. 1123 1124 Implements an instance shutdown. 1125 1126 """ 1127 PUT_OPCODE = opcodes.OpInstanceShutdown 1128
1129 - def GetPutOpInput(self):
1130 """Shutdown an instance. 1131 1132 """ 1133 return (self.request_body, { 1134 "instance_name": self.items[0], 1135 "no_remember": bool(self._checkIntVariable("no_remember")), 1136 "dry_run": self.dryRun(), 1137 })
1138
1139 1140 -def _ParseInstanceReinstallRequest(name, data):
1141 """Parses a request for reinstalling an instance. 1142 1143 """ 1144 if not isinstance(data, dict): 1145 raise http.HttpBadRequest("Invalid body contents, not a dictionary") 1146 1147 ostype = baserlib.CheckParameter(data, "os", default=None) 1148 start = baserlib.CheckParameter(data, "start", exptype=bool, 1149 default=True) 1150 osparams = baserlib.CheckParameter(data, "osparams", default=None) 1151 1152 ops = [ 1153 opcodes.OpInstanceShutdown(instance_name=name), 1154 opcodes.OpInstanceReinstall(instance_name=name, os_type=ostype, 1155 osparams=osparams), 1156 ] 1157 1158 if start: 1159 ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False)) 1160 1161 return ops
1162
1163 1164 -class R_2_instances_name_reinstall(baserlib.OpcodeResource):
1165 """/2/instances/[instance_name]/reinstall resource. 1166 1167 Implements an instance reinstall. 1168 1169 """ 1170 POST_OPCODE = opcodes.OpInstanceReinstall 1171
1172 - def POST(self):
1173 """Reinstall an instance. 1174 1175 The URI takes os=name and nostartup=[0|1] optional 1176 parameters. By default, the instance will be started 1177 automatically. 1178 1179 """ 1180 if self.request_body: 1181 if self.queryargs: 1182 raise http.HttpBadRequest("Can't combine query and body parameters") 1183 1184 body = self.request_body 1185 elif self.queryargs: 1186 # Legacy interface, do not modify/extend 1187 body = { 1188 "os": self._checkStringVariable("os"), 1189 "start": not self._checkIntVariable("nostartup"), 1190 } 1191 else: 1192 body = {} 1193 1194 ops = _ParseInstanceReinstallRequest(self.items[0], body) 1195 1196 return self.SubmitJob(ops)
1197
1198 1199 -class R_2_instances_name_replace_disks(baserlib.OpcodeResource):
1200 """/2/instances/[instance_name]/replace-disks resource. 1201 1202 """ 1203 POST_OPCODE = opcodes.OpInstanceReplaceDisks 1204
1205 - def GetPostOpInput(self):
1206 """Replaces disks on an instance. 1207 1208 """ 1209 static = { 1210 "instance_name": self.items[0], 1211 } 1212 1213 if self.request_body: 1214 data = self.request_body 1215 elif self.queryargs: 1216 # Legacy interface, do not modify/extend 1217 data = { 1218 "remote_node": self._checkStringVariable("remote_node", default=None), 1219 "mode": self._checkStringVariable("mode", default=None), 1220 "disks": self._checkStringVariable("disks", default=None), 1221 "iallocator": self._checkStringVariable("iallocator", default=None), 1222 } 1223 else: 1224 data = {} 1225 1226 # Parse disks 1227 try: 1228 raw_disks = data.pop("disks") 1229 except KeyError: 1230 pass 1231 else: 1232 if raw_disks: 1233 if ht.TListOf(ht.TInt)(raw_disks): # pylint: disable=E1102 1234 data["disks"] = raw_disks 1235 else: 1236 # Backwards compatibility for strings of the format "1, 2, 3" 1237 try: 1238 data["disks"] = [int(part) for part in raw_disks.split(",")] 1239 except (TypeError, ValueError), err: 1240 raise http.HttpBadRequest("Invalid disk index passed: %s" % err) 1241 1242 return (data, static)
1243
1244 1245 -class R_2_instances_name_activate_disks(baserlib.OpcodeResource):
1246 """/2/instances/[instance_name]/activate-disks resource. 1247 1248 """ 1249 PUT_OPCODE = opcodes.OpInstanceActivateDisks 1250
1251 - def GetPutOpInput(self):
1252 """Activate disks for an instance. 1253 1254 The URI might contain ignore_size to ignore current recorded size. 1255 1256 """ 1257 return ({}, { 1258 "instance_name": self.items[0], 1259 "ignore_size": bool(self._checkIntVariable("ignore_size")), 1260 })
1261
1262 1263 -class R_2_instances_name_deactivate_disks(baserlib.OpcodeResource):
1264 """/2/instances/[instance_name]/deactivate-disks resource. 1265 1266 """ 1267 PUT_OPCODE = opcodes.OpInstanceDeactivateDisks 1268
1269 - def GetPutOpInput(self):
1270 """Deactivate disks for an instance. 1271 1272 """ 1273 return ({}, { 1274 "instance_name": self.items[0], 1275 })
1276
1277 1278 -class R_2_instances_name_recreate_disks(baserlib.OpcodeResource):
1279 """/2/instances/[instance_name]/recreate-disks resource. 1280 1281 """ 1282 POST_OPCODE = opcodes.OpInstanceRecreateDisks 1283
1284 - def GetPostOpInput(self):
1285 """Recreate disks for an instance. 1286 1287 """ 1288 return ({}, { 1289 "instance_name": self.items[0], 1290 })
1291
1292 1293 -class R_2_instances_name_prepare_export(baserlib.OpcodeResource):
1294 """/2/instances/[instance_name]/prepare-export resource. 1295 1296 """ 1297 PUT_OPCODE = opcodes.OpBackupPrepare 1298
1299 - def GetPutOpInput(self):
1300 """Prepares an export for an instance. 1301 1302 """ 1303 return ({}, { 1304 "instance_name": self.items[0], 1305 "mode": self._checkStringVariable("mode"), 1306 })
1307
1308 1309 -class R_2_instances_name_export(baserlib.OpcodeResource):
1310 """/2/instances/[instance_name]/export resource. 1311 1312 """ 1313 PUT_OPCODE = opcodes.OpBackupExport 1314 PUT_RENAME = { 1315 "destination": "target_node", 1316 } 1317
1318 - def GetPutOpInput(self):
1319 """Exports an instance. 1320 1321 """ 1322 return (self.request_body, { 1323 "instance_name": self.items[0], 1324 })
1325
1326 1327 -class R_2_instances_name_migrate(baserlib.OpcodeResource):
1328 """/2/instances/[instance_name]/migrate resource. 1329 1330 """ 1331 PUT_OPCODE = opcodes.OpInstanceMigrate 1332
1333 - def GetPutOpInput(self):
1334 """Migrates an instance. 1335 1336 """ 1337 return (self.request_body, { 1338 "instance_name": self.items[0], 1339 })
1340
1341 1342 -class R_2_instances_name_failover(baserlib.OpcodeResource):
1343 """/2/instances/[instance_name]/failover resource. 1344 1345 """ 1346 PUT_OPCODE = opcodes.OpInstanceFailover 1347
1348 - def GetPutOpInput(self):
1349 """Does a failover of an instance. 1350 1351 """ 1352 return (self.request_body, { 1353 "instance_name": self.items[0], 1354 })
1355
1356 1357 -class R_2_instances_name_rename(baserlib.OpcodeResource):
1358 """/2/instances/[instance_name]/rename resource. 1359 1360 """ 1361 PUT_OPCODE = opcodes.OpInstanceRename 1362
1363 - def GetPutOpInput(self):
1364 """Changes the name of an instance. 1365 1366 """ 1367 return (self.request_body, { 1368 "instance_name": self.items[0], 1369 })
1370
1371 1372 -class R_2_instances_name_modify(baserlib.OpcodeResource):
1373 """/2/instances/[instance_name]/modify resource. 1374 1375 """ 1376 PUT_OPCODE = opcodes.OpInstanceSetParams 1377 PUT_RENAME = { 1378 "custom_beparams": "beparams", 1379 "custom_hvparams": "hvparams", 1380 } 1381
1382 - def GetPutOpInput(self):
1383 """Changes parameters of an instance. 1384 1385 """ 1386 data = self.request_body.copy() 1387 _ConvertUsbDevices(data) 1388 1389 return (data, { 1390 "instance_name": self.items[0], 1391 })
1392
1393 1394 -class R_2_instances_name_disk_grow(baserlib.OpcodeResource):
1395 """/2/instances/[instance_name]/disk/[disk_index]/grow resource. 1396 1397 """ 1398 POST_OPCODE = opcodes.OpInstanceGrowDisk 1399
1400 - def GetPostOpInput(self):
1401 """Increases the size of an instance disk. 1402 1403 """ 1404 return (self.request_body, { 1405 "instance_name": self.items[0], 1406 "disk": int(self.items[1]), 1407 })
1408
1409 1410 -class R_2_instances_name_console(baserlib.ResourceBase):
1411 """/2/instances/[instance_name]/console resource. 1412 1413 """ 1414 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ] 1415 GET_OPCODE = opcodes.OpInstanceConsole 1416
1417 - def GET(self):
1418 """Request information for connecting to instance's console. 1419 1420 @return: Serialized instance console description, see 1421 L{objects.InstanceConsole} 1422 1423 """ 1424 client = self.GetClient() 1425 1426 ((console, ), ) = client.QueryInstances([self.items[0]], ["console"], False) 1427 1428 if console is None: 1429 raise http.HttpServiceUnavailable("Instance console unavailable") 1430 1431 assert isinstance(console, dict) 1432 return console
1433
1434 1435 -def _GetQueryFields(args):
1436 """Tries to extract C{fields} query parameter. 1437 1438 @type args: dictionary 1439 @rtype: list of string 1440 @raise http.HttpBadRequest: When parameter can't be found 1441 1442 """ 1443 try: 1444 fields = args["fields"] 1445 except KeyError: 1446 raise http.HttpBadRequest("Missing 'fields' query argument") 1447 1448 return _SplitQueryFields(fields[0])
1449
1450 1451 -def _SplitQueryFields(fields):
1452 """Splits fields as given for a query request. 1453 1454 @type fields: string 1455 @rtype: list of string 1456 1457 """ 1458 return [i.strip() for i in fields.split(",")]
1459
1460 1461 -class R_2_query(baserlib.ResourceBase):
1462 """/2/query/[resource] resource. 1463 1464 """ 1465 # Results might contain sensitive information 1466 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ] 1467 PUT_ACCESS = GET_ACCESS 1468 GET_OPCODE = opcodes.OpQuery 1469 PUT_OPCODE = opcodes.OpQuery 1470
1471 - def _Query(self, fields, qfilter):
1472 return self.GetClient().Query(self.items[0], fields, qfilter).ToDict()
1473
1474 - def GET(self):
1475 """Returns resource information. 1476 1477 @return: Query result, see L{objects.QueryResponse} 1478 1479 """ 1480 return self._Query(_GetQueryFields(self.queryargs), None)
1481
1482 - def PUT(self):
1483 """Submits job querying for resources. 1484 1485 @return: Query result, see L{objects.QueryResponse} 1486 1487 """ 1488 body = self.request_body 1489 1490 baserlib.CheckType(body, dict, "Body contents") 1491 1492 try: 1493 fields = body["fields"] 1494 except KeyError: 1495 fields = _GetQueryFields(self.queryargs) 1496 1497 qfilter = body.get("qfilter", None) 1498 # TODO: remove this after 2.7 1499 if qfilter is None: 1500 qfilter = body.get("filter", None) 1501 1502 return self._Query(fields, qfilter)
1503
1504 1505 -class R_2_query_fields(baserlib.ResourceBase):
1506 """/2/query/[resource]/fields resource. 1507 1508 """ 1509 GET_OPCODE = opcodes.OpQueryFields 1510
1511 - def GET(self):
1512 """Retrieves list of available fields for a resource. 1513 1514 @return: List of serialized L{objects.QueryFieldDefinition} 1515 1516 """ 1517 try: 1518 raw_fields = self.queryargs["fields"] 1519 except KeyError: 1520 fields = None 1521 else: 1522 fields = _SplitQueryFields(raw_fields[0]) 1523 1524 return self.GetClient().QueryFields(self.items[0], fields).ToDict()
1525
1526 1527 -class _R_Tags(baserlib.OpcodeResource):
1528 """Quasiclass for tagging resources. 1529 1530 Manages tags. When inheriting this class you must define the 1531 TAG_LEVEL for it. 1532 1533 """ 1534 TAG_LEVEL = None 1535 GET_OPCODE = opcodes.OpTagsGet 1536 PUT_OPCODE = opcodes.OpTagsSet 1537 DELETE_OPCODE = opcodes.OpTagsDel 1538
1539 - def __init__(self, items, queryargs, req, **kwargs):
1540 """A tag resource constructor. 1541 1542 We have to override the default to sort out cluster naming case. 1543 1544 """ 1545 baserlib.OpcodeResource.__init__(self, items, queryargs, req, **kwargs) 1546 1547 if self.TAG_LEVEL == constants.TAG_CLUSTER: 1548 self.name = None 1549 else: 1550 self.name = items[0]
1551
1552 - def GET(self):
1553 """Returns a list of tags. 1554 1555 Example: ["tag1", "tag2", "tag3"] 1556 1557 """ 1558 kind = self.TAG_LEVEL 1559 1560 if kind in (constants.TAG_INSTANCE, 1561 constants.TAG_NODEGROUP, 1562 constants.TAG_NODE, 1563 constants.TAG_NETWORK): 1564 if not self.name: 1565 raise http.HttpBadRequest("Missing name on tag request") 1566 1567 cl = self.GetClient(query=True) 1568 tags = list(cl.QueryTags(kind, self.name)) 1569 1570 elif kind == constants.TAG_CLUSTER: 1571 assert not self.name 1572 # TODO: Use query API? 1573 ssc = ssconf.SimpleStore() 1574 tags = ssc.GetClusterTags() 1575 1576 else: 1577 raise http.HttpBadRequest("Unhandled tag type!") 1578 1579 return list(tags)
1580
1581 - def GetPutOpInput(self):
1582 """Add a set of tags. 1583 1584 The request as a list of strings should be PUT to this URI. And 1585 you'll have back a job id. 1586 1587 """ 1588 return ({}, { 1589 "kind": self.TAG_LEVEL, 1590 "name": self.name, 1591 "tags": self.queryargs.get("tag", []), 1592 "dry_run": self.dryRun(), 1593 })
1594
1595 - def GetDeleteOpInput(self):
1596 """Delete a tag. 1597 1598 In order to delete a set of tags, the DELETE 1599 request should be addressed to URI like: 1600 /tags?tag=[tag]&tag=[tag] 1601 1602 """ 1603 # Re-use code 1604 return self.GetPutOpInput()
1605
1606 1607 -class R_2_instances_name_tags(_R_Tags):
1608 """ /2/instances/[instance_name]/tags resource. 1609 1610 Manages per-instance tags. 1611 1612 """ 1613 TAG_LEVEL = constants.TAG_INSTANCE
1614
1615 1616 -class R_2_nodes_name_tags(_R_Tags):
1617 """ /2/nodes/[node_name]/tags resource. 1618 1619 Manages per-node tags. 1620 1621 """ 1622 TAG_LEVEL = constants.TAG_NODE
1623
1624 1625 -class R_2_groups_name_tags(_R_Tags):
1626 """ /2/groups/[group_name]/tags resource. 1627 1628 Manages per-nodegroup tags. 1629 1630 """ 1631 TAG_LEVEL = constants.TAG_NODEGROUP
1632
1633 1634 -class R_2_networks_name_tags(_R_Tags):
1635 """ /2/networks/[network_name]/tags resource. 1636 1637 Manages per-network tags. 1638 1639 """ 1640 TAG_LEVEL = constants.TAG_NETWORK
1641
1642 1643 -class R_2_tags(_R_Tags):
1644 """ /2/tags resource. 1645 1646 Manages cluster tags. 1647 1648 """ 1649 TAG_LEVEL = constants.TAG_CLUSTER
1650