Package ganeti :: Package rapi :: Module rlib2
[hide private]
[frames] | [no frames]

Source Code for Module ganeti.rapi.rlib2

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30   
  31  """Remote API resource implementations. 
  32   
  33  PUT or POST? 
  34  ============ 
  35   
  36  According to RFC2616 the main difference between PUT and POST is that 
  37  POST can create new resources but PUT can only create the resource the 
  38  URI was pointing to on the PUT request. 
  39   
  40  In the context of this module POST on ``/2/instances`` to change an existing 
  41  entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a 
  42  new instance) with a name specified in the request. 
  43   
  44  Quoting from RFC2616, section 9.6:: 
  45   
  46    The fundamental difference between the POST and PUT requests is reflected in 
  47    the different meaning of the Request-URI. The URI in a POST request 
  48    identifies the resource that will handle the enclosed entity. That resource 
  49    might be a data-accepting process, a gateway to some other protocol, or a 
  50    separate entity that accepts annotations. In contrast, the URI in a PUT 
  51    request identifies the entity enclosed with the request -- the user agent 
  52    knows what URI is intended and the server MUST NOT attempt to apply the 
  53    request to some other resource. If the server desires that the request be 
  54    applied to a different URI, it MUST send a 301 (Moved Permanently) response; 
  55    the user agent MAY then make its own decision regarding whether or not to 
  56    redirect the request. 
  57   
  58  So when adding new methods, if they are operating on the URI entity itself, 
  59  PUT should be preferred over POST. 
  60   
  61  """ 
  62   
  63  # pylint: disable=C0103 
  64   
  65  # C0103: Invalid name, since the R_* names are not conforming 
  66   
  67  import OpenSSL 
  68   
  69  from ganeti import opcodes 
  70  from ganeti import objects 
  71  from ganeti import http 
  72  from ganeti import constants 
  73  from ganeti import cli 
  74  from ganeti import rapi 
  75  from ganeti import ht 
  76  from ganeti import compat 
  77  from ganeti.rapi import baserlib 
  78   
  79   
# Fields present on every top-level object; appended to all field lists below
_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]

# Instance fields returned by instance queries (/2/instances)
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.uuids", "nic.names",
            "nic.links", "nic.networks", "nic.networks.names", "nic.bridges",
            "network_port",
            "disk.sizes", "disk.spindles", "disk_usage", "disk.uuids",
            "disk.names",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

# Node fields returned by node queries (/2/nodes)
N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree", "sptotal", "spfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnos", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            "ndparams",
            "group.uuid",
            ] + _COMMON_FIELDS

# Network fields returned by network queries (/2/networks)
NET_FIELDS = ["name", "network", "gateway",
              "network6", "gateway6",
              "mac_prefix",
              "free_count", "reserved_count",
              "map", "group_list", "inst_list",
              "external_reservations",
              ] + _COMMON_FIELDS

# Node group fields returned by group queries (/2/groups)
G_FIELDS = [
  "alloc_policy",
  "name",
  "node_cnt",
  "node_list",
  "ipolicy",
  "custom_ipolicy",
  "diskparams",
  "custom_diskparams",
  "ndparams",
  "custom_ndparams",
  ] + _COMMON_FIELDS

# Filter rule fields returned by filter queries (/2/filters)
FILTER_RULE_FIELDS = [
  "watermark",
  "priority",
  "predicates",
  "action",
  "reason_trail",
  "uuid",
  ]

# Job fields returned by bulk job queries (/2/jobs?bulk=1)
J_FIELDS_BULK = [
  "id", "ops", "status", "summary",
  "opstatus",
  "received_ts", "start_ts", "end_ts",
  ]

# Additional job fields only returned when querying a single job
# (/2/jobs/[job_id])
J_FIELDS = J_FIELDS_BULK + [
  "oplog",
  "opresult",
  ]
 146   
# Node role names as exposed by the /2/nodes/[node_name]/role resource
_NR_DRAINED = "drained"
_NR_MASTER_CANDIDATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

# Maps internal node role constants to their RAPI string representation
_NR_MAP = {
  constants.NR_MASTER: _NR_MASTER,
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
  constants.NR_DRAINED: _NR_DRAINED,
  constants.NR_OFFLINE: _NR_OFFLINE,
  constants.NR_REGULAR: _NR_REGULAR,
  }

# Every known node role must have a RAPI representation
assert frozenset(_NR_MAP.keys()) == constants.NR_ALL

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Feature string for node migration version 1
_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"

# Feature string for node evacuation with LU-generated jobs
_NODE_EVAC_RES1 = "node-evac-res1"

# All optional features advertised by the /2/features resource
ALL_FEATURES = compat.UniqueFrozenset([
  _INST_CREATE_REQV1,
  _INST_REINSTALL_REQV1,
  _NODE_MIGRATE_REQV1,
  _NODE_EVAC_RES1,
  ])

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10
# FIXME: For compatibility we update the beparams/memory field. Needs to be
# removed in Ganeti 2.8
def _UpdateBeparams(inst):
  """Adds the legacy "memory" field to an instance's beparams dict.

  The value is copied from the "maxmem" backend parameter.

  @param inst: instance dict, modified in place
  @return: the same instance dict, for convenience

  """
  params = inst["beparams"]
  params[constants.BE_MEMORY] = params[constants.BE_MAXMEM]
  return inst
202
def _CheckIfConnectionDropped(sock):
  """Utility function to monitor the state of an open connection.

  @param sock: Connection's open socket
  @return: True if the connection was remotely closed, otherwise False

  """
  try:
    # A zero-length read returns "" once the peer has closed the
    # connection; while it is still open, WantReadError is raised instead
    return sock.recv(0) == ""
  except OpenSSL.SSL.WantReadError:
    # Connection still open
    return False
  except OpenSSL.SSL.ZeroReturnError:
    # Connection terminated gracefully
    return True
  except OpenSSL.SSL.SysCallError:
    # Connection terminated
    return True
225
class R_root(baserlib.ResourceBase):
  """/ resource.

  """
  @staticmethod
  def GET():
    """Supported for legacy reasons.

    Always returns None; exists only so that clients probing the API
    root receive a valid (empty) response instead of an error.

    """
    return None
237
class R_2(R_root):
  """/2 resource.

  Root of the version 2 API tree; inherits the no-op GET handler from
  L{R_root}.

  """
243
class R_version(baserlib.ResourceBase):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    @return: the value of C{constants.RAPI_VERSION}

    """
    return constants.RAPI_VERSION
258
class R_2_info(baserlib.OpcodeResource):
  """/2/info resource.

  """
  GET_OPCODE = opcodes.OpClusterQuery
  GET_ALIASES = {
    "volume_group_name": "vg_name",
    "drbd_usermode_helper": "drbd_helper",
    }

  def GET(self):
    """Returns cluster information.

    """
    return self.GetClient().QueryClusterInfo()
276
class R_2_features(baserlib.ResourceBase):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns list of optional RAPI features implemented.

    @return: the module-level C{ALL_FEATURES} frozenset as a list

    """
    return list(ALL_FEATURES)
288
class R_2_os(baserlib.OpcodeResource):
  """/2/os resource.

  """
  GET_OPCODE = opcodes.OpOsDiagnose

  def GET(self):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = self.GetClient()
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    job_id = self.SubmitJob([op], cl=cl)

    def _Cancelled():
      # Abort polling when the client has dropped the connection
      return _CheckIfConnectionDropped(self._req.request_sock)

    # Use a custom feedback function: log the status instead of printing it
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn,
                         cancel_fn=_Cancelled)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    names = []
    for (name, variants) in diagnose_data:
      names.extend(cli.CalculateOSNames(name, variants))

    return names
321
class R_2_redist_config(baserlib.OpcodeResource):
  """/2/redistribute-config resource.

  """
  # A PUT triggers a cluster-wide redistribution of the configuration
  PUT_OPCODE = opcodes.OpClusterRedistConf
328
class R_2_cluster_modify(baserlib.OpcodeResource):
  """/2/modify resource.

  """
  PUT_OPCODE = opcodes.OpClusterSetParams
  # Cluster parameters that must not be modified through this resource
  PUT_FORBIDDEN = [
    "compression_tools",
    ]
338
def checkFilterParameters(data):
  """Checks and extracts filter rule parameters from a request body.

  @param data: parsed request body; must be a dictionary
  @return: the checked parameters: (priority, predicates, action, reason).
  @raise http.HttpBadRequest: if the body is not a dictionary or contains
    a parameter other than priority, predicates, action or reason

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  # Forbid unknown parameters
  allowed_params = frozenset(["priority", "predicates", "action", "reason"])
  for param in data:
    if param not in allowed_params:
      raise http.HttpBadRequest("Invalid body parameters: filter rule doesn't"
                                " support the parameter '%s'" % param)

  priority = baserlib.CheckParameter(
    data, "priority", exptype=int, default=0)

  # We leave the deeper check into the predicates list to the server.
  predicates = baserlib.CheckParameter(
    data, "predicates", exptype=list, default=[])

  # The action can be a string or a list; we leave the check to the server.
  action = baserlib.CheckParameter(data, "action", default="CONTINUE")

  reason = baserlib.CheckParameter(data, "reason", exptype=list, default=[])

  return (priority, predicates, action, reason)
370
class R_2_filters(baserlib.ResourceBase):
  """/2/filters resource.

  """

  def GET(self):
    """Returns a list of all filter rules.

    @return: a dictionary with filter rule UUID and uri.

    """
    cl = self.GetClient()

    if not self.useBulk():
      uuids = [compat.fst(row) for row in cl.QueryFilters(None, ["uuid"])]
      return baserlib.BuildUriList(uuids, "/2/filters/%s",
                                   uri_fields=("uuid", "uri"))

    bulkdata = cl.QueryFilters(None, FILTER_RULE_FIELDS)
    return baserlib.MapBulkFields(bulkdata, FILTER_RULE_FIELDS)

  def POST(self):
    """Adds a filter rule.

    @return: the UUID of the newly created filter rule.

    """
    (priority, predicates, action, reason) = \
      checkFilterParameters(self.request_body)

    # Passing None as the UUID makes ReplaceFilter insert a new filter
    cl = self.GetClient()
    return cl.ReplaceFilter(None, priority, predicates, action, reason)
405
class R_2_filters_uuid(baserlib.ResourceBase):
  """/2/filters/[filter_uuid] resource.

  """
  def GET(self):
    """Returns a filter rule.

    @return: a dictionary with the filter rule's fields.
        The result includes:
          - uuid: unique filter ID string
          - watermark: highest job ID ever used as a number
          - priority: filter priority as a non-negative number
          - predicates: filter predicates, each one being a list
            with the first element being the name of the predicate
            and the rest being parameters suitable for that predicate
          - action: effect of the filter as a string
          - reason_trail: reasons for the addition of this filter as a
            list of lists

    """
    filter_uuid = self.items[0]
    rows = baserlib.HandleItemQueryErrors(self.GetClient().QueryFilters,
                                          uuids=[filter_uuid],
                                          fields=FILTER_RULE_FIELDS)
    return baserlib.MapFields(FILTER_RULE_FIELDS, rows[0])

  def PUT(self):
    """Replaces an existing filter rule, or creates one if it doesn't
    exist already.

    @return: the UUID of the changed or created filter rule.

    """
    filter_uuid = self.items[0]
    (priority, predicates, action, reason) = \
      checkFilterParameters(self.request_body)

    cl = self.GetClient()
    return cl.ReplaceFilter(filter_uuid, priority, predicates, action, reason)

  def DELETE(self):
    """Deletes a filter rule.

    """
    return self.GetClient().DeleteFilter(self.items[0])
456
class R_2_jobs(baserlib.ResourceBase):
  """/2/jobs resource.

  """
  def GET(self):
    """Returns a dictionary of jobs.

    @return: a dictionary with jobs id and uri.

    """
    cl = self.GetClient()

    if self.useBulk():
      bulkdata = cl.QueryJobs(None, J_FIELDS_BULK)
      return baserlib.MapBulkFields(bulkdata, J_FIELDS_BULK)

    ids = [compat.fst(row) for row in cl.QueryJobs(None, ["id"])]
    return baserlib.BuildUriList(ids, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))
477
class R_2_jobs_id(baserlib.ResourceBase):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
          - id: job ID as a number
          - status: current job status as a string
          - ops: involved OpCodes as a list of dictionaries for each
            opcodes in the job
          - opstatus: OpCodes status as a list
          - opresult: OpCodes results as a list of lists

    """
    job_id = self.items[0]
    row = self.GetClient().QueryJobs([job_id, ], J_FIELDS)[0]
    if row is None:
      # Unknown job ID
      raise http.HttpNotFound()
    return baserlib.MapFields(J_FIELDS, row)

  def DELETE(self):
    """Cancel not-yet-started job.

    """
    return self.GetClient().CancelJob(self.items[0])
509
class R_2_jobs_id_wait(baserlib.ResourceBase):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    prev_job_info = self.getBodyParameter("previous_job_info", None)
    if prev_job_info is not None and not isinstance(prev_job_info, list):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    prev_log_serial = self.getBodyParameter("previous_log_serial", None)
    if prev_log_serial is not None and \
        not isinstance(prev_log_serial, (int, long)):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    result = self.GetClient().WaitForJobChangeOnce(job_id, fields,
                                                   prev_job_info,
                                                   prev_log_serial,
                                                   timeout=_WFJC_TIMEOUT)
    if not result:
      # Unknown job ID
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # Nothing changed within the timeout window
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }
558
class R_2_nodes(baserlib.OpcodeResource):
  """/2/nodes resource.

  """

  def GET(self):
    """Returns a list of all nodes.

    """
    cl = self.GetClient()

    if self.useBulk():
      bulkdata = cl.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)

    names = [row[0] for row in cl.QueryNodes([], ["name"], False)]
    return baserlib.BuildUriList(names, "/2/nodes/%s",
                                 uri_fields=("id", "uri"))
579
class R_2_nodes_name(baserlib.OpcodeResource):
  """/2/nodes/[node_name] resource.

  """
  GET_ALIASES = {
    "sip": "secondary_ip",
    }

  def GET(self):
    """Send information about a node.

    """
    cl = self.GetClient()
    rows = baserlib.HandleItemQueryErrors(cl.QueryNodes,
                                          names=[self.items[0]],
                                          fields=N_FIELDS,
                                          use_locking=self.useLocking())
    return baserlib.MapFields(N_FIELDS, rows[0])
601
class R_2_nodes_name_powercycle(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/powercycle resource.

  """
  POST_OPCODE = opcodes.OpNodePowercycle

  def GetPostOpInput(self):
    """Builds the opcode input for powercycling a node.

    """
    static = {
      "node_name": self.items[0],
      "force": self.useForce(),
      }
    return (self.request_body, static)
617
class R_2_nodes_name_role(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/role resource.

  """
  PUT_OPCODE = opcodes.OpNodeSetParams

  # Maps a role name to the (master_candidate, offline, drained) flag
  # triple passed to OpNodeSetParams; None means "leave unchanged"
  _ROLE_FLAGS = {
    _NR_REGULAR: (False, False, False),
    _NR_MASTER_CANDIDATE: (True, None, None),
    _NR_DRAINED: (None, None, True),
    _NR_OFFLINE: (None, True, None),
    }

  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    cl = self.GetClient()
    result = cl.QueryNodes(names=[self.items[0]], fields=["role"],
                           use_locking=self.useLocking())
    return _NR_MAP[result[0][0]]

  def GetPutOpInput(self):
    """Sets the node role.

    """
    baserlib.CheckType(self.request_body, basestring, "Body contents")

    role = self.request_body
    try:
      (candidate, offline, drained) = self._ROLE_FLAGS[role]
    except KeyError:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    assert len(self.items) == 1

    return ({}, {
      "node_name": self.items[0],
      "master_candidate": candidate,
      "offline": offline,
      "drained": drained,
      "force": self.useForce(),
      "auto_promote": bool(self._checkIntVariable("auto-promote", default=0)),
      })
676
class R_2_nodes_name_evacuate(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/evacuate resource.

  """
  POST_OPCODE = opcodes.OpNodeEvacuate

  def GetPostOpInput(self):
    """Builds the opcode input for evacuating all instances off a node.

    """
    static = {
      "node_name": self.items[0],
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)
692
class R_2_nodes_name_migrate(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/migrate resource.

  """
  POST_OPCODE = opcodes.OpNodeMigrate

  def GetPostOpInput(self):
    """Migrate all primary instances from a node.

    """
    if not self.queryargs:
      data = self.request_body
    else:
      # Support old-style requests passing parameters via the query string
      if "live" in self.queryargs and "mode" in self.queryargs:
        raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                  " be passed")

      if "live" in self.queryargs:
        mode = (constants.HT_MIGRATION_LIVE
                if self._checkIntVariable("live", default=1)
                else constants.HT_MIGRATION_NONLIVE)
      else:
        mode = self._checkStringVariable("mode", default=None)

      data = {
        "mode": mode,
        }

    return (data, {
      "node_name": self.items[0],
      })
727
class R_2_nodes_name_modify(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/modify resource.

  """
  POST_OPCODE = opcodes.OpNodeSetParams

  def GetPostOpInput(self):
    """Builds the opcode input for changing parameters of a node.

    """
    assert len(self.items) == 1
    static = {
      "node_name": self.items[0],
      }
    return (self.request_body, static)
744
class R_2_nodes_name_storage(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUNodeQueryStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
  GET_OPCODE = opcodes.OpNodeQueryStorage

  def GetGetOpInput(self):
    """List storage available on a node.

    """
    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    return ({}, {
      "nodes": [self.items[0]],
      "storage_type": self._checkStringVariable("storage_type", None),
      "output_fields": output_fields.split(","),
      })
770
class R_2_nodes_name_storage_modify(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  PUT_OPCODE = opcodes.OpNodeModifyStorage

  def GetPutOpInput(self):
    """Modifies a storage volume on a node.

    """
    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}
    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    return ({}, {
      "node_name": self.items[0],
      "storage_type": self._checkStringVariable("storage_type", None),
      "name": name,
      "changes": changes,
      })
801
class R_2_nodes_name_storage_repair(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  PUT_OPCODE = opcodes.OpRepairNodeStorage

  def GetPutOpInput(self):
    """Repairs a storage volume on a node.

    """
    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    return ({}, {
      "node_name": self.items[0],
      "storage_type": self._checkStringVariable("storage_type", None),
      "name": name,
      })
824
class R_2_networks(baserlib.OpcodeResource):
  """/2/networks resource.

  """
  POST_OPCODE = opcodes.OpNetworkAdd
  POST_RENAME = {
    "name": "network_name",
    }

  def GetPostOpInput(self):
    """Create a network.

    """
    assert not self.items
    static = {
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)

  def GET(self):
    """Returns a list of all networks.

    """
    cl = self.GetClient()

    if self.useBulk():
      bulkdata = cl.QueryNetworks([], NET_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, NET_FIELDS)

    names = [row[0] for row in cl.QueryNetworks([], ["name"], False)]
    return baserlib.BuildUriList(names, "/2/networks/%s",
                                 uri_fields=("name", "uri"))
858
class R_2_networks_name(baserlib.OpcodeResource):
  """/2/networks/[network_name] resource.

  """
  DELETE_OPCODE = opcodes.OpNetworkRemove

  def GET(self):
    """Send information about a network.

    """
    cl = self.GetClient()
    rows = baserlib.HandleItemQueryErrors(cl.QueryNetworks,
                                          names=[self.items[0]],
                                          fields=NET_FIELDS,
                                          use_locking=self.useLocking())
    return baserlib.MapFields(NET_FIELDS, rows[0])

  def GetDeleteOpInput(self):
    """Builds the opcode input for deleting a network.

    """
    assert len(self.items) == 1
    static = {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)
889
class R_2_networks_name_connect(baserlib.OpcodeResource):
  """/2/networks/[network_name]/connect resource.

  """
  PUT_OPCODE = opcodes.OpNetworkConnect

  def GetPutOpInput(self):
    """Connects a network to a node group.

    The group and connection parameters are taken from the request body.

    """
    assert self.items
    return (self.request_body, {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      })
906
class R_2_networks_name_disconnect(baserlib.OpcodeResource):
  """/2/networks/[network_name]/disconnect resource.

  """
  PUT_OPCODE = opcodes.OpNetworkDisconnect

  def GetPutOpInput(self):
    """Disconnects a network from a node group.

    The group is taken from the request body.

    """
    assert self.items
    return (self.request_body, {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      })
923
class R_2_networks_name_modify(baserlib.OpcodeResource):
  """/2/networks/[network_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpNetworkSetParams

  def GetPutOpInput(self):
    """Builds the opcode input for modifying a network.

    """
    assert self.items
    static = {
      "network_name": self.items[0],
      }
    return (self.request_body, static)
939
class R_2_groups(baserlib.OpcodeResource):
  """/2/groups resource.

  """
  POST_OPCODE = opcodes.OpGroupAdd
  POST_RENAME = {
    "name": "group_name",
    }

  def GetPostOpInput(self):
    """Create a node group.

    """
    assert not self.items
    static = {
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)

  def GET(self):
    """Returns a list of all node groups.

    """
    cl = self.GetClient()

    if self.useBulk():
      bulkdata = cl.QueryGroups([], G_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, G_FIELDS)

    names = [row[0] for row in cl.QueryGroups([], ["name"], False)]
    return baserlib.BuildUriList(names, "/2/groups/%s",
                                 uri_fields=("name", "uri"))
974
class R_2_groups_name(baserlib.OpcodeResource):
  """/2/groups/[group_name] resource.

  """
  DELETE_OPCODE = opcodes.OpGroupRemove

  def GET(self):
    """Send information about a node group.

    """
    cl = self.GetClient()
    rows = baserlib.HandleItemQueryErrors(cl.QueryGroups,
                                          names=[self.items[0]],
                                          fields=G_FIELDS,
                                          use_locking=self.useLocking())
    return baserlib.MapFields(G_FIELDS, rows[0])

  def GetDeleteOpInput(self):
    """Builds the opcode input for deleting a node group.

    """
    assert len(self.items) == 1
    static = {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      }
    return ({}, static)
1004
class R_2_groups_name_modify(baserlib.OpcodeResource):
  """/2/groups/[group_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpGroupSetParams
  PUT_RENAME = {
    "custom_ndparams": "ndparams",
    "custom_ipolicy": "ipolicy",
    "custom_diskparams": "diskparams",
    }

  def GetPutOpInput(self):
    """Builds the opcode input for modifying a node group.

    """
    assert self.items
    static = {
      "group_name": self.items[0],
      }
    return (self.request_body, static)
1025
class R_2_groups_name_rename(baserlib.OpcodeResource):
  """/2/groups/[group_name]/rename resource.

  """
  PUT_OPCODE = opcodes.OpGroupRename

  def GetPutOpInput(self):
    """Builds the opcode input for renaming a node group.

    """
    assert len(self.items) == 1
    static = {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)
1042
class R_2_groups_name_assign_nodes(baserlib.OpcodeResource):
  """/2/groups/[group_name]/assign-nodes resource.

  """
  PUT_OPCODE = opcodes.OpGroupAssignNodes

  def GetPutOpInput(self):
    """Builds the opcode input for assigning nodes to a node group.

    """
    assert len(self.items) == 1
    static = {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      "force": self.useForce(),
      }
    return (self.request_body, static)
1060
def _ConvertUsbDevices(data):
  """Convert in place the usb_devices string to the proper format.

  In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
  comma to space because commas cannot be accepted on the command line
  (they already act as the separator between different hvparams). RAPI
  should be able to accept commas for backwards compatibility, but we want
  it to also accept the new space separator. Therefore, we convert
  spaces into commas here and keep the old parsing logic elsewhere.

  """
  try:
    usb_devices = data["hvparams"][constants.HV_USB_DEVICES]
  except KeyError:
    # No usb_devices setting given, nothing to convert
    return
  data["hvparams"][constants.HV_USB_DEVICES] = usb_devices.replace(" ", ",")
1081
class R_2_instances(baserlib.OpcodeResource):
  """/2/instances resource.

  """
  POST_OPCODE = opcodes.OpInstanceCreate
  POST_RENAME = {
    "os": "os_type",
    "name": "instance_name",
    }

  def GET(self):
    """Returns a list of all available instances.

    """
    cl = self.GetClient()
    locking = self.useLocking()

    if self.useBulk():
      bulkdata = cl.QueryInstances([], I_FIELDS, locking)
      # Add the legacy "memory" beparam to every instance for old clients
      return [_UpdateBeparams(inst)
              for inst in baserlib.MapBulkFields(bulkdata, I_FIELDS)]

    names = [row[0] for row in cl.QueryInstances([], ["name"], locking)]
    return baserlib.BuildUriList(names, "/2/instances/%s",
                                 uri_fields=("id", "uri"))

  def GetPostOpInput(self):
    """Create an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      raise http.HttpBadRequest("Instance creation request version 0 is no"
                                " longer supported")
    if data_version != 1:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    data = self.request_body.copy()
    # Remove "__version__"
    data.pop(_REQ_DATA_VERSION, None)

    _ConvertUsbDevices(data)

    return (data, {
      "dry_run": self.dryRun(),
      })
1136
class R_2_instances_multi_alloc(baserlib.OpcodeResource):
  """/2/instances-multi-alloc resource.

  """
  POST_OPCODE = opcodes.OpInstanceMultiAlloc

  def GetPostOpInput(self):
    """Try to allocate multiple instances.

    @return: A dict with submitted jobs, allocatable instances and failed
             allocations

    """
    if "instances" not in self.request_body:
      raise http.HttpBadRequest("Request is missing required 'instances' field"
                                " in body")

    # Unlike most other RAPI calls, this one is composed of individual opcodes,
    # and we have to do the filling ourselves
    rename = {
      "os": "os_type",
      "name": "instance_name",
      }

    filled = [baserlib.FillOpcode(opcodes.OpInstanceCreate, inst, {},
                                  rename=rename)
              for inst in self.request_body["instances"]]

    body = objects.FillDict(self.request_body, {
      "instances": filled,
      })

    return (body, {
      "dry_run": self.dryRun(),
      })
1173
class R_2_instances_name(baserlib.OpcodeResource):
  """/2/instances/[instance_name] resource.

  """
  DELETE_OPCODE = opcodes.OpInstanceRemove

  def GET(self):
    """Send information about an instance.

    """
    name = self.items[0]
    client = self.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    # Single-item query; result[0] is the requested instance's row
    return _UpdateBeparams(baserlib.MapFields(I_FIELDS, result[0]))

  def GetDeleteOpInput(self):
    """Delete an instance.

    """
    assert len(self.items) == 1
    static = {
      "instance_name": self.items[0],
      "ignore_failures": False,
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)
1205
class R_2_instances_name_info(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/info resource.

  """
  GET_OPCODE = opcodes.OpInstanceQueryData

  def GetGetOpInput(self):
    """Request detailed instance information.

    """
    assert len(self.items) == 1
    # "static" query parameter selects configuration-only data
    static = {
      "instances": [self.items[0]],
      "static": bool(self._checkIntVariable("static", default=0)),
      }
    return ({}, static)
1222
class R_2_instances_name_reboot(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  POST_OPCODE = opcodes.OpInstanceReboot

  def GetPostOpInput(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    # Hard reboot is the default when no type is given
    rtype = self.queryargs.get("type", [constants.INSTANCE_REBOOT_HARD])[0]
    static = {
      "instance_name": self.items[0],
      "reboot_type": rtype,
      "ignore_secondaries": bool(self._checkIntVariable("ignore_secondaries")),
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)
1246
class R_2_instances_name_startup(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  PUT_OPCODE = opcodes.OpInstanceStartup

  def GetPutOpInput(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the
    instance even if secondary disks are failing.

    """
    static = {
      "instance_name": self.items[0],
      "force": self.useForce(),
      "dry_run": self.dryRun(),
      "no_remember": bool(self._checkIntVariable("no_remember")),
      }
    return ({}, static)
1269
class R_2_instances_name_shutdown(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  PUT_OPCODE = opcodes.OpInstanceShutdown

  def GetPutOpInput(self):
    """Shutdown an instance.

    """
    static = {
      "instance_name": self.items[0],
      "no_remember": bool(self._checkIntVariable("no_remember")),
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)
1288
def _ParseInstanceReinstallRequest(name, data):
  """Parses a request for reinstalling an instance.

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  ostype = baserlib.CheckParameter(data, "os", default=None)
  start = baserlib.CheckParameter(data, "start", exptype=bool, default=True)
  osparams = baserlib.CheckParameter(data, "osparams", default=None)

  # Reinstall is performed as shutdown + reinstall (+ optional startup)
  shutdown_op = opcodes.OpInstanceShutdown(instance_name=name)
  reinstall_op = opcodes.OpInstanceReinstall(instance_name=name,
                                             os_type=ostype,
                                             osparams=osparams)
  ops = [shutdown_op, reinstall_op]

  if start:
    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))

  return ops
1312
class R_2_instances_name_reinstall(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  POST_OPCODE = opcodes.OpInstanceReinstall

  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    if self.request_body:
      # Body and query parameters are mutually exclusive
      if self.queryargs:
        raise http.HttpBadRequest("Can't combine query and body parameters")
      body = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      body = {
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),
        }
    else:
      body = {}

    return self.SubmitJob(_ParseInstanceReinstallRequest(self.items[0], body))
1347
class R_2_instances_name_replace_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceReplaceDisks

  def GetPostOpInput(self):
    """Replaces disks on an instance.

    @return: (body, static) pair for L{opcodes.OpInstanceReplaceDisks}
    @raise http.HttpBadRequest: if the disk list cannot be parsed

    """
    static = {
      "instance_name": self.items[0],
      }

    if self.request_body:
      data = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      data = {
        "remote_node": self._checkStringVariable("remote_node", default=None),
        "mode": self._checkStringVariable("mode", default=None),
        "disks": self._checkStringVariable("disks", default=None),
        "iallocator": self._checkStringVariable("iallocator", default=None),
        }
    else:
      data = {}

    # Parse disks
    try:
      raw_disks = data.pop("disks")
    except KeyError:
      pass
    else:
      if raw_disks:
        if ht.TListOf(ht.TInt)(raw_disks): # pylint: disable=E1102
          data["disks"] = raw_disks
        else:
          # Backwards compatibility for strings of the format "1, 2, 3"
          # Use "as" syntax (Python >= 2.6) instead of the removed-in-3.x
          # "except E, err" form; also catch AttributeError so a non-string
          # value (e.g. a list of strings) yields a 400 instead of a crash
          try:
            data["disks"] = [int(part) for part in raw_disks.split(",")]
          except (TypeError, ValueError, AttributeError) as err:
            raise http.HttpBadRequest("Invalid disk index passed: %s" % err)

    return (data, static)
1393
class R_2_instances_name_activate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  PUT_OPCODE = opcodes.OpInstanceActivateDisks

  def GetPutOpInput(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    static = {
      "instance_name": self.items[0],
      "ignore_size": bool(self._checkIntVariable("ignore_size")),
      }
    return ({}, static)
1411
class R_2_instances_name_deactivate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  PUT_OPCODE = opcodes.OpInstanceDeactivateDisks

  def GetPutOpInput(self):
    """Deactivate disks for an instance.

    """
    static = {
      "instance_name": self.items[0],
      "force": self.useForce(),
      }
    return ({}, static)
1427
class R_2_instances_name_recreate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/recreate-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceRecreateDisks

  def GetPostOpInput(self):
    """Recreate disks for an instance.

    """
    static = {
      "instance_name": self.items[0],
      }
    return (self.request_body, static)
1442
class R_2_instances_name_prepare_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  PUT_OPCODE = opcodes.OpBackupPrepare

  def GetPutOpInput(self):
    """Prepares an export for an instance.

    """
    static = {
      "instance_name": self.items[0],
      "mode": self._checkStringVariable("mode"),
      }
    return ({}, static)
1458
class R_2_instances_name_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/export resource.

  """
  PUT_OPCODE = opcodes.OpBackupExport
  PUT_RENAME = {
    "destination": "target_node",
    }

  def GetPutOpInput(self):
    """Exports an instance.

    """
    static = {
      "instance_name": self.items[0],
      }
    return (self.request_body, static)
1476
class R_2_instances_name_migrate(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/migrate resource.

  """
  PUT_OPCODE = opcodes.OpInstanceMigrate

  def GetPutOpInput(self):
    """Migrates an instance.

    """
    static = {
      "instance_name": self.items[0],
      }
    return (self.request_body, static)
1491
class R_2_instances_name_failover(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/failover resource.

  """
  PUT_OPCODE = opcodes.OpInstanceFailover

  def GetPutOpInput(self):
    """Does a failover of an instance.

    """
    static = {
      "instance_name": self.items[0],
      }
    return (self.request_body, static)
1506
class R_2_instances_name_rename(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/rename resource.

  """
  PUT_OPCODE = opcodes.OpInstanceRename

  def GetPutOpInput(self):
    """Changes the name of an instance.

    """
    static = {
      "instance_name": self.items[0],
      }
    return (self.request_body, static)
1521
class R_2_instances_name_modify(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpInstanceSetParams
  PUT_RENAME = {
    "custom_beparams": "beparams",
    "custom_hvparams": "hvparams",
    }

  def GetPutOpInput(self):
    """Changes parameters of an instance.

    """
    # Work on a copy so the original request body stays untouched
    body = dict(self.request_body)
    _ConvertUsbDevices(body)

    return (body, {
      "instance_name": self.items[0],
      })
1543
class R_2_instances_name_disk_grow(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  POST_OPCODE = opcodes.OpInstanceGrowDisk

  def GetPostOpInput(self):
    """Increases the size of an instance disk.

    """
    # items[1] is the disk index from the URI
    static = {
      "instance_name": self.items[0],
      "disk": int(self.items[1]),
      }
    return (self.request_body, static)
1559
class R_2_instances_name_console(baserlib.ResourceBase):
  """/2/instances/[instance_name]/console resource.

  """
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  GET_OPCODE = opcodes.OpInstanceConsole

  def GET(self):
    """Request information for connecting to instance's console.

    @return: Serialized instance console description, see
      L{objects.InstanceConsole}

    """
    name = self.items[0]
    rows = self.GetClient().QueryInstances([name],
                                           ["console", "oper_state"], False)
    ((console, oper_state), ) = rows

    # A console is only available while the instance is running
    if not oper_state:
      raise http.HttpServiceUnavailable("Instance console unavailable")

    assert isinstance(console, dict)
    return console
1586
def _GetQueryFields(args):
  """Tries to extract C{fields} query parameter.

  @type args: dictionary
  @rtype: list of string
  @raise http.HttpBadRequest: When parameter can't be found

  """
  if "fields" not in args:
    raise http.HttpBadRequest("Missing 'fields' query argument")

  # Query arguments are lists; only the first value is used
  return _SplitQueryFields(args["fields"][0])
1602
1603 1604 -def _SplitQueryFields(fields):
1605 """Splits fields as given for a query request. 1606 1607 @type fields: string 1608 @rtype: list of string 1609 1610 """ 1611 return [i.strip() for i in fields.split(",")]
1612
class R_2_query(baserlib.ResourceBase):
  """/2/query/[resource] resource.

  """
  # Results might contain sensitive information
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  PUT_ACCESS = GET_ACCESS
  GET_OPCODE = opcodes.OpQuery
  PUT_OPCODE = opcodes.OpQuery

  def _Query(self, fields, qfilter):
    # Run the query and serialize the response for the HTTP layer
    return self.GetClient().Query(self.items[0], fields, qfilter).ToDict()

  def GET(self):
    """Returns resource information.

    @return: Query result, see L{objects.QueryResponse}

    """
    return self._Query(_GetQueryFields(self.queryargs), None)

  def PUT(self):
    """Submits job querying for resources.

    @return: Query result, see L{objects.QueryResponse}

    """
    body = self.request_body
    baserlib.CheckType(body, dict, "Body contents")

    # Fields come from the body if given, otherwise from the query string
    if "fields" in body:
      fields = body["fields"]
    else:
      fields = _GetQueryFields(self.queryargs)

    qfilter = body.get("qfilter", None)
    # TODO: remove this after 2.7
    if qfilter is None:
      qfilter = body.get("filter", None)

    return self._Query(fields, qfilter)
1657
class R_2_query_fields(baserlib.ResourceBase):
  """/2/query/[resource]/fields resource.

  """
  GET_OPCODE = opcodes.OpQueryFields

  def GET(self):
    """Retrieves list of available fields for a resource.

    @return: List of serialized L{objects.QueryFieldDefinition}

    """
    # No "fields" argument means all fields are requested
    if "fields" in self.queryargs:
      fields = _SplitQueryFields(self.queryargs["fields"][0])
    else:
      fields = None

    return self.GetClient().QueryFields(self.items[0], fields).ToDict()
1679
class _R_Tags(baserlib.OpcodeResource):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None
  GET_OPCODE = opcodes.OpTagsGet
  PUT_OPCODE = opcodes.OpTagsSet
  DELETE_OPCODE = opcodes.OpTagsDel

  def __init__(self, items, queryargs, req, **kwargs):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.OpcodeResource.__init__(self, items, queryargs, req, **kwargs)

    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      # The cluster is a singleton; it has no name in the URI
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    @raise http.HttpBadRequest: for an unknown tag level, or a
      name/level mismatch

    """
    kind = self.TAG_LEVEL

    if kind not in constants.VALID_TAG_TYPES:
      raise http.HttpBadRequest("Unhandled tag type!")

    cl = self.GetClient()

    if kind == constants.TAG_CLUSTER:
      if self.name:
        raise http.HttpBadRequest("Can't specify a name"
                                  " for cluster tag request")
      # Fix: convert the query result to a list exactly once (the
      # original built an intermediate list and copied it again)
      return list(cl.QueryTags(kind, ""))

    if not self.name:
      raise http.HttpBadRequest("Missing name on tag request")

    return list(cl.QueryTags(kind, self.name))

  def GetPutOpInput(self):
    """Add a set of tags.

    The request as a list of strings should be PUT to this URI. And
    you'll have back a job id.

    """
    return ({}, {
      "kind": self.TAG_LEVEL,
      "name": self.name,
      "tags": self.queryargs.get("tag", []),
      "dry_run": self.dryRun(),
      })

  def GetDeleteOpInput(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # Re-use code: same opcode parameters as for adding tags
    return self.GetPutOpInput()
1755
class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  # Tag operations inherited from _R_Tags act at the instance level
  TAG_LEVEL = constants.TAG_INSTANCE
1764
class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  # Tag operations inherited from _R_Tags act at the node level
  TAG_LEVEL = constants.TAG_NODE
1773
class R_2_groups_name_tags(_R_Tags):
  """ /2/groups/[group_name]/tags resource.

  Manages per-nodegroup tags.

  """
  # Tag operations inherited from _R_Tags act at the node-group level
  TAG_LEVEL = constants.TAG_NODEGROUP
1782
class R_2_networks_name_tags(_R_Tags):
  """ /2/networks/[network_name]/tags resource.

  Manages per-network tags.

  """
  # Tag operations inherited from _R_Tags act at the network level
  TAG_LEVEL = constants.TAG_NETWORK
1791
class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  # Cluster level: _R_Tags.__init__ sets self.name to None for this level
  TAG_LEVEL = constants.TAG_CLUSTER
1800