Package ganeti :: Package rapi :: Module rlib2
[hide private]
[frames] | [no frames]

Source Code for Module ganeti.rapi.rlib2

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30   
  31  """Remote API resource implementations. 
  32   
  33  PUT or POST? 
  34  ============ 
  35   
  36  According to RFC2616 the main difference between PUT and POST is that 
  37  POST can create new resources but PUT can only create the resource the 
  38  URI was pointing to on the PUT request. 
  39   
  40  In the context of this module POST on ``/2/instances`` to change an existing 
  41  entity is legitimate, while PUT would not be. PUT creates a new entity (e.g. a 
  42  new instance) with a name specified in the request. 
  43   
  44  Quoting from RFC2616, section 9.6:: 
  45   
  46    The fundamental difference between the POST and PUT requests is reflected in 
  47    the different meaning of the Request-URI. The URI in a POST request 
  48    identifies the resource that will handle the enclosed entity. That resource 
  49    might be a data-accepting process, a gateway to some other protocol, or a 
  50    separate entity that accepts annotations. In contrast, the URI in a PUT 
  51    request identifies the entity enclosed with the request -- the user agent 
  52    knows what URI is intended and the server MUST NOT attempt to apply the 
  53    request to some other resource. If the server desires that the request be 
  54    applied to a different URI, it MUST send a 301 (Moved Permanently) response; 
  55    the user agent MAY then make its own decision regarding whether or not to 
  56    redirect the request. 
  57   
  58  So when adding new methods, if they are operating on the URI entity itself, 
  59  PUT should be preferred over POST. 
  60   
  61  """ 
  62   
  63  # pylint: disable=C0103 
  64   
  65  # C0103: Invalid name, since the R_* names are not conforming 
  66   
  67  from ganeti import opcodes 
  68  from ganeti import objects 
  69  from ganeti import http 
  70  from ganeti import constants 
  71  from ganeti import cli 
  72  from ganeti import rapi 
  73  from ganeti import ht 
  74  from ganeti import compat 
  75  from ganeti.rapi import baserlib 
  76   
  77   
# Fields present on every top-level object type (instances, nodes, groups,
# networks); appended to the type-specific field lists below.
_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]

# Query fields returned for instances (GET on /2/instances in bulk mode)
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.uuids", "nic.names",
            "nic.links", "nic.networks", "nic.networks.names", "nic.bridges",
            "network_port",
            "disk.sizes", "disk.spindles", "disk_usage", "disk.uuids",
            "disk.names",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

# Query fields returned for nodes (GET on /2/nodes in bulk mode)
N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree", "sptotal", "spfree",
            "mtotal", "mnode", "mfree", "hv_state",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnos", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            "ndparams",
            "group.uuid",
            ] + _COMMON_FIELDS

# Query fields returned for networks (GET on /2/networks in bulk mode)
NET_FIELDS = ["name", "network", "gateway",
              "network6", "gateway6",
              "mac_prefix",
              "free_count", "reserved_count",
              "map", "group_list", "inst_list",
              "external_reservations",
              ] + _COMMON_FIELDS

# Query fields returned for node groups (GET on /2/groups in bulk mode)
G_FIELDS = [
  "alloc_policy",
  "name",
  "node_cnt",
  "node_list",
  "ipolicy",
  "custom_ipolicy",
  "diskparams",
  "custom_diskparams",
  "ndparams",
  "custom_ndparams"
  ] + _COMMON_FIELDS

# Query fields returned for job filter rules (/2/filters)
FILTER_RULE_FIELDS = [
  "watermark",
  "priority",
  "predicates",
  "action",
  "reason_trail",
  "uuid",
  ]

# Job fields returned in bulk job listings; cheap to compute compared to
# the full per-opcode data in J_FIELDS
J_FIELDS_BULK = [
  "id", "ops", "status", "summary",
  "opstatus",
  "received_ts", "start_ts", "end_ts",
  ]

# Full job fields for single-job queries; adds the (potentially large)
# per-opcode log and result data
J_FIELDS = J_FIELDS_BULK + [
  "oplog",
  "opresult",
  ]

# External (RAPI) names for the node roles
_NR_DRAINED = "drained"
_NR_MASTER_CANDIDATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

# Maps internal node role constants to their external RAPI names
_NR_MAP = {
  constants.NR_MASTER: _NR_MASTER,
  constants.NR_MCANDIDATE: _NR_MASTER_CANDIDATE,
  constants.NR_DRAINED: _NR_DRAINED,
  constants.NR_OFFLINE: _NR_OFFLINE,
  constants.NR_REGULAR: _NR_REGULAR,
  }

# Every known node role must have an external name
assert frozenset(_NR_MAP.keys()) == constants.NR_ALL

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Feature string for instance reinstall request version 1
_INST_REINSTALL_REQV1 = "instance-reinstall-reqv1"

# Feature string for node migration version 1
_NODE_MIGRATE_REQV1 = "node-migrate-reqv1"

# Feature string for node evacuation with LU-generated jobs
_NODE_EVAC_RES1 = "node-evac-res1"

# All optional RAPI features, as advertised by /2/features
ALL_FEATURES = compat.UniqueFrozenset([
  _INST_CREATE_REQV1,
  _INST_REINSTALL_REQV1,
  _NODE_MIGRATE_REQV1,
  _NODE_EVAC_RES1,
  ])

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10
# FIXME: For compatibility we update the beparams/memory field. Needs to be
# removed in Ganeti 2.8
def _UpdateBeparams(inst):
  """Mirror the maximum memory value into the legacy "memory" beparam.

  @param inst: Inst dict
  @return: Updated inst dict (modified in place)

  """
  params = inst["beparams"]
  params[constants.BE_MEMORY] = params[constants.BE_MAXMEM]
  return inst
class R_root(baserlib.ResourceBase):
  """/ resource.

  """
  @staticmethod
  def GET():
    """Supported for legacy reasons.

    @return: None; old clients expect the root resource to answer GET

    """
    return None
class R_2(R_root):
  """/2 resource.

  Root of the version-2 API namespace; inherits the legacy GET behaviour
  from L{R_root} unchanged.

  """
class R_version(baserlib.ResourceBase):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    @return: the value of L{constants.RAPI_VERSION}

    """
    return constants.RAPI_VERSION
class R_2_info(baserlib.OpcodeResource):
  """/2/info resource.

  """
  GET_OPCODE = opcodes.OpClusterQuery
  # Old RAPI field names mapped to their current internal names
  GET_ALIASES = {
    "volume_group_name": "vg_name",
    "drbd_usermode_helper": "drbd_helper",
    }

  def GET(self):
    """Returns cluster information.

    @return: the cluster information dict as returned by the client

    """
    return self.GetClient().QueryClusterInfo()
class R_2_features(baserlib.ResourceBase):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns list of optional RAPI features implemented.

    @return: the members of L{ALL_FEATURES} as a list

    """
    return list(ALL_FEATURES)
class R_2_os(baserlib.OpcodeResource):
  """/2/os resource.

  """
  GET_OPCODE = opcodes.OpOsDiagnose

  def GET(self):
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    client = self.GetClient()
    op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[])
    job_id = self.SubmitJob([op], cl=client)
    # we use custom feedback function, instead of print we log the status
    result = cli.PollJob(job_id, client, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    # Expand every (name, variants) pair into the full OS names
    return [os_name
            for (base_name, variants) in diagnose_data
            for os_name in cli.CalculateOSNames(base_name, variants)]
class R_2_redist_config(baserlib.OpcodeResource):
  """/2/redistribute-config resource.

  """
  # PUT submits an OpClusterRedistConf job; no extra parameters are needed
  PUT_OPCODE = opcodes.OpClusterRedistConf
class R_2_cluster_modify(baserlib.OpcodeResource):
  """/2/modify resource.

  """
  PUT_OPCODE = opcodes.OpClusterSetParams
  # Cluster parameters that may not be modified through RAPI
  PUT_FORBIDDEN = [
    "compression_tools",
    ]
def checkFilterParameters(data):
  """Checks and extracts filter rule parameters from a request body.

  @param data: decoded request body; must be a dict and may only contain
    the keys "priority", "predicates", "action" and "reason"
  @return: the checked parameters: (priority, predicates, action, reason)
  @raise http.HttpBadRequest: if the body is not a dict or contains an
    unsupported parameter

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  # Forbid unknown parameters
  allowed_params = frozenset(["priority", "predicates", "action", "reason"])
  for param in data:
    if param not in allowed_params:
      raise http.HttpBadRequest("Invalid body parameters: filter rule doesn't"
                                " support the parameter '%s'" % param)

  priority = baserlib.CheckParameter(
    data, "priority", exptype=int, default=0)

  # We leave the deeper check into the predicates list to the server.
  predicates = baserlib.CheckParameter(
    data, "predicates", exptype=list, default=[])

  # The action can be a string or a list; we leave the check to the server.
  action = baserlib.CheckParameter(data, "action", default="CONTINUE")

  reason = baserlib.CheckParameter(data, "reason", exptype=list, default=[])

  return (priority, predicates, action, reason)
class R_2_filters(baserlib.ResourceBase):
  """/2/filters resource.

  """

  def GET(self):
    """Returns a list of all filter rules.

    @return: in bulk mode a list of filter rule dicts, otherwise a list
      of dicts mapping each rule's UUID to its URI

    """
    client = self.GetClient()

    if not self.useBulk():
      uuids = [compat.fst(row) for row in client.QueryFilters(None, ["uuid"])]
      return baserlib.BuildUriList(uuids, "/2/filters/%s",
                                   uri_fields=("uuid", "uri"))

    bulkdata = client.QueryFilters(None, FILTER_RULE_FIELDS)
    return baserlib.MapBulkFields(bulkdata, FILTER_RULE_FIELDS)

  def POST(self):
    """Adds a filter rule.

    @return: the UUID of the newly created filter rule.

    """
    (priority, predicates, action, reason) = \
      checkFilterParameters(self.request_body)

    # Passing None as the UUID makes ReplaceFilter insert a new rule.
    client = self.GetClient()
    return client.ReplaceFilter(None, priority, predicates, action, reason)
class R_2_filters_uuid(baserlib.ResourceBase):
  """/2/filters/[filter_uuid] resource.

  """
  def GET(self):
    """Returns a filter rule.

    @return: a dictionary with the filter rule parameters:
      - uuid: unique filter ID string
      - watermark: highest job ID ever used as a number
      - priority: filter priority as a non-negative number
      - predicates: filter predicates, each one being a list
        with the first element being the name of the predicate
        and the rest being parameters suitable for that predicate
      - action: effect of the filter as a string
      - reason_trail: reasons for the addition of this filter as a
        list of lists

    """
    rule_uuid = self.items[0]

    rows = baserlib.HandleItemQueryErrors(self.GetClient().QueryFilters,
                                          uuids=[rule_uuid],
                                          fields=FILTER_RULE_FIELDS)

    return baserlib.MapFields(FILTER_RULE_FIELDS, rows[0])

  def PUT(self):
    """Replaces an existing filter rule, or creates one if it doesn't
    exist already.

    @return: the UUID of the changed or created filter rule.

    """
    (priority, predicates, action, reason) = \
      checkFilterParameters(self.request_body)

    return self.GetClient().ReplaceFilter(self.items[0], priority, predicates,
                                          action, reason)

  def DELETE(self):
    """Deletes a filter rule.

    """
    return self.GetClient().DeleteFilter(self.items[0])
class R_2_jobs(baserlib.ResourceBase):
  """/2/jobs resource.

  """
  def GET(self):
    """Returns a list of jobs.

    @return: in bulk mode a list of job dicts, otherwise a list of
      dicts mapping each job's ID to its URI

    """
    client = self.GetClient()

    if not self.useBulk():
      ids = [compat.fst(row) for row in client.QueryJobs(None, ["id"])]
      return baserlib.BuildUriList(ids, "/2/jobs/%s",
                                   uri_fields=("id", "uri"))

    return baserlib.MapBulkFields(client.QueryJobs(None, J_FIELDS_BULK),
                                  J_FIELDS_BULK)
class R_2_jobs_id(baserlib.ResourceBase):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
      The result includes:
        - id: job ID as a number
        - status: current job status as a string
        - ops: involved OpCodes as a list of dictionaries for each
          opcodes in the job
        - opstatus: OpCodes status as a list
        - opresult: OpCodes results as a list of lists

    """
    job_id = self.items[0]

    rows = self.GetClient().QueryJobs([job_id], J_FIELDS)
    if rows[0] is None:
      # Unknown job ID
      raise http.HttpNotFound()

    return baserlib.MapFields(J_FIELDS, rows[0])

  def DELETE(self):
    """Cancel not-yet-started job.

    """
    return self.GetClient().CancelJob(self.items[0])
class R_2_jobs_id_wait(baserlib.ResourceBase):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    @return: None if the job didn't change within L{_WFJC_TIMEOUT}
      seconds, otherwise a dict with the new "job_info" and
      "log_entries"

    """
    job_id = self.items[0]

    # "fields" is mandatory, the other two body parameters are optional
    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    # NOTE: "long" makes this Python 2 only
    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = self.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    # A false result indicates the job is not known
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }
class R_2_nodes(baserlib.OpcodeResource):
  """/2/nodes resource.

  """

  def GET(self):
    """Returns a list of all nodes.

    @return: in bulk mode a list of node dicts, otherwise a list of
      dicts mapping each node's name to its URI

    """
    client = self.GetClient()

    if not self.useBulk():
      rows = client.QueryNodes([], ["name"], False)
      return baserlib.BuildUriList([compat.fst(row) for row in rows],
                                   "/2/nodes/%s", uri_fields=("id", "uri"))

    bulkdata = client.QueryNodes([], N_FIELDS, False)
    return baserlib.MapBulkFields(bulkdata, N_FIELDS)
class R_2_nodes_name(baserlib.OpcodeResource):
  """/2/nodes/[node_name] resource.

  """
  GET_ALIASES = {
    "sip": "secondary_ip",
    }

  def GET(self):
    """Send information about a node.

    """
    client = self.GetClient()

    rows = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                          names=[self.items[0]],
                                          fields=N_FIELDS,
                                          use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, rows[0])
class R_2_nodes_name_powercycle(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/powercycle resource.

  """
  POST_OPCODE = opcodes.OpNodePowercycle

  def GetPostOpInput(self):
    """Tries to powercycle a node.

    @return: (body, overrides) tuple for filling L{POST_OPCODE}; the
      node name comes from the URI and "force" from the query arguments

    """
    return (self.request_body, {
      "node_name": self.items[0],
      "force": self.useForce(),
      })
class R_2_nodes_name_role(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/role resource.

  """
  PUT_OPCODE = opcodes.OpNodeSetParams

  def GET(self):
    """Returns the current node role.

    @return: Node role, one of the external names in L{_NR_MAP}

    """
    node_name = self.items[0]
    client = self.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def GetPutOpInput(self):
    """Sets the node role.

    @return: (body, overrides) tuple for filling L{PUT_OPCODE}

    """
    # The request body is just the desired role name as a string
    baserlib.CheckType(self.request_body, basestring, "Body contents")

    role = self.request_body

    # NOTE(review): flags set to None are presumably left unchanged by
    # OpNodeSetParams — confirm against the opcode definition
    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIDATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      # "master" (and anything unknown) cannot be set through this resource
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    assert len(self.items) == 1

    return ({}, {
      "node_name": self.items[0],
      "master_candidate": candidate,
      "offline": offline,
      "drained": drained,
      "force": self.useForce(),
      "auto_promote": bool(self._checkIntVariable("auto-promote", default=0)),
      })
class R_2_nodes_name_evacuate(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/evacuate resource.

  """
  POST_OPCODE = opcodes.OpNodeEvacuate

  def GetPostOpInput(self):
    """Evacuate all instances off a node.

    @return: (body, overrides) tuple for filling L{POST_OPCODE}

    """
    return (self.request_body, {
      "node_name": self.items[0],
      "dry_run": self.dryRun(),
      })
class R_2_nodes_name_migrate(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/migrate resource.

  """
  POST_OPCODE = opcodes.OpNodeMigrate

  def GetPostOpInput(self):
    """Migrate all primary instances from a node.

    """
    if not self.queryargs:
      # New-style request: all parameters come from the body
      body = self.request_body
    else:
      # Support old-style requests passed via query arguments
      if "live" in self.queryargs and "mode" in self.queryargs:
        raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                  " be passed")

      if "live" in self.queryargs:
        migration_mode = (constants.HT_MIGRATION_LIVE
                          if self._checkIntVariable("live", default=1)
                          else constants.HT_MIGRATION_NONLIVE)
      else:
        migration_mode = self._checkStringVariable("mode", default=None)

      body = {
        "mode": migration_mode,
        }

    return (body, {
      "node_name": self.items[0],
      })
class R_2_nodes_name_modify(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/modify resource.

  """
  POST_OPCODE = opcodes.OpNodeSetParams

  def GetPostOpInput(self):
    """Changes parameters of a node.

    @return: (body, overrides) tuple for filling L{POST_OPCODE}

    """
    assert len(self.items) == 1

    return (self.request_body, {
      "node_name": self.items[0],
      })
class R_2_nodes_name_storage(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUNodeQueryStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]
  GET_OPCODE = opcodes.OpNodeQueryStorage

  def GetGetOpInput(self):
    """List storage available on a node.

    """
    fields_arg = self._checkStringVariable("output_fields", None)
    if not fields_arg:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    return ({}, {
      "nodes": [self.items[0]],
      "storage_type": self._checkStringVariable("storage_type", None),
      "output_fields": fields_arg.split(","),
      })
class R_2_nodes_name_storage_modify(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  PUT_OPCODE = opcodes.OpNodeModifyStorage

  def GetPutOpInput(self):
    """Modifies a storage volume on a node.

    """
    volume_name = self._checkStringVariable("name", None)
    if not volume_name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}
    if "allocatable" in self.queryargs:
      allocatable = bool(self._checkIntVariable("allocatable", default=1))
      changes[constants.SF_ALLOCATABLE] = allocatable

    return ({}, {
      "node_name": self.items[0],
      "storage_type": self._checkStringVariable("storage_type", None),
      "name": volume_name,
      "changes": changes,
      })
class R_2_nodes_name_storage_repair(baserlib.OpcodeResource):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  PUT_OPCODE = opcodes.OpRepairNodeStorage

  def GetPutOpInput(self):
    """Repairs a storage volume on a node.

    """
    volume_name = self._checkStringVariable("name", None)
    if not volume_name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    return ({}, {
      "node_name": self.items[0],
      "storage_type": self._checkStringVariable("storage_type", None),
      "name": volume_name,
      })
class R_2_networks(baserlib.OpcodeResource):
  """/2/networks resource.

  """
  POST_OPCODE = opcodes.OpNetworkAdd
  POST_RENAME = {
    "name": "network_name",
    }

  def GetPostOpInput(self):
    """Create a network.

    """
    assert not self.items
    return (self.request_body, {
      "dry_run": self.dryRun(),
      })

  def GET(self):
    """Returns a list of all networks.

    @return: in bulk mode a list of network dicts, otherwise a list of
      dicts mapping each network's name to its URI

    """
    client = self.GetClient()

    if not self.useBulk():
      rows = client.QueryNetworks([], ["name"], False)
      return baserlib.BuildUriList([compat.fst(row) for row in rows],
                                   "/2/networks/%s",
                                   uri_fields=("name", "uri"))

    return baserlib.MapBulkFields(client.QueryNetworks([], NET_FIELDS, False),
                                  NET_FIELDS)
class R_2_networks_name(baserlib.OpcodeResource):
  """/2/networks/[network_name] resource.

  """
  DELETE_OPCODE = opcodes.OpNetworkRemove

  def GET(self):
    """Send information about a network.

    """
    client = self.GetClient()

    rows = baserlib.HandleItemQueryErrors(client.QueryNetworks,
                                          names=[self.items[0]],
                                          fields=NET_FIELDS,
                                          use_locking=self.useLocking())

    return baserlib.MapFields(NET_FIELDS, rows[0])

  def GetDeleteOpInput(self):
    """Delete a network.

    """
    assert len(self.items) == 1
    return (self.request_body, {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      })
class R_2_networks_name_connect(baserlib.OpcodeResource):
  """/2/networks/[network_name]/connect resource.

  """
  PUT_OPCODE = opcodes.OpNetworkConnect

  def GetPutOpInput(self):
    """Connects a network to a node group.

    @return: (body, overrides) tuple for filling L{PUT_OPCODE}

    """
    assert self.items
    return (self.request_body, {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      })
class R_2_networks_name_disconnect(baserlib.OpcodeResource):
  """/2/networks/[network_name]/disconnect resource.

  """
  PUT_OPCODE = opcodes.OpNetworkDisconnect

  def GetPutOpInput(self):
    """Disconnects a network from a node group.

    @return: (body, overrides) tuple for filling L{PUT_OPCODE}

    """
    assert self.items
    return (self.request_body, {
      "network_name": self.items[0],
      "dry_run": self.dryRun(),
      })
class R_2_networks_name_modify(baserlib.OpcodeResource):
  """/2/networks/[network_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpNetworkSetParams

  def GetPutOpInput(self):
    """Changes some parameters of network.

    @return: (body, overrides) tuple for filling L{PUT_OPCODE}

    """
    assert self.items
    return (self.request_body, {
      "network_name": self.items[0],
      })
class R_2_groups(baserlib.OpcodeResource):
  """/2/groups resource.

  """
  POST_OPCODE = opcodes.OpGroupAdd
  POST_RENAME = {
    "name": "group_name",
    }

  def GetPostOpInput(self):
    """Create a node group.

    """
    assert not self.items
    return (self.request_body, {
      "dry_run": self.dryRun(),
      })

  def GET(self):
    """Returns a list of all node groups.

    @return: in bulk mode a list of group dicts, otherwise a list of
      dicts mapping each group's name to its URI

    """
    client = self.GetClient()

    if not self.useBulk():
      rows = client.QueryGroups([], ["name"], False)
      return baserlib.BuildUriList([compat.fst(row) for row in rows],
                                   "/2/groups/%s", uri_fields=("name", "uri"))

    return baserlib.MapBulkFields(client.QueryGroups([], G_FIELDS, False),
                                  G_FIELDS)
class R_2_groups_name(baserlib.OpcodeResource):
  """/2/groups/[group_name] resource.

  """
  DELETE_OPCODE = opcodes.OpGroupRemove

  def GET(self):
    """Send information about a node group.

    """
    client = self.GetClient()

    rows = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                          names=[self.items[0]],
                                          fields=G_FIELDS,
                                          use_locking=self.useLocking())

    return baserlib.MapFields(G_FIELDS, rows[0])

  def GetDeleteOpInput(self):
    """Delete a node group.

    """
    assert len(self.items) == 1
    return ({}, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      })
class R_2_groups_name_modify(baserlib.OpcodeResource):
  """/2/groups/[group_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpGroupSetParams
  # RAPI "custom_*" field names mapped to the opcode's parameter names
  PUT_RENAME = {
    "custom_ndparams": "ndparams",
    "custom_ipolicy": "ipolicy",
    "custom_diskparams": "diskparams",
    }

  def GetPutOpInput(self):
    """Changes some parameters of node group.

    @return: (body, overrides) tuple for filling L{PUT_OPCODE}

    """
    assert self.items
    return (self.request_body, {
      "group_name": self.items[0],
      })
class R_2_groups_name_rename(baserlib.OpcodeResource):
  """/2/groups/[group_name]/rename resource.

  """
  PUT_OPCODE = opcodes.OpGroupRename

  def GetPutOpInput(self):
    """Changes the name of a node group.

    @return: (body, overrides) tuple for filling L{PUT_OPCODE}

    """
    assert len(self.items) == 1
    return (self.request_body, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      })
class R_2_groups_name_assign_nodes(baserlib.OpcodeResource):
  """/2/groups/[group_name]/assign-nodes resource.

  """
  PUT_OPCODE = opcodes.OpGroupAssignNodes

  def GetPutOpInput(self):
    """Assigns nodes to a group.

    @return: (body, overrides) tuple for filling L{PUT_OPCODE}

    """
    assert len(self.items) == 1
    return (self.request_body, {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      "force": self.useForce(),
      })
def _ConvertUsbDevices(data):
  """Convert in place the usb_devices string to the proper format.

  In Ganeti 2.8.4 the separator for the usb_devices hvparam was changed from
  comma to space because commas cannot be accepted on the command line
  (they already act as the separator between different hvparams). RAPI
  should be able to accept commas for backwards compatibility, but we want
  it to also accept the new space separator. Therefore, we convert
  spaces into commas here and keep the old parsing logic elsewhere.

  """
  try:
    hvparams = data["hvparams"]
    hvparams[constants.HV_USB_DEVICES] = \
      hvparams[constants.HV_USB_DEVICES].replace(" ", ",")
    data["hvparams"] = hvparams
  except KeyError:
    # No "hvparams" or no usb_devices entry: nothing to rewrite
    pass
class R_2_instances(baserlib.OpcodeResource):
  """/2/instances resource.

  """
  POST_OPCODE = opcodes.OpInstanceCreate
  POST_RENAME = {
    "os": "os_type",
    "name": "instance_name",
    }

  def GET(self):
    """Returns a list of all available instances.

    """
    client = self.GetClient()
    locking = self.useLocking()

    if not self.useBulk():
      rows = client.QueryInstances([], ["name"], locking)
      return baserlib.BuildUriList([compat.fst(row) for row in rows],
                                   "/2/instances/%s",
                                   uri_fields=("id", "uri"))

    bulkdata = client.QueryInstances([], I_FIELDS, locking)
    # Keep the legacy beparams/memory field up to date in each result
    return [_UpdateBeparams(inst)
            for inst in baserlib.MapBulkFields(bulkdata, I_FIELDS)]

  def GetPostOpInput(self):
    """Create an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      raise http.HttpBadRequest("Instance creation request version 0 is no"
                                " longer supported")

    if data_version != 1:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    body = self.request_body.copy()
    # Drop "__version__", it is not an opcode parameter
    body.pop(_REQ_DATA_VERSION, None)

    _ConvertUsbDevices(body)

    return (body, {
      "dry_run": self.dryRun(),
      })
class R_2_instances_multi_alloc(baserlib.OpcodeResource):
  """/2/instances-multi-alloc resource.

  """
  POST_OPCODE = opcodes.OpInstanceMultiAlloc

  def GetPostOpInput(self):
    """Try to allocate multiple instances.

    @return: A dict with submitted jobs, allocatable instances and failed
             allocations

    """
    if "instances" not in self.request_body:
      raise http.HttpBadRequest("Request is missing required 'instances' field"
                                " in body")

    # Unlike most other RAPI calls, this one is composed of individual opcodes,
    # and we have to do the filling ourselves
    OPCODE_RENAME = {
      "os": "os_type",
      "name": "instance_name",
      }

    # Pre-fill each instance spec into a complete OpInstanceCreate opcode
    body = objects.FillDict(self.request_body, {
      "instances": [
        baserlib.FillOpcode(opcodes.OpInstanceCreate, inst, {},
                            rename=OPCODE_RENAME)
        for inst in self.request_body["instances"]
        ],
      })

    return (body, {
      "dry_run": self.dryRun(),
      })
class R_2_instances_name(baserlib.OpcodeResource):
  """/2/instances/[instance_name] resource.

  """
  DELETE_OPCODE = opcodes.OpInstanceRemove

  def GET(self):
    """Send information about an instance.

    """
    name = self.items[0]
    cl = self.GetClient()

    result = baserlib.HandleItemQueryErrors(cl.QueryInstances,
                                            names=[name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    mapped = baserlib.MapFields(I_FIELDS, result[0])
    return _UpdateBeparams(mapped)

  def GetDeleteOpInput(self):
    """Delete an instance.

    """
    assert len(self.items) == 1
    static = {
      "instance_name": self.items[0],
      "ignore_failures": False,
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)
1178
class R_2_instances_name_info(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/info resource.

  """
  GET_OPCODE = opcodes.OpInstanceQueryData

  def GetGetOpInput(self):
    """Request detailed instance information.

    """
    assert len(self.items) == 1
    # "static=1" requests configuration data only, without contacting nodes
    want_static = bool(self._checkIntVariable("static", default=0))
    return ({}, {
      "instances": [self.items[0]],
      "static": want_static,
      })
1195
class R_2_instances_name_reboot(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  POST_OPCODE = opcodes.OpInstanceReboot

  def GetPostOpInput(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    # A hard reboot is the historical default when no type is given
    default_type = [constants.INSTANCE_REBOOT_HARD]
    static = {
      "instance_name": self.items[0],
      "reboot_type": self.queryargs.get("type", default_type)[0],
      "ignore_secondaries": bool(self._checkIntVariable("ignore_secondaries")),
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)
1219
class R_2_instances_name_startup(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  PUT_OPCODE = opcodes.OpInstanceStartup

  def GetPutOpInput(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    static = {
      "instance_name": self.items[0],
      "force": self.useForce(),
      "dry_run": self.dryRun(),
      "no_remember": bool(self._checkIntVariable("no_remember")),
      }
    return ({}, static)
1242
class R_2_instances_name_shutdown(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  PUT_OPCODE = opcodes.OpInstanceShutdown

  def GetPutOpInput(self):
    """Shutdown an instance.

    """
    static = {
      "instance_name": self.items[0],
      "no_remember": bool(self._checkIntVariable("no_remember")),
      "dry_run": self.dryRun(),
      }
    return (self.request_body, static)
1261
def _ParseInstanceReinstallRequest(name, data):
  """Parses a request for reinstalling an instance.

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  os_name = baserlib.CheckParameter(data, "os", default=None)
  do_start = baserlib.CheckParameter(data, "start", exptype=bool,
                                     default=True)
  os_params = baserlib.CheckParameter(data, "osparams", default=None)

  # Shut down, reinstall and (optionally) start the instance again
  ops = [opcodes.OpInstanceShutdown(instance_name=name)]
  ops.append(opcodes.OpInstanceReinstall(instance_name=name, os_type=os_name,
                                         osparams=os_params))

  if do_start:
    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))

  return ops
1285
class R_2_instances_name_reinstall(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  POST_OPCODE = opcodes.OpInstanceReinstall

  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    body = self.request_body
    if body:
      # Body and query parameters are mutually exclusive
      if self.queryargs:
        raise http.HttpBadRequest("Can't combine query and body parameters")
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      body = {
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),
        }
    else:
      body = {}

    ops = _ParseInstanceReinstallRequest(self.items[0], body)
    return self.SubmitJob(ops)
1320
class R_2_instances_name_replace_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceReplaceDisks

  def GetPostOpInput(self):
    """Replaces disks on an instance.

    Accepts either a JSON body or (legacy) query arguments; the "disks"
    value may be a list of indices or a comma-separated string such as
    "1, 2, 3".

    @return: Opcode input for L{opcodes.OpInstanceReplaceDisks}

    """
    static = {
      "instance_name": self.items[0],
      }

    if self.request_body:
      data = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      data = {
        "remote_node": self._checkStringVariable("remote_node", default=None),
        "mode": self._checkStringVariable("mode", default=None),
        "disks": self._checkStringVariable("disks", default=None),
        "iallocator": self._checkStringVariable("iallocator", default=None),
        }
    else:
      data = {}

    # Parse disks
    try:
      raw_disks = data.pop("disks")
    except KeyError:
      pass
    else:
      if raw_disks:
        if ht.TListOf(ht.TInt)(raw_disks): # pylint: disable=E1102
          data["disks"] = raw_disks
        else:
          # Backwards compatibility for strings of the format "1, 2, 3"
          try:
            data["disks"] = [int(part) for part in raw_disks.split(",")]
          # "except ... as err" instead of the Python-2-only "except ..., err"
          # syntax; valid on Python 2.6+ and required for Python 3 (PEP 3110)
          except (TypeError, ValueError) as err:
            raise http.HttpBadRequest("Invalid disk index passed: %s" % err)

    return (data, static)
1366
class R_2_instances_name_activate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  PUT_OPCODE = opcodes.OpInstanceActivateDisks

  def GetPutOpInput(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    static = {
      "instance_name": self.items[0],
      "ignore_size": bool(self._checkIntVariable("ignore_size")),
      }
    return ({}, static)
1384
class R_2_instances_name_deactivate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  PUT_OPCODE = opcodes.OpInstanceDeactivateDisks

  def GetPutOpInput(self):
    """Deactivate disks for an instance.

    """
    static = {
      "instance_name": self.items[0],
      "force": self.useForce(),
      }
    return ({}, static)
1400
class R_2_instances_name_recreate_disks(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/recreate-disks resource.

  """
  POST_OPCODE = opcodes.OpInstanceRecreateDisks

  def GetPostOpInput(self):
    """Recreate disks for an instance.

    """
    static = {
      "instance_name": self.items[0],
      }
    return (self.request_body, static)
1415
class R_2_instances_name_prepare_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  PUT_OPCODE = opcodes.OpBackupPrepare

  def GetPutOpInput(self):
    """Prepares an export for an instance.

    """
    export_mode = self._checkStringVariable("mode")
    return ({}, {
      "instance_name": self.items[0],
      "mode": export_mode,
      })
1431
class R_2_instances_name_export(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/export resource.

  """
  PUT_OPCODE = opcodes.OpBackupExport
  PUT_RENAME = {
    "destination": "target_node",
    }

  def GetPutOpInput(self):
    """Exports an instance.

    """
    static = {
      "instance_name": self.items[0],
      }
    return (self.request_body, static)
1449
class R_2_instances_name_migrate(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/migrate resource.

  """
  PUT_OPCODE = opcodes.OpInstanceMigrate

  def GetPutOpInput(self):
    """Migrates an instance.

    """
    static = {
      "instance_name": self.items[0],
      }
    return (self.request_body, static)
1464
class R_2_instances_name_failover(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/failover resource.

  """
  PUT_OPCODE = opcodes.OpInstanceFailover

  def GetPutOpInput(self):
    """Does a failover of an instance.

    """
    static = {
      "instance_name": self.items[0],
      }
    return (self.request_body, static)
1479
class R_2_instances_name_rename(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/rename resource.

  """
  PUT_OPCODE = opcodes.OpInstanceRename

  def GetPutOpInput(self):
    """Changes the name of an instance.

    """
    static = {
      "instance_name": self.items[0],
      }
    return (self.request_body, static)
1494
class R_2_instances_name_modify(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/modify resource.

  """
  PUT_OPCODE = opcodes.OpInstanceSetParams
  PUT_RENAME = {
    "custom_beparams": "beparams",
    "custom_hvparams": "hvparams",
    }

  def GetPutOpInput(self):
    """Changes parameters of an instance.

    """
    # Work on a copy so the original request body stays untouched
    body = self.request_body.copy()
    _ConvertUsbDevices(body)

    return (body, {
      "instance_name": self.items[0],
      })
1516
class R_2_instances_name_disk_grow(baserlib.OpcodeResource):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  POST_OPCODE = opcodes.OpInstanceGrowDisk

  def GetPostOpInput(self):
    """Increases the size of an instance disk.

    """
    instance_name = self.items[0]
    disk_index = int(self.items[1])
    return (self.request_body, {
      "instance_name": instance_name,
      "disk": disk_index,
      })
1532
class R_2_instances_name_console(baserlib.ResourceBase):
  """/2/instances/[instance_name]/console resource.

  """
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  GET_OPCODE = opcodes.OpInstanceConsole

  def GET(self):
    """Request information for connecting to instance's console.

    @return: Serialized instance console description, see
      L{objects.InstanceConsole}

    """
    name = self.items[0]
    cl = self.GetClient()

    rows = cl.QueryInstances([name], ["console", "oper_state"], False)
    ((console, oper_state), ) = rows

    # A console is only available while the instance is running
    if not oper_state:
      raise http.HttpServiceUnavailable("Instance console unavailable")

    assert isinstance(console, dict)
    return console
1559
def _GetQueryFields(args):
  """Tries to extract C{fields} query parameter.

  @type args: dictionary
  @rtype: list of string
  @raise http.HttpBadRequest: When parameter can't be found

  """
  if "fields" not in args:
    raise http.HttpBadRequest("Missing 'fields' query argument")

  return _SplitQueryFields(args["fields"][0])
1575
1576 1577 -def _SplitQueryFields(fields):
1578 """Splits fields as given for a query request. 1579 1580 @type fields: string 1581 @rtype: list of string 1582 1583 """ 1584 return [i.strip() for i in fields.split(",")]
1585
class R_2_query(baserlib.ResourceBase):
  """/2/query/[resource] resource.

  """
  # Results might contain sensitive information
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE, rapi.RAPI_ACCESS_READ]
  PUT_ACCESS = GET_ACCESS
  GET_OPCODE = opcodes.OpQuery
  PUT_OPCODE = opcodes.OpQuery

  def _Query(self, fields, qfilter):
    # Run the query through LUXI and serialize the response
    return self.GetClient().Query(self.items[0], fields, qfilter).ToDict()

  def GET(self):
    """Returns resource information.

    @return: Query result, see L{objects.QueryResponse}

    """
    return self._Query(_GetQueryFields(self.queryargs), None)

  def PUT(self):
    """Submits job querying for resources.

    @return: Query result, see L{objects.QueryResponse}

    """
    body = self.request_body
    baserlib.CheckType(body, dict, "Body contents")

    if "fields" in body:
      fields = body["fields"]
    else:
      # Fall back to the query string when the body has no field list
      fields = _GetQueryFields(self.queryargs)

    qfilter = body.get("qfilter", None)
    # TODO: remove this after 2.7
    if qfilter is None:
      qfilter = body.get("filter", None)

    return self._Query(fields, qfilter)
1630
class R_2_query_fields(baserlib.ResourceBase):
  """/2/query/[resource]/fields resource.

  """
  GET_OPCODE = opcodes.OpQueryFields

  def GET(self):
    """Retrieves list of available fields for a resource.

    @return: List of serialized L{objects.QueryFieldDefinition}

    """
    if "fields" in self.queryargs:
      fields = _SplitQueryFields(self.queryargs["fields"][0])
    else:
      # No restriction given, return all fields
      fields = None

    return self.GetClient().QueryFields(self.items[0], fields).ToDict()
1652
class _R_Tags(baserlib.OpcodeResource):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None
  GET_OPCODE = opcodes.OpTagsGet
  PUT_OPCODE = opcodes.OpTagsSet
  DELETE_OPCODE = opcodes.OpTagsDel

  def __init__(self, items, queryargs, req, **kwargs):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.OpcodeResource.__init__(self, items, queryargs, req, **kwargs)

    # The cluster is addressed without a name component in the URI
    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    kind = self.TAG_LEVEL

    if kind not in constants.VALID_TAG_TYPES:
      raise http.HttpBadRequest("Unhandled tag type!")

    cl = self.GetClient()
    if kind == constants.TAG_CLUSTER:
      if self.name:
        raise http.HttpBadRequest("Can't specify a name"
                                  " for cluster tag request")
      tags = cl.QueryTags(kind, "")
    else:
      if not self.name:
        raise http.HttpBadRequest("Missing name on tag request")
      tags = cl.QueryTags(kind, self.name)

    return list(tags)

  def GetPutOpInput(self):
    """Add a set of tags.

    The request as a list of strings should be PUT to this URI. And
    you'll have back a job id.

    """
    static = {
      "kind": self.TAG_LEVEL,
      "name": self.name,
      "tags": self.queryargs.get("tag", []),
      "dry_run": self.dryRun(),
      }
    return ({}, static)

  def GetDeleteOpInput(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # Deletion takes exactly the same input as addition
    return self.GetPutOpInput()
1728
class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  # Tags are attached to individual instances
  TAG_LEVEL = constants.TAG_INSTANCE
1737
class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  # Tags are attached to individual nodes
  TAG_LEVEL = constants.TAG_NODE
1746
class R_2_groups_name_tags(_R_Tags):
  """ /2/groups/[group_name]/tags resource.

  Manages per-nodegroup tags.

  """
  # Tags are attached to node groups
  TAG_LEVEL = constants.TAG_NODEGROUP
1755
class R_2_networks_name_tags(_R_Tags):
  """ /2/networks/[network_name]/tags resource.

  Manages per-network tags.

  """
  # Tags are attached to networks
  TAG_LEVEL = constants.TAG_NETWORK
1764
class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  # Tags apply to the cluster as a whole; no name in the URI
  TAG_LEVEL = constants.TAG_CLUSTER
1773