
Source Code for Module ganeti.rapi.rlib2

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.


  22  """Remote API version 2 baserlib.library. 
  23   
  24    PUT or POST? 
  25    ============ 
  26   
  27    According to RFC2616 the main difference between PUT and POST is that 
  28    POST can create new resources but PUT can only create the resource the 
  29    URI was pointing to on the PUT request. 
  30   
  31    To be in context of this module for instance creation POST on 
  32    /2/instances is legitim while PUT would be not, due to it does create a 
  33    new entity and not just replace /2/instances with it. 
  34   
  35    So when adding new methods, if they are operating on the URI entity itself, 
  36    PUT should be prefered over POST. 
  37   
  38  """ 

# pylint: disable-msg=C0103

# C0103: Invalid name, since the R_* names are not conforming

from ganeti import opcodes
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import utils
from ganeti import rapi
from ganeti.rapi import baserlib


_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            ] + _COMMON_FIELDS

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            ] + _COMMON_FIELDS

_NR_DRAINED = "drained"
_NR_MASTER_CANDIDATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

_NR_MAP = {
  "M": _NR_MASTER,
  "C": _NR_MASTER_CANDIDATE,
  "D": _NR_DRAINED,
  "O": _NR_OFFLINE,
  "R": _NR_REGULAR,
  }

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10


class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION


class R_2_info(baserlib.R_Generic):
  """Cluster info.

  """
  @staticmethod
  def GET():
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()


class R_2_features(baserlib.R_Generic):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns list of optional RAPI features implemented.

    """
    return [_INST_CREATE_REQV1]


class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  @staticmethod
  def GET():
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpDiagnoseOS(output_fields=["name", "variants"], names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # we use a custom feedback function; instead of printing, we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, variants) in diagnose_data:
      os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names


class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  @staticmethod
  def PUT():
    """Redistribute configuration to all nodes.

    """
    return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])


class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  @staticmethod
  def GET():
    """Returns a dictionary of jobs.

    @return: a dictionary with job ids and uris.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists to the list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))


class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries for each
              opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel a not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result


class R_2_jobs_id_wait(baserlib.R_Generic):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = baserlib.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }

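# Editorial illustration, not part of the upstream module: how a client
# might drive the blocking /2/jobs/[job_id]/wait resource defined above.
# The body parameters ("fields", "previous_job_info", "previous_log_serial")
# are the ones validated by R_2_jobs_id_wait.GET; the _rapi_call helper is
# the hypothetical sketch near the top of this file, and the job id and
# terminal status values are examples.
#
#   prev_info = None
#   while True:
#     reply = _rapi_call("GET", "/2/jobs/1234/wait",
#                        {"fields": ["status"],
#                         "previous_job_info": prev_info,
#                         "previous_log_serial": None})
#     if reply is None:
#       continue  # job unchanged within _WFJC_TIMEOUT seconds, ask again
#     prev_info = reply["job_info"]
#     if prev_info[0] in ("success", "error", "canceled"):
#       break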

class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))


class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resources.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])


class R_2_nodes_name_role(baserlib.R_Generic):
  """/2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.request_body

    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIDATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpSetNodeParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])

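# Editorial illustration, not part of the upstream module: the PUT body for
# /2/nodes/[node_name]/role is a bare JSON string holding one of the role
# names defined at the top of this module, for example:
#
#   "master-candidate"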

class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    """
    node_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)
    early_r = bool(self._checkIntVariable("early_release", default=0))
    dry_run = bool(self.dryRun())

    cl = baserlib.GetClient()

    op = opcodes.OpNodeEvacuationStrategy(nodes=[node_name],
                                          iallocator=iallocator,
                                          remote_node=remote_node)

    job_id = baserlib.SubmitJob([op], cl)
    # we use a custom feedback function; instead of printing, we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)

    jobs = []
    for iname, node in result:
      if dry_run:
        jid = None
      else:
        op = opcodes.OpReplaceDisks(instance_name=iname,
                                    remote_node=node, disks=[],
                                    mode=constants.REPLACE_DISK_CHG,
                                    early_release=early_r)
        jid = baserlib.SubmitJob([op])
      jobs.append((jid, iname, node))

    return jobs


class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]

    if "live" in self.queryargs and "mode" in self.queryargs:
      raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                " be passed")
    elif "live" in self.queryargs:
      if self._checkIntVariable("live", default=1):
        mode = constants.HT_MIGRATION_LIVE
      else:
        mode = constants.HT_MIGRATION_NONLIVE
    else:
      mode = self._checkStringVariable("mode", default=None)

    op = opcodes.OpMigrateNode(node_name=node_name, mode=mode)

    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUQueryNodeStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Queries storage units on a node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    """Modifies a storage unit on a node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpModifyNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    """Repairs a storage unit on a node.

    @return: a job id

    """
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])


def _ParseInstanceCreateRequestVersion1(data, dry_run):
  """Parses an instance creation request version 1.

  @rtype: L{opcodes.OpCreateInstance}
  @return: Instance creation opcode

  """
  # Disks
  disks_input = baserlib.CheckParameter(data, "disks", exptype=list)

  disks = []
  for idx, i in enumerate(disks_input):
    baserlib.CheckType(i, dict, "Disk %d specification" % idx)

    # Size is mandatory
    try:
      size = i[constants.IDISK_SIZE]
    except KeyError:
      raise http.HttpBadRequest("Disk %d specification wrong: missing disk"
                                " size" % idx)

    disk = {
      constants.IDISK_SIZE: size,
      }

    # Optional disk access mode
    try:
      disk_access = i[constants.IDISK_MODE]
    except KeyError:
      pass
    else:
      disk[constants.IDISK_MODE] = disk_access

    disks.append(disk)

  assert len(disks_input) == len(disks)

  # Network interfaces
  nics_input = baserlib.CheckParameter(data, "nics", exptype=list)

  nics = []
  for idx, i in enumerate(nics_input):
    baserlib.CheckType(i, dict, "NIC %d specification" % idx)

    nic = {}

    for field in constants.INIC_PARAMS:
      try:
        value = i[field]
      except KeyError:
        continue

      nic[field] = value

    nics.append(nic)

  assert len(nics_input) == len(nics)

  # HV/BE parameters
  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  beparams = baserlib.CheckParameter(data, "beparams", default={})
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)

  return opcodes.OpCreateInstance(
    mode=baserlib.CheckParameter(data, "mode"),
    instance_name=baserlib.CheckParameter(data, "name"),
    os_type=baserlib.CheckParameter(data, "os"),
    osparams=baserlib.CheckParameter(data, "osparams", default={}),
    force_variant=baserlib.CheckParameter(data, "force_variant",
                                          default=False),
    pnode=baserlib.CheckParameter(data, "pnode", default=None),
    snode=baserlib.CheckParameter(data, "snode", default=None),
    disk_template=baserlib.CheckParameter(data, "disk_template"),
    disks=disks,
    nics=nics,
    src_node=baserlib.CheckParameter(data, "src_node", default=None),
    src_path=baserlib.CheckParameter(data, "src_path", default=None),
    start=baserlib.CheckParameter(data, "start", default=True),
    wait_for_sync=True,
    ip_check=baserlib.CheckParameter(data, "ip_check", default=True),
    name_check=baserlib.CheckParameter(data, "name_check", default=True),
    file_storage_dir=baserlib.CheckParameter(data, "file_storage_dir",
                                             default=None),
    file_driver=baserlib.CheckParameter(data, "file_driver",
                                        default=constants.FD_LOOP),
    source_handshake=baserlib.CheckParameter(data, "source_handshake",
                                             default=None),
    source_x509_ca=baserlib.CheckParameter(data, "source_x509_ca",
                                           default=None),
    source_instance_name=baserlib.CheckParameter(data, "source_instance_name",
                                                 default=None),
    iallocator=baserlib.CheckParameter(data, "iallocator", default=None),
    hypervisor=baserlib.CheckParameter(data, "hypervisor", default=None),
    hvparams=hvparams,
    beparams=beparams,
    dry_run=dry_run,
    )

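# Editorial illustration, not part of the upstream module: a request body
# (after JSON decoding) that _ParseInstanceCreateRequestVersion1 above,
# together with R_2_instances.POST below, would accept. Only keys the code
# actually reads are shown; the concrete values are examples.
#
#   {
#     "__version__": 1,
#     "mode": "create",
#     "name": "inst1.example.com",
#     "os": "debian-etch",
#     "disk_template": "drbd",
#     "disks": [{"size": 1024, "mode": "rw"}],
#     "nics": [{"link": "br0"}],
#     "pnode": "node1.example.com",
#     "snode": "node2.example.com",
#     "beparams": {"memory": 512},
#     "start": True,
#   }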

class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def _ParseVersion0CreateRequest(self):
    """Parses an instance creation request version 0.

    Request data version 0 is deprecated and should not be used anymore.

    @rtype: L{opcodes.OpCreateInstance}
    @return: Instance creation opcode

    """
    # Do not modify anymore, request data version 0 is deprecated
    beparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.BES_PARAMETERS)
    hvparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.HVS_PARAMETERS)
    fn = self.getBodyParameter

    # disk processing
    disk_data = fn('disks')
    if not isinstance(disk_data, list):
      raise http.HttpBadRequest("The 'disks' parameter should be a list")
    disks = []
    for idx, d in enumerate(disk_data):
      if not isinstance(d, int):
        raise http.HttpBadRequest("Disk %d specification wrong: should"
                                  " be an integer" % idx)
      disks.append({"size": d})

    # nic processing (one nic only)
    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
    if fn("ip", None) is not None:
      nics[0]["ip"] = fn("ip")
    if fn("mode", None) is not None:
      nics[0]["mode"] = fn("mode")
    if fn("link", None) is not None:
      nics[0]["link"] = fn("link")
    if fn("bridge", None) is not None:
      nics[0]["bridge"] = fn("bridge")

    # Do not modify anymore, request data version 0 is deprecated
    return opcodes.OpCreateInstance(
      mode=constants.INSTANCE_CREATE,
      instance_name=fn('name'),
      disks=disks,
      disk_template=fn('disk_template'),
      os_type=fn('os'),
      pnode=fn('pnode', None),
      snode=fn('snode', None),
      iallocator=fn('iallocator', None),
      nics=nics,
      start=fn('start', True),
      ip_check=fn('ip_check', True),
      name_check=fn('name_check', True),
      wait_for_sync=True,
      hypervisor=fn('hypervisor', None),
      hvparams=hvparams,
      beparams=beparams,
      file_storage_dir=fn('file_storage_dir', None),
      file_driver=fn('file_driver', constants.FD_LOOP),
      dry_run=bool(self.dryRun()),
      )

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      op = self._ParseVersion0CreateRequest()
    elif data_version == 1:
      op = _ParseInstanceCreateRequestVersion1(self.request_body,
                                               self.dryRun())
    else:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    return baserlib.SubmitJob([op])


class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resources.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    """
    op = opcodes.OpRemoveInstance(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])


class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    """
    instance_name = self.items[0]
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpQueryInstanceData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])


class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
    op = opcodes.OpRebootInstance(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable('force'))
    op = opcodes.OpStartupInstance(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    """
    instance_name = self.items[0]
    op = opcodes.OpShutdownInstance(instance_name=instance_name,
                                    dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    instance_name = self.items[0]
    ostype = self._checkStringVariable('os')
    nostartup = self._checkIntVariable('nostartup')
    ops = [
      opcodes.OpShutdownInstance(instance_name=instance_name),
      opcodes.OpReinstallInstance(instance_name=instance_name, os_type=ostype),
      ]
    if not nostartup:
      ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
                                           force=False))
    return baserlib.SubmitJob(ops)


class R_2_instances_name_replace_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  def POST(self):
    """Replaces disks on an instance.

    """
    instance_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    mode = self._checkStringVariable("mode", default=None)
    raw_disks = self._checkStringVariable("disks", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)

    if raw_disks:
      try:
        disks = [int(part) for part in raw_disks.split(",")]
      except ValueError, err:
        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
    else:
      disks = []

    op = opcodes.OpReplaceDisks(instance_name=instance_name,
                                remote_node=remote_node,
                                mode=mode,
                                disks=disks,
                                iallocator=iallocator)

    return baserlib.SubmitJob([op])


class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  def PUT(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    instance_name = self.items[0]
    ignore_size = bool(self._checkIntVariable('ignore_size'))

    op = opcodes.OpActivateInstanceDisks(instance_name=instance_name,
                                         ignore_size=ignore_size)

    return baserlib.SubmitJob([op])


class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  def PUT(self):
    """Deactivate disks for an instance.

    """
    instance_name = self.items[0]

    op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name)

    return baserlib.SubmitJob([op])


class R_2_instances_name_prepare_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  def PUT(self):
    """Prepares an export for an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    mode = self._checkStringVariable("mode")

    op = opcodes.OpPrepareExport(instance_name=instance_name,
                                 mode=mode)

    return baserlib.SubmitJob([op])


def _ParseExportInstanceRequest(name, data):
  """Parses a request for an instance export.

  @rtype: L{opcodes.OpExportInstance}
  @return: Instance export opcode

  """
  mode = baserlib.CheckParameter(data, "mode",
                                 default=constants.EXPORT_MODE_LOCAL)
  target_node = baserlib.CheckParameter(data, "destination")
  shutdown = baserlib.CheckParameter(data, "shutdown", exptype=bool)
  remove_instance = baserlib.CheckParameter(data, "remove_instance",
                                            exptype=bool, default=False)
  x509_key_name = baserlib.CheckParameter(data, "x509_key_name", default=None)
  destination_x509_ca = baserlib.CheckParameter(data, "destination_x509_ca",
                                                default=None)

  return opcodes.OpExportInstance(instance_name=name,
                                  mode=mode,
                                  target_node=target_node,
                                  shutdown=shutdown,
                                  remove_instance=remove_instance,
                                  x509_key_name=x509_key_name,
                                  destination_x509_ca=destination_x509_ca)

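# Editorial illustration, not part of the upstream module: a request body
# (after JSON decoding) accepted by _ParseExportInstanceRequest above for a
# local export; values are examples only.
#
#   {
#     "mode": "local",
#     "destination": "node3.example.com",
#     "shutdown": True,
#     "remove_instance": False,
#   }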

class R_2_instances_name_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/export resource.

  """
  def PUT(self):
    """Exports an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    op = _ParseExportInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseMigrateInstanceRequest(name, data):
  """Parses a request for an instance migration.

  @rtype: L{opcodes.OpMigrateInstance}
  @return: Instance migration opcode

  """
  mode = baserlib.CheckParameter(data, "mode", default=None)
  cleanup = baserlib.CheckParameter(data, "cleanup", exptype=bool,
                                    default=False)

  return opcodes.OpMigrateInstance(instance_name=name, mode=mode,
                                   cleanup=cleanup)


class R_2_instances_name_migrate(baserlib.R_Generic):
  """/2/instances/[instance_name]/migrate resource.

  """
  def PUT(self):
    """Migrates an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseRenameInstanceRequest(name, data):
  """Parses a request for renaming an instance.

  @rtype: L{opcodes.OpRenameInstance}
  @return: Instance rename opcode

  """
  new_name = baserlib.CheckParameter(data, "new_name")
  ip_check = baserlib.CheckParameter(data, "ip_check", default=True)
  name_check = baserlib.CheckParameter(data, "name_check", default=True)

  return opcodes.OpRenameInstance(instance_name=name, new_name=new_name,
                                  name_check=name_check, ip_check=ip_check)


class R_2_instances_name_rename(baserlib.R_Generic):
  """/2/instances/[instance_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseRenameInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseModifyInstanceRequest(name, data):
  """Parses a request for modifying an instance.

  @rtype: L{opcodes.OpSetInstanceParams}
  @return: Instance modify opcode

  """
  osparams = baserlib.CheckParameter(data, "osparams", default={})
  force = baserlib.CheckParameter(data, "force", default=False)
  nics = baserlib.CheckParameter(data, "nics", default=[])
  disks = baserlib.CheckParameter(data, "disks", default=[])
  disk_template = baserlib.CheckParameter(data, "disk_template", default=None)
  remote_node = baserlib.CheckParameter(data, "remote_node", default=None)
  os_name = baserlib.CheckParameter(data, "os_name", default=None)
  force_variant = baserlib.CheckParameter(data, "force_variant", default=False)

  # HV/BE parameters
  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES,
                      allowed_values=[constants.VALUE_DEFAULT])

  beparams = baserlib.CheckParameter(data, "beparams", default={})
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES,
                      allowed_values=[constants.VALUE_DEFAULT])

  return opcodes.OpSetInstanceParams(instance_name=name, hvparams=hvparams,
                                     beparams=beparams, osparams=osparams,
                                     force=force, nics=nics, disks=disks,
                                     disk_template=disk_template,
                                     remote_node=remote_node, os_name=os_name,
                                     force_variant=force_variant)


class R_2_instances_name_modify(baserlib.R_Generic):
  """/2/instances/[instance_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseModifyInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


class _R_Tags(baserlib.R_Generic):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The request should PUT a list of strings to this URI, and a job id
    is returned.

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to a URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # we are not going to delete all tags
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))


class R_2_instances_name_tags(_R_Tags):
  """/2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE


class R_2_nodes_name_tags(_R_Tags):
  """/2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE


class R_2_tags(_R_Tags):
  """/2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER