
Source Code for Module ganeti.rapi.rlib2

#
#

# Copyright (C) 2006, 2007, 2008, 2009, 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

"""Remote API version 2 resource library, built on top of baserlib.

  PUT or POST?
  ============

  According to RFC 2616, the main difference between PUT and POST is that
  POST can create new resources, while PUT can only create the resource the
  URI was pointing to on the PUT request.

  In the context of this module, for instance creation a POST on
  /2/instances is legitimate while a PUT would not be, because it creates a
  new entity rather than just replacing /2/instances with it.

  So when adding new methods, if they operate on the URI entity itself,
  PUT should be preferred over POST.

"""
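
# An illustrative sketch of the convention above (the instance name is just
# an example value):
#
#   POST /2/instances
#     creates a new instance, so POST is the correct verb for the collection
#   PUT /2/instances/inst1.example.com/startup
#     operates on the entity the URI points to, so PUT is the correct verb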

# pylint: disable-msg=C0103

# C0103: Invalid name, since the R_* names are not conforming

from ganeti import opcodes
from ganeti import http
from ganeti import constants
from ganeti import cli
from ganeti import utils
from ganeti import rapi
from ganeti.rapi import baserlib


_COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"]
I_FIELDS = ["name", "admin_state", "os",
            "pnode", "snodes",
            "disk_template",
            "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges",
            "network_port",
            "disk.sizes", "disk_usage",
            "beparams", "hvparams",
            "oper_state", "oper_ram", "oper_vcpus", "status",
            "custom_hvparams", "custom_beparams", "custom_nicparams",
            ] + _COMMON_FIELDS

N_FIELDS = ["name", "offline", "master_candidate", "drained",
            "dtotal", "dfree",
            "mtotal", "mnode", "mfree",
            "pinst_cnt", "sinst_cnt",
            "ctotal", "cnodes", "csockets",
            "pip", "sip", "role",
            "pinst_list", "sinst_list",
            "master_capable", "vm_capable",
            ] + _COMMON_FIELDS

_NR_DRAINED = "drained"
_NR_MASTER_CANDIATE = "master-candidate"
_NR_MASTER = "master"
_NR_OFFLINE = "offline"
_NR_REGULAR = "regular"

_NR_MAP = {
  "M": _NR_MASTER,
  "C": _NR_MASTER_CANDIATE,
  "D": _NR_DRAINED,
  "O": _NR_OFFLINE,
  "R": _NR_REGULAR,
  }

# Request data version field
_REQ_DATA_VERSION = "__version__"

# Feature string for instance creation request data version 1
_INST_CREATE_REQV1 = "instance-create-reqv1"

# Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change.
_WFJC_TIMEOUT = 10


class R_version(baserlib.R_Generic):
  """/version resource.

  This resource should be used to determine the remote API version and
  to adapt clients accordingly.

  """
  @staticmethod
  def GET():
    """Returns the remote API version.

    """
    return constants.RAPI_VERSION


class R_2_info(baserlib.R_Generic):
  """Cluster info.

  """
  @staticmethod
  def GET():
    """Returns cluster information.

    """
    client = baserlib.GetClient()
    return client.QueryClusterInfo()


class R_2_features(baserlib.R_Generic):
  """/2/features resource.

  """
  @staticmethod
  def GET():
    """Returns list of optional RAPI features implemented.

    """
    return [_INST_CREATE_REQV1]


class R_2_os(baserlib.R_Generic):
  """/2/os resource.

  """
  @staticmethod
  def GET():
    """Return a list of all OSes.

    Can return error 500 in case of a problem.

    Example: ["debian-etch"]

    """
    cl = baserlib.GetClient()
    op = opcodes.OpDiagnoseOS(output_fields=["name", "variants"], names=[])
    job_id = baserlib.SubmitJob([op], cl)
    # we use a custom feedback function; instead of printing, we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)
    diagnose_data = result[0]

    if not isinstance(diagnose_data, list):
      raise http.HttpBadGateway(message="Can't get OS list")

    os_names = []
    for (name, variants) in diagnose_data:
      os_names.extend(cli.CalculateOSNames(name, variants))

    return os_names


class R_2_redist_config(baserlib.R_Generic):
  """/2/redistribute-config resource.

  """
  @staticmethod
  def PUT():
    """Redistribute configuration to all nodes.

    """
    return baserlib.SubmitJob([opcodes.OpRedistributeConfig()])


class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  @staticmethod
  def GET():
    """Returns a dictionary of jobs.

    @return: a dictionary with job ids and uris.

    """
    fields = ["id"]
    cl = baserlib.GetClient()
    # Convert the list of lists to the list of ids
    result = [job_id for [job_id] in cl.QueryJobs(None, fields)]
    return baserlib.BuildUriList(result, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))


class R_2_jobs_id(baserlib.R_Generic):
  """/2/jobs/[job_id] resource.

  """
  def GET(self):
    """Returns a job status.

    @return: a dictionary with job parameters.
        The result includes:
            - id: job ID as a number
            - status: current job status as a string
            - ops: involved OpCodes as a list of dictionaries, one for each
              opcode in the job
            - opstatus: OpCodes status as a list
            - opresult: OpCodes results as a list of lists

    """
    fields = ["id", "ops", "status", "summary",
              "opstatus", "opresult", "oplog",
              "received_ts", "start_ts", "end_ts",
              ]
    job_id = self.items[0]
    result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0]
    if result is None:
      raise http.HttpNotFound()
    return baserlib.MapFields(fields, result)

  def DELETE(self):
    """Cancel a not-yet-started job.

    """
    job_id = self.items[0]
    result = baserlib.GetClient().CancelJob(job_id)
    return result


class R_2_jobs_id_wait(baserlib.R_Generic):
  """/2/jobs/[job_id]/wait resource.

  """
  # WaitForJobChange provides access to sensitive information and blocks
  # machine resources (it's a blocking RAPI call), hence restricting access.
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Waits for job changes.

    """
    job_id = self.items[0]

    fields = self.getBodyParameter("fields")
    prev_job_info = self.getBodyParameter("previous_job_info", None)
    prev_log_serial = self.getBodyParameter("previous_log_serial", None)

    if not isinstance(fields, list):
      raise http.HttpBadRequest("The 'fields' parameter should be a list")

    if not (prev_job_info is None or isinstance(prev_job_info, list)):
      raise http.HttpBadRequest("The 'previous_job_info' parameter should"
                                " be a list")

    if not (prev_log_serial is None or
            isinstance(prev_log_serial, (int, long))):
      raise http.HttpBadRequest("The 'previous_log_serial' parameter should"
                                " be a number")

    client = baserlib.GetClient()
    result = client.WaitForJobChangeOnce(job_id, fields,
                                         prev_job_info, prev_log_serial,
                                         timeout=_WFJC_TIMEOUT)
    if not result:
      raise http.HttpNotFound()

    if result == constants.JOB_NOTCHANGED:
      # No changes
      return None

    (job_info, log_entries) = result

    return {
      "job_info": job_info,
      "log_entries": log_entries,
      }
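
# Illustrative request body for GET /2/jobs/[job_id]/wait (a sketch only; the
# field names match the checks above, the values are example data):
#
#   {"fields": ["status", "opstatus"],
#    "previous_job_info": null,
#    "previous_log_serial": null}
#
# "fields" must be a list; the two "previous_*" parameters may be omitted.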


class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)
    else:
      nodesdata = client.QueryNodes([], ["name"], False)
      nodeslist = [row[0] for row in nodesdata]
      return baserlib.BuildUriList(nodeslist, "/2/nodes/%s",
                                   uri_fields=("id", "uri"))


class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resources.

  """
  def GET(self):
    """Send information about a node.

    """
    node_name = self.items[0]
    client = baserlib.GetClient()

    result = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                            names=[node_name], fields=N_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(N_FIELDS, result[0])


class R_2_nodes_name_role(baserlib.R_Generic):
  """ /2/nodes/[node_name]/role resource.

  """
  def GET(self):
    """Returns the current node role.

    @return: Node role

    """
    node_name = self.items[0]
    client = baserlib.GetClient()
    result = client.QueryNodes(names=[node_name], fields=["role"],
                               use_locking=self.useLocking())

    return _NR_MAP[result[0][0]]

  def PUT(self):
    """Sets the node role.

    @return: a job id

    """
    if not isinstance(self.request_body, basestring):
      raise http.HttpBadRequest("Invalid body contents, not a string")

    node_name = self.items[0]
    role = self.request_body

    if role == _NR_REGULAR:
      candidate = False
      offline = False
      drained = False

    elif role == _NR_MASTER_CANDIATE:
      candidate = True
      offline = drained = None

    elif role == _NR_DRAINED:
      drained = True
      candidate = offline = None

    elif role == _NR_OFFLINE:
      offline = True
      candidate = drained = None

    else:
      raise http.HttpBadRequest("Can't set '%s' role" % role)

    op = opcodes.OpSetNodeParams(node_name=node_name,
                                 master_candidate=candidate,
                                 offline=offline,
                                 drained=drained,
                                 force=bool(self.useForce()))

    return baserlib.SubmitJob([op])
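
# Illustrative request body for PUT /2/nodes/[node_name]/role (a sketch only):
# the body is a single JSON string naming one of the roles defined above, e.g.
#
#   "master-candidate"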


class R_2_nodes_name_evacuate(baserlib.R_Generic):
  """/2/nodes/[node_name]/evacuate resource.

  """
  def POST(self):
    """Evacuate all secondary instances off a node.

    """
    node_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)
    early_r = bool(self._checkIntVariable("early_release", default=0))
    dry_run = bool(self.dryRun())

    cl = baserlib.GetClient()

    op = opcodes.OpNodeEvacuationStrategy(nodes=[node_name],
                                          iallocator=iallocator,
                                          remote_node=remote_node)

    job_id = baserlib.SubmitJob([op], cl)
    # we use a custom feedback function; instead of printing, we log the status
    result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn)

    jobs = []
    for iname, node in result:
      if dry_run:
        jid = None
      else:
        op = opcodes.OpReplaceDisks(instance_name=iname,
                                    remote_node=node, disks=[],
                                    mode=constants.REPLACE_DISK_CHG,
                                    early_release=early_r)
        jid = baserlib.SubmitJob([op])
      jobs.append((jid, iname, node))

    return jobs


class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    """
    node_name = self.items[0]

    if "live" in self.queryargs and "mode" in self.queryargs:
      raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                " be passed")
    elif "live" in self.queryargs:
      if self._checkIntVariable("live", default=1):
        mode = constants.HT_MIGRATION_LIVE
      else:
        mode = constants.HT_MIGRATION_NONLIVE
    else:
      mode = self._checkStringVariable("mode", default=None)

    op = opcodes.OpMigrateNode(node_name=node_name, mode=mode)

    return baserlib.SubmitJob([op])
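
# Illustrative call for POST /2/nodes/[node_name]/migrate (a sketch only; the
# node name is an example value). Pass either 'live' or 'mode', never both:
#
#   POST /2/nodes/node1.example.com/migrate?live=0
#
# requests a non-live migration; omitting both parameters leaves the mode
# unset and defers the choice to the migration opcode's default behaviour.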


class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUQueryNodeStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpQueryNodeStorage(nodes=[node_name],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    changes = {}

    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpModifyNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])


class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    node_name = self.items[0]

    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=node_name,
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])


def _ParseInstanceCreateRequestVersion1(data, dry_run):
  """Parses an instance creation request version 1.

  @rtype: L{opcodes.OpCreateInstance}
  @return: Instance creation opcode

  """
  # Disks
  disks_input = baserlib.CheckParameter(data, "disks", exptype=list)

  disks = []
  for idx, i in enumerate(disks_input):
    baserlib.CheckType(i, dict, "Disk %d specification" % idx)

    # Size is mandatory
    try:
      size = i[constants.IDISK_SIZE]
    except KeyError:
      raise http.HttpBadRequest("Disk %d specification wrong: missing disk"
                                " size" % idx)

    disk = {
      constants.IDISK_SIZE: size,
      }

    # Optional disk access mode
    try:
      disk_access = i[constants.IDISK_MODE]
    except KeyError:
      pass
    else:
      disk[constants.IDISK_MODE] = disk_access

    disks.append(disk)

  assert len(disks_input) == len(disks)

  # Network interfaces
  nics_input = baserlib.CheckParameter(data, "nics", exptype=list)

  nics = []
  for idx, i in enumerate(nics_input):
    baserlib.CheckType(i, dict, "NIC %d specification" % idx)

    nic = {}

    for field in constants.INIC_PARAMS:
      try:
        value = i[field]
      except KeyError:
        continue

      nic[field] = value

    nics.append(nic)

  assert len(nics_input) == len(nics)

  # HV/BE parameters
  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES)

  beparams = baserlib.CheckParameter(data, "beparams", default={})
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES)

  return opcodes.OpCreateInstance(
    mode=baserlib.CheckParameter(data, "mode"),
    instance_name=baserlib.CheckParameter(data, "name"),
    os_type=baserlib.CheckParameter(data, "os"),
    osparams=baserlib.CheckParameter(data, "osparams", default={}),
    force_variant=baserlib.CheckParameter(data, "force_variant",
                                          default=False),
    pnode=baserlib.CheckParameter(data, "pnode", default=None),
    snode=baserlib.CheckParameter(data, "snode", default=None),
    disk_template=baserlib.CheckParameter(data, "disk_template"),
    disks=disks,
    nics=nics,
    src_node=baserlib.CheckParameter(data, "src_node", default=None),
    src_path=baserlib.CheckParameter(data, "src_path", default=None),
    start=baserlib.CheckParameter(data, "start", default=True),
    wait_for_sync=True,
    ip_check=baserlib.CheckParameter(data, "ip_check", default=True),
    name_check=baserlib.CheckParameter(data, "name_check", default=True),
    file_storage_dir=baserlib.CheckParameter(data, "file_storage_dir",
                                             default=None),
    file_driver=baserlib.CheckParameter(data, "file_driver",
                                        default=constants.FD_LOOP),
    source_handshake=baserlib.CheckParameter(data, "source_handshake",
                                             default=None),
    source_x509_ca=baserlib.CheckParameter(data, "source_x509_ca",
                                           default=None),
    source_instance_name=baserlib.CheckParameter(data, "source_instance_name",
                                                 default=None),
    iallocator=baserlib.CheckParameter(data, "iallocator", default=None),
    hypervisor=baserlib.CheckParameter(data, "hypervisor", default=None),
    hvparams=hvparams,
    beparams=beparams,
    dry_run=dry_run,
    )
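
# Illustrative version-1 request body for POST /2/instances (a sketch only;
# all values are example data):
#
#   {"__version__": 1,
#    "mode": "create",
#    "name": "inst1.example.com",
#    "os": "debian-etch",
#    "disk_template": "drbd",
#    "disks": [{"size": 10240}],
#    "nics": [{}],
#    "hypervisor": "xen-pvm",
#    "beparams": {}}
#
# Each "disks" entry is a dictionary with a mandatory size and an optional
# access mode; "nics" entries may set any of constants.INIC_PARAMS.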


class R_2_instances(baserlib.R_Generic):
  """/2/instances resource.

  """
  def GET(self):
    """Returns a list of all available instances.

    """
    client = baserlib.GetClient()

    use_locking = self.useLocking()
    if self.useBulk():
      bulkdata = client.QueryInstances([], I_FIELDS, use_locking)
      return baserlib.MapBulkFields(bulkdata, I_FIELDS)
    else:
      instancesdata = client.QueryInstances([], ["name"], use_locking)
      instanceslist = [row[0] for row in instancesdata]
      return baserlib.BuildUriList(instanceslist, "/2/instances/%s",
                                   uri_fields=("id", "uri"))

  def _ParseVersion0CreateRequest(self):
    """Parses an instance creation request version 0.

    Request data version 0 is deprecated and should not be used anymore.

    @rtype: L{opcodes.OpCreateInstance}
    @return: Instance creation opcode

    """
    # Do not modify anymore, request data version 0 is deprecated
    beparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.BES_PARAMETERS)
    hvparams = baserlib.MakeParamsDict(self.request_body,
                                       constants.HVS_PARAMETERS)
    fn = self.getBodyParameter

    # disk processing
    disk_data = fn('disks')
    if not isinstance(disk_data, list):
      raise http.HttpBadRequest("The 'disks' parameter should be a list")
    disks = []
    for idx, d in enumerate(disk_data):
      if not isinstance(d, int):
        raise http.HttpBadRequest("Disk %d specification wrong: should"
                                  " be an integer" % idx)
      disks.append({"size": d})

    # nic processing (one nic only)
    nics = [{"mac": fn("mac", constants.VALUE_AUTO)}]
    if fn("ip", None) is not None:
      nics[0]["ip"] = fn("ip")
    if fn("mode", None) is not None:
      nics[0]["mode"] = fn("mode")
    if fn("link", None) is not None:
      nics[0]["link"] = fn("link")
    if fn("bridge", None) is not None:
      nics[0]["bridge"] = fn("bridge")

    # Do not modify anymore, request data version 0 is deprecated
    return opcodes.OpCreateInstance(
      mode=constants.INSTANCE_CREATE,
      instance_name=fn('name'),
      disks=disks,
      disk_template=fn('disk_template'),
      os_type=fn('os'),
      pnode=fn('pnode', None),
      snode=fn('snode', None),
      iallocator=fn('iallocator', None),
      nics=nics,
      start=fn('start', True),
      ip_check=fn('ip_check', True),
      name_check=fn('name_check', True),
      wait_for_sync=True,
      hypervisor=fn('hypervisor', None),
      hvparams=hvparams,
      beparams=beparams,
      file_storage_dir=fn('file_storage_dir', None),
      file_driver=fn('file_driver', constants.FD_LOOP),
      dry_run=bool(self.dryRun()),
      )

  def POST(self):
    """Create an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    # Default to request data version 0
    data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0)

    if data_version == 0:
      op = self._ParseVersion0CreateRequest()
    elif data_version == 1:
      op = _ParseInstanceCreateRequestVersion1(self.request_body,
                                               self.dryRun())
    else:
      raise http.HttpBadRequest("Unsupported request data version %s" %
                                data_version)

    return baserlib.SubmitJob([op])


class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resources.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    instance_name = self.items[0]

    result = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                            names=[instance_name],
                                            fields=I_FIELDS,
                                            use_locking=self.useLocking())

    return baserlib.MapFields(I_FIELDS, result[0])

  def DELETE(self):
    """Delete an instance.

    """
    op = opcodes.OpRemoveInstance(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])


class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    """
    instance_name = self.items[0]
    static = bool(self._checkIntVariable("static", default=0))

    op = opcodes.OpQueryInstanceData(instances=[instance_name],
                                     static=static)
    return baserlib.SubmitJob([op])


class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    instance_name = self.items[0]
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    ignore_secondaries = bool(self._checkIntVariable('ignore_secondaries'))
    op = opcodes.OpRebootInstance(instance_name=instance_name,
                                  reboot_type=reboot_type,
                                  ignore_secondaries=ignore_secondaries,
                                  dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
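
# Illustrative call for POST /2/instances/[instance_name]/reboot (a sketch
# only; the instance name is an example value):
#
#   POST /2/instances/inst1.example.com/reboot?type=soft&ignore_secondaries=0
#
# When 'type' is omitted, a hard reboot is performed.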


class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes a force=[False|True] parameter to start the instance
    even if secondary disks are failing.

    """
    instance_name = self.items[0]
    force_startup = bool(self._checkIntVariable('force'))
    op = opcodes.OpStartupInstance(instance_name=instance_name,
                                   force=force_startup,
                                   dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    """
    instance_name = self.items[0]
    op = opcodes.OpShutdownInstance(instance_name=instance_name,
                                    dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])


class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    instance_name = self.items[0]
    ostype = self._checkStringVariable('os')
    nostartup = self._checkIntVariable('nostartup')
    ops = [
      opcodes.OpShutdownInstance(instance_name=instance_name),
      opcodes.OpReinstallInstance(instance_name=instance_name, os_type=ostype),
      ]
    if not nostartup:
      ops.append(opcodes.OpStartupInstance(instance_name=instance_name,
                                           force=False))
    return baserlib.SubmitJob(ops)


class R_2_instances_name_replace_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/replace-disks resource.

  """
  def POST(self):
    """Replaces disks on an instance.

    """
    instance_name = self.items[0]
    remote_node = self._checkStringVariable("remote_node", default=None)
    mode = self._checkStringVariable("mode", default=None)
    raw_disks = self._checkStringVariable("disks", default=None)
    iallocator = self._checkStringVariable("iallocator", default=None)

    if raw_disks:
      try:
        disks = [int(part) for part in raw_disks.split(",")]
      except ValueError, err:
        raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err))
    else:
      disks = []

    op = opcodes.OpReplaceDisks(instance_name=instance_name,
                                remote_node=remote_node,
                                mode=mode,
                                disks=disks,
                                iallocator=iallocator)

    return baserlib.SubmitJob([op])


class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  def PUT(self):
    """Activate disks for an instance.

    The URI might contain ignore_size to ignore current recorded size.

    """
    instance_name = self.items[0]
    ignore_size = bool(self._checkIntVariable('ignore_size'))

    op = opcodes.OpActivateInstanceDisks(instance_name=instance_name,
                                         ignore_size=ignore_size)

    return baserlib.SubmitJob([op])


class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  def PUT(self):
    """Deactivate disks for an instance.

    """
    instance_name = self.items[0]

    op = opcodes.OpDeactivateInstanceDisks(instance_name=instance_name)

    return baserlib.SubmitJob([op])


class R_2_instances_name_prepare_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  def PUT(self):
    """Prepares an export for an instance.

    @return: a job id

    """
    instance_name = self.items[0]
    mode = self._checkStringVariable("mode")

    op = opcodes.OpPrepareExport(instance_name=instance_name,
                                 mode=mode)

    return baserlib.SubmitJob([op])


def _ParseExportInstanceRequest(name, data):
  """Parses a request for an instance export.

  @rtype: L{opcodes.OpExportInstance}
  @return: Instance export opcode

  """
  mode = baserlib.CheckParameter(data, "mode",
                                 default=constants.EXPORT_MODE_LOCAL)
  target_node = baserlib.CheckParameter(data, "destination")
  shutdown = baserlib.CheckParameter(data, "shutdown", exptype=bool)
  remove_instance = baserlib.CheckParameter(data, "remove_instance",
                                            exptype=bool, default=False)
  x509_key_name = baserlib.CheckParameter(data, "x509_key_name", default=None)
  destination_x509_ca = baserlib.CheckParameter(data, "destination_x509_ca",
                                                default=None)

  return opcodes.OpExportInstance(instance_name=name,
                                  mode=mode,
                                  target_node=target_node,
                                  shutdown=shutdown,
                                  remove_instance=remove_instance,
                                  x509_key_name=x509_key_name,
                                  destination_x509_ca=destination_x509_ca)
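
# Illustrative request body for PUT /2/instances/[instance_name]/export
# (a sketch only; the node name is an example value):
#
#   {"destination": "node2.example.com",
#    "shutdown": true,
#    "remove_instance": false}
#
# "destination" and the boolean "shutdown" are required; "mode" defaults to a
# local export when omitted.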


class R_2_instances_name_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/export resource.

  """
  def PUT(self):
    """Exports an instance.

    @return: a job id

    """
    if not isinstance(self.request_body, dict):
      raise http.HttpBadRequest("Invalid body contents, not a dictionary")

    op = _ParseExportInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseMigrateInstanceRequest(name, data):
  """Parses a request for an instance migration.

  @rtype: L{opcodes.OpMigrateInstance}
  @return: Instance migration opcode

  """
  mode = baserlib.CheckParameter(data, "mode", default=None)
  cleanup = baserlib.CheckParameter(data, "cleanup", exptype=bool,
                                    default=False)

  return opcodes.OpMigrateInstance(instance_name=name, mode=mode,
                                   cleanup=cleanup)


class R_2_instances_name_migrate(baserlib.R_Generic):
  """/2/instances/[instance_name]/migrate resource.

  """
  def PUT(self):
    """Migrates an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseMigrateInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseRenameInstanceRequest(name, data):
  """Parses a request for renaming an instance.

  @rtype: L{opcodes.OpRenameInstance}
  @return: Instance rename opcode

  """
  new_name = baserlib.CheckParameter(data, "new_name")
  ip_check = baserlib.CheckParameter(data, "ip_check", default=True)
  name_check = baserlib.CheckParameter(data, "name_check", default=True)

  return opcodes.OpRenameInstance(instance_name=name, new_name=new_name,
                                  name_check=name_check, ip_check=ip_check)
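
# Illustrative request body for PUT /2/instances/[instance_name]/rename
# (a sketch only; the new name is an example value):
#
#   {"new_name": "inst2.example.com", "ip_check": false, "name_check": false}
#
# Only "new_name" is required; both checks default to true when omitted.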


class R_2_instances_name_rename(baserlib.R_Generic):
  """/2/instances/[instance_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseRenameInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


def _ParseModifyInstanceRequest(name, data):
  """Parses a request for modifying an instance.

  @rtype: L{opcodes.OpSetInstanceParams}
  @return: Instance modify opcode

  """
  osparams = baserlib.CheckParameter(data, "osparams", default={})
  force = baserlib.CheckParameter(data, "force", default=False)
  nics = baserlib.CheckParameter(data, "nics", default=[])
  disks = baserlib.CheckParameter(data, "disks", default=[])
  disk_template = baserlib.CheckParameter(data, "disk_template", default=None)
  remote_node = baserlib.CheckParameter(data, "remote_node", default=None)
  os_name = baserlib.CheckParameter(data, "os_name", default=None)
  force_variant = baserlib.CheckParameter(data, "force_variant", default=False)

  # HV/BE parameters
  hvparams = baserlib.CheckParameter(data, "hvparams", default={})
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES,
                      allowed_values=[constants.VALUE_DEFAULT])

  beparams = baserlib.CheckParameter(data, "beparams", default={})
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES,
                      allowed_values=[constants.VALUE_DEFAULT])

  return opcodes.OpSetInstanceParams(instance_name=name, hvparams=hvparams,
                                     beparams=beparams, osparams=osparams,
                                     force=force, nics=nics, disks=disks,
                                     disk_template=disk_template,
                                     remote_node=remote_node, os_name=os_name,
                                     force_variant=force_variant)
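
# Illustrative request body for PUT /2/instances/[instance_name]/modify
# (a sketch only; the parameter values are example data):
#
#   {"beparams": {"vcpus": 4},
#    "hvparams": {},
#    "force": false}
#
# All parameters are optional; hvparams and beparams entries may also be set
# to constants.VALUE_DEFAULT (as allowed above) to revert a parameter to its
# cluster default.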


class R_2_instances_name_modify(baserlib.R_Generic):
  """/2/instances/[instance_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseModifyInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])


class _R_Tags(baserlib.R_Generic):
  """ Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The tags to be added are specified via the 'tag' query parameter
    (which can be given multiple times); a job id is returned.

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to a URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # we are not going to delete all tags
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))
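
# Illustrative calls for the tag resources below (a sketch only; names and
# tags are example values):
#
#   GET    /2/instances/inst1.example.com/tags
#   PUT    /2/instances/inst1.example.com/tags?tag=db&tag=production
#   DELETE /2/instances/inst1.example.com/tags?tag=db
#
# Tags are always passed via the 'tag' query parameter, which may be repeated.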


class R_2_instances_name_tags(_R_Tags):
  """ /2/instances/[instance_name]/tags resource.

  Manages per-instance tags.

  """
  TAG_LEVEL = constants.TAG_INSTANCE


class R_2_nodes_name_tags(_R_Tags):
  """ /2/nodes/[node_name]/tags resource.

  Manages per-node tags.

  """
  TAG_LEVEL = constants.TAG_NODE


class R_2_tags(_R_Tags):
  """ /2/tags resource.

  Manages cluster tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER