Package ganeti :: Package rapi :: Module rlib2
[hide private]
[frames] | no frames]

Source Code for Module ganeti.rapi.rlib2

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011 Google Inc. 
   5  # 
   6  # This program is free software; you can redistribute it and/or modify 
   7  # it under the terms of the GNU General Public License as published by 
   8  # the Free Software Foundation; either version 2 of the License, or 
   9  # (at your option) any later version. 
  10  # 
  11  # This program is distributed in the hope that it will be useful, but 
  12  # WITHOUT ANY WARRANTY; without even the implied warranty of 
  13  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
  14  # General Public License for more details. 
  15  # 
  16  # You should have received a copy of the GNU General Public License 
  17  # along with this program; if not, write to the Free Software 
  18  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
  19  # 02110-1301, USA. 
  20   
  21   
  22  """Remote API version 2 baserlib.library. 
  23   
  24    PUT or POST? 
  25    ============ 
  26   
  27    According to RFC2616 the main difference between PUT and POST is that 
  28    POST can create new resources but PUT can only create the resource the 
  29    URI was pointing to on the PUT request. 
  30   
  31    To be in context of this module for instance creation POST on 
   32  /2/instances is legitimate while PUT would not be, since it creates a 
  33    new entity and not just replace /2/instances with it. 
  34   
  35    So when adding new methods, if they are operating on the URI entity itself, 
   36    PUT should be preferred over POST. 
  37   
  38  """ 
  39   
  40  # pylint: disable-msg=C0103 
  41   
  42  # C0103: Invalid name, since the R_* names are not conforming 
  43   
  44  from ganeti import opcodes 
  45  from ganeti import http 
  46  from ganeti import constants 
  47  from ganeti import cli 
  48  from ganeti import utils 
  49  from ganeti import rapi 
  50  from ganeti.rapi import baserlib 
  51   
  52   
  53  _COMMON_FIELDS = ["ctime", "mtime", "uuid", "serial_no", "tags"] 
  54  I_FIELDS = ["name", "admin_state", "os", 
  55              "pnode", "snodes", 
  56              "disk_template", 
  57              "nic.ips", "nic.macs", "nic.modes", "nic.links", "nic.bridges", 
  58              "network_port", 
  59              "disk.sizes", "disk_usage", 
  60              "beparams", "hvparams", 
  61              "oper_state", "oper_ram", "oper_vcpus", "status", 
  62              "custom_hvparams", "custom_beparams", "custom_nicparams", 
  63              ] + _COMMON_FIELDS 
  64   
  65  N_FIELDS = ["name", "offline", "master_candidate", "drained", 
  66              "dtotal", "dfree", 
  67              "mtotal", "mnode", "mfree", 
  68              "pinst_cnt", "sinst_cnt", 
  69              "ctotal", "cnodes", "csockets", 
  70              "pip", "sip", "role", 
  71              "pinst_list", "sinst_list", 
  72              "master_capable", "vm_capable", 
  73              "group.uuid", 
  74              ] + _COMMON_FIELDS 
  75   
  76  G_FIELDS = ["name", "uuid", 
  77              "alloc_policy", 
  78              "node_cnt", "node_list", 
  79              "ctime", "mtime", "serial_no", 
  80              ]  # "tags" is missing to be able to use _COMMON_FIELDS here. 
  81   
  82  _NR_DRAINED = "drained" 
  83  _NR_MASTER_CANDIATE = "master-candidate" 
  84  _NR_MASTER = "master" 
  85  _NR_OFFLINE = "offline" 
  86  _NR_REGULAR = "regular" 
  87   
  88  _NR_MAP = { 
  89    "M": _NR_MASTER, 
  90    "C": _NR_MASTER_CANDIATE, 
  91    "D": _NR_DRAINED, 
  92    "O": _NR_OFFLINE, 
  93    "R": _NR_REGULAR, 
  94    } 
  95   
  96  # Request data version field 
  97  _REQ_DATA_VERSION = "__version__" 
  98   
  99  # Feature string for instance creation request data version 1 
 100  _INST_CREATE_REQV1 = "instance-create-reqv1" 
 101   
 102  # Feature string for instance reinstall request version 1 
 103  _INST_REINSTALL_REQV1 = "instance-reinstall-reqv1" 
 104   
 105  # Timeout for /2/jobs/[job_id]/wait. Gives job up to 10 seconds to change. 
 106  _WFJC_TIMEOUT = 10 
107 108 109 -class R_version(baserlib.R_Generic):
110 """/version resource. 111 112 This resource should be used to determine the remote API version and 113 to adapt clients accordingly. 114 115 """ 116 @staticmethod
117 - def GET():
118 """Returns the remote API version. 119 120 """ 121 return constants.RAPI_VERSION
122
123 124 -class R_2_info(baserlib.R_Generic):
125 """/2/info resource. 126 127 """ 128 @staticmethod
129 - def GET():
130 """Returns cluster information. 131 132 """ 133 client = baserlib.GetClient() 134 return client.QueryClusterInfo()
135
136 137 -class R_2_features(baserlib.R_Generic):
138 """/2/features resource. 139 140 """ 141 @staticmethod
142 - def GET():
143 """Returns list of optional RAPI features implemented. 144 145 """ 146 return [_INST_CREATE_REQV1, _INST_REINSTALL_REQV1]
147
148 149 -class R_2_os(baserlib.R_Generic):
150 """/2/os resource. 151 152 """ 153 @staticmethod
154 - def GET():
155 """Return a list of all OSes. 156 157 Can return error 500 in case of a problem. 158 159 Example: ["debian-etch"] 160 161 """ 162 cl = baserlib.GetClient() 163 op = opcodes.OpOsDiagnose(output_fields=["name", "variants"], names=[]) 164 job_id = baserlib.SubmitJob([op], cl) 165 # we use custom feedback function, instead of print we log the status 166 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn) 167 diagnose_data = result[0] 168 169 if not isinstance(diagnose_data, list): 170 raise http.HttpBadGateway(message="Can't get OS list") 171 172 os_names = [] 173 for (name, variants) in diagnose_data: 174 os_names.extend(cli.CalculateOSNames(name, variants)) 175 176 return os_names
177
178 179 -class R_2_redist_config(baserlib.R_Generic):
180 """/2/redistribute-config resource. 181 182 """ 183 @staticmethod
184 - def PUT():
185 """Redistribute configuration to all nodes. 186 187 """ 188 return baserlib.SubmitJob([opcodes.OpClusterRedistConf()])
189
190 191 -class R_2_cluster_modify(baserlib.R_Generic):
192 """/2/modify resource. 193 194 """
195 - def PUT(self):
196 """Modifies cluster parameters. 197 198 @return: a job id 199 200 """ 201 op = baserlib.FillOpcode(opcodes.OpClusterSetParams, self.request_body, 202 None) 203 204 return baserlib.SubmitJob([op])
205
class R_2_jobs(baserlib.R_Generic):
  """/2/jobs resource.

  """
  @staticmethod
  def GET():
    """Returns a dictionary of jobs.

    @return: a dictionary with jobs id and uri.

    """
    client = baserlib.GetClient()
    # Each row returned by QueryJobs is a single-element list; flatten
    # it into a plain list of job ids.
    job_ids = [row[0] for row in client.QueryJobs(None, ["id"])]
    return baserlib.BuildUriList(job_ids, "/2/jobs/%s",
                                 uri_fields=("id", "uri"))
224
225 226 -class R_2_jobs_id(baserlib.R_Generic):
227 """/2/jobs/[job_id] resource. 228 229 """
230 - def GET(self):
231 """Returns a job status. 232 233 @return: a dictionary with job parameters. 234 The result includes: 235 - id: job ID as a number 236 - status: current job status as a string 237 - ops: involved OpCodes as a list of dictionaries for each 238 opcodes in the job 239 - opstatus: OpCodes status as a list 240 - opresult: OpCodes results as a list of lists 241 242 """ 243 fields = ["id", "ops", "status", "summary", 244 "opstatus", "opresult", "oplog", 245 "received_ts", "start_ts", "end_ts", 246 ] 247 job_id = self.items[0] 248 result = baserlib.GetClient().QueryJobs([job_id, ], fields)[0] 249 if result is None: 250 raise http.HttpNotFound() 251 return baserlib.MapFields(fields, result)
252
253 - def DELETE(self):
254 """Cancel not-yet-started job. 255 256 """ 257 job_id = self.items[0] 258 result = baserlib.GetClient().CancelJob(job_id) 259 return result
260
261 262 -class R_2_jobs_id_wait(baserlib.R_Generic):
263 """/2/jobs/[job_id]/wait resource. 264 265 """ 266 # WaitForJobChange provides access to sensitive information and blocks 267 # machine resources (it's a blocking RAPI call), hence restricting access. 268 GET_ACCESS = [rapi.RAPI_ACCESS_WRITE] 269
270 - def GET(self):
271 """Waits for job changes. 272 273 """ 274 job_id = self.items[0] 275 276 fields = self.getBodyParameter("fields") 277 prev_job_info = self.getBodyParameter("previous_job_info", None) 278 prev_log_serial = self.getBodyParameter("previous_log_serial", None) 279 280 if not isinstance(fields, list): 281 raise http.HttpBadRequest("The 'fields' parameter should be a list") 282 283 if not (prev_job_info is None or isinstance(prev_job_info, list)): 284 raise http.HttpBadRequest("The 'previous_job_info' parameter should" 285 " be a list") 286 287 if not (prev_log_serial is None or 288 isinstance(prev_log_serial, (int, long))): 289 raise http.HttpBadRequest("The 'previous_log_serial' parameter should" 290 " be a number") 291 292 client = baserlib.GetClient() 293 result = client.WaitForJobChangeOnce(job_id, fields, 294 prev_job_info, prev_log_serial, 295 timeout=_WFJC_TIMEOUT) 296 if not result: 297 raise http.HttpNotFound() 298 299 if result == constants.JOB_NOTCHANGED: 300 # No changes 301 return None 302 303 (job_info, log_entries) = result 304 305 return { 306 "job_info": job_info, 307 "log_entries": log_entries, 308 }
309
class R_2_nodes(baserlib.R_Generic):
  """/2/nodes resource.

  """
  def GET(self):
    """Returns a list of all nodes.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      # Bulk mode: return the full field set for every node
      bulkdata = client.QueryNodes([], N_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, N_FIELDS)

    # Default mode: names only, converted into a list of URIs
    names = [row[0] for row in client.QueryNodes([], ["name"], False)]
    return baserlib.BuildUriList(names, "/2/nodes/%s",
                                 uri_fields=("id", "uri"))
329
class R_2_nodes_name(baserlib.R_Generic):
  """/2/nodes/[node_name] resource.

  """
  def GET(self):
    """Send information about a node.

    """
    client = baserlib.GetClient()
    # HandleItemQueryErrors translates "unknown item" errors into 404s
    rows = baserlib.HandleItemQueryErrors(client.QueryNodes,
                                          names=[self.items[0]],
                                          fields=N_FIELDS,
                                          use_locking=self.useLocking())
    return baserlib.MapFields(N_FIELDS, rows[0])
347
348 349 -class R_2_nodes_name_role(baserlib.R_Generic):
350 """ /2/nodes/[node_name]/role resource. 351 352 """
353 - def GET(self):
354 """Returns the current node role. 355 356 @return: Node role 357 358 """ 359 node_name = self.items[0] 360 client = baserlib.GetClient() 361 result = client.QueryNodes(names=[node_name], fields=["role"], 362 use_locking=self.useLocking()) 363 364 return _NR_MAP[result[0][0]]
365
366 - def PUT(self):
367 """Sets the node role. 368 369 @return: a job id 370 371 """ 372 if not isinstance(self.request_body, basestring): 373 raise http.HttpBadRequest("Invalid body contents, not a string") 374 375 node_name = self.items[0] 376 role = self.request_body 377 378 if role == _NR_REGULAR: 379 candidate = False 380 offline = False 381 drained = False 382 383 elif role == _NR_MASTER_CANDIATE: 384 candidate = True 385 offline = drained = None 386 387 elif role == _NR_DRAINED: 388 drained = True 389 candidate = offline = None 390 391 elif role == _NR_OFFLINE: 392 offline = True 393 candidate = drained = None 394 395 else: 396 raise http.HttpBadRequest("Can't set '%s' role" % role) 397 398 op = opcodes.OpNodeSetParams(node_name=node_name, 399 master_candidate=candidate, 400 offline=offline, 401 drained=drained, 402 force=bool(self.useForce())) 403 404 return baserlib.SubmitJob([op])
405
406 407 -class R_2_nodes_name_evacuate(baserlib.R_Generic):
408 """/2/nodes/[node_name]/evacuate resource. 409 410 """
411 - def POST(self):
412 """Evacuate all secondary instances off a node. 413 414 """ 415 node_name = self.items[0] 416 remote_node = self._checkStringVariable("remote_node", default=None) 417 iallocator = self._checkStringVariable("iallocator", default=None) 418 early_r = bool(self._checkIntVariable("early_release", default=0)) 419 dry_run = bool(self.dryRun()) 420 421 cl = baserlib.GetClient() 422 423 op = opcodes.OpNodeEvacStrategy(nodes=[node_name], 424 iallocator=iallocator, 425 remote_node=remote_node) 426 427 job_id = baserlib.SubmitJob([op], cl) 428 # we use custom feedback function, instead of print we log the status 429 result = cli.PollJob(job_id, cl, feedback_fn=baserlib.FeedbackFn) 430 431 jobs = [] 432 for iname, node in result[0]: 433 if dry_run: 434 jid = None 435 else: 436 op = opcodes.OpInstanceReplaceDisks(instance_name=iname, 437 remote_node=node, disks=[], 438 mode=constants.REPLACE_DISK_CHG, 439 early_release=early_r) 440 jid = baserlib.SubmitJob([op]) 441 jobs.append((jid, iname, node)) 442 443 return jobs
444
class R_2_nodes_name_migrate(baserlib.R_Generic):
  """/2/nodes/[node_name]/migrate resource.

  """
  def POST(self):
    """Migrate all primary instances from a node.

    Accepts either the legacy boolean "live" parameter or the newer
    "mode" parameter, but not both.

    """
    node_name = self.items[0]

    have_live = "live" in self.queryargs
    have_mode = "mode" in self.queryargs

    if have_live and have_mode:
      raise http.HttpBadRequest("Only one of 'live' and 'mode' should"
                                " be passed")

    if have_live:
      # Legacy interface: any non-zero value means live migration
      if self._checkIntVariable("live", default=1):
        mode = constants.HT_MIGRATION_LIVE
      else:
        mode = constants.HT_MIGRATION_NONLIVE
    else:
      mode = self._checkStringVariable("mode", default=None)

    return baserlib.SubmitJob([opcodes.OpNodeMigrate(node_name=node_name,
                                                     mode=mode)])
470
class R_2_nodes_name_storage(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage resource.

  """
  # LUNodeQueryStorage acquires locks, hence restricting access to GET
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Submits a job querying storage units on the node.

    Requires the "storage_type" and "output_fields" query parameters.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    output_fields = self._checkStringVariable("output_fields", None)
    if not output_fields:
      raise http.HttpBadRequest("Missing the required 'output_fields'"
                                " parameter")

    op = opcodes.OpNodeQueryStorage(nodes=[self.items[0]],
                                    storage_type=storage_type,
                                    output_fields=output_fields.split(","))
    return baserlib.SubmitJob([op])
496
class R_2_nodes_name_storage_modify(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/modify resource.

  """
  def PUT(self):
    """Submits a job modifying a storage unit on the node.

    Requires "storage_type" and "name"; "allocatable" is optional.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    # Only include fields the client actually asked to change
    changes = {}
    if "allocatable" in self.queryargs:
      changes[constants.SF_ALLOCATABLE] = \
        bool(self._checkIntVariable("allocatable", default=1))

    op = opcodes.OpNodeModifyStorage(node_name=self.items[0],
                                     storage_type=storage_type,
                                     name=name,
                                     changes=changes)
    return baserlib.SubmitJob([op])
526
class R_2_nodes_name_storage_repair(baserlib.R_Generic):
  """/2/nodes/[node_name]/storage/repair resource.

  """
  def PUT(self):
    """Submits a job repairing a storage unit on the node.

    Requires the "storage_type" and "name" query parameters.

    """
    storage_type = self._checkStringVariable("storage_type", None)
    if not storage_type:
      raise http.HttpBadRequest("Missing the required 'storage_type'"
                                " parameter")

    name = self._checkStringVariable("name", None)
    if not name:
      raise http.HttpBadRequest("Missing the required 'name'"
                                " parameter")

    op = opcodes.OpRepairNodeStorage(node_name=self.items[0],
                                     storage_type=storage_type,
                                     name=name)
    return baserlib.SubmitJob([op])
549
def _ParseCreateGroupRequest(data, dry_run):
  """Parses a request for creating a node group.

  @type data: dict
  @param data: request body; must contain "name", may contain
      "alloc_policy"
  @type dry_run: bool
  @param dry_run: whether to perform a dry run

  @rtype: L{opcodes.OpGroupAdd}
  @return: Group creation opcode

  """
  return opcodes.OpGroupAdd(
    group_name=baserlib.CheckParameter(data, "name"),
    alloc_policy=baserlib.CheckParameter(data, "alloc_policy", default=None),
    dry_run=dry_run)
564
class R_2_groups(baserlib.R_Generic):
  """/2/groups resource.

  """
  def GET(self):
    """Returns a list of all node groups.

    """
    client = baserlib.GetClient()

    if self.useBulk():
      # Bulk mode: full field set for every group
      bulkdata = client.QueryGroups([], G_FIELDS, False)
      return baserlib.MapBulkFields(bulkdata, G_FIELDS)

    # Default mode: names only, converted into a list of URIs
    names = [row[0] for row in client.QueryGroups([], ["name"], False)]
    return baserlib.BuildUriList(names, "/2/groups/%s",
                                 uri_fields=("name", "uri"))

  def POST(self):
    """Create a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    return baserlib.SubmitJob(
      [_ParseCreateGroupRequest(self.request_body, self.dryRun())])
594
class R_2_groups_name(baserlib.R_Generic):
  """/2/groups/[group_name] resource.

  """
  def GET(self):
    """Send information about a node group.

    """
    client = baserlib.GetClient()
    # HandleItemQueryErrors turns "unknown item" errors into 404s
    rows = baserlib.HandleItemQueryErrors(client.QueryGroups,
                                          names=[self.items[0]],
                                          fields=G_FIELDS,
                                          use_locking=self.useLocking())
    return baserlib.MapFields(G_FIELDS, rows[0])

  def DELETE(self):
    """Delete a node group.

    @return: a job id

    """
    op = opcodes.OpGroupRemove(group_name=self.items[0],
                               dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])
621
def _ParseModifyGroupRequest(name, data):
  """Parses a request for modifying a node group.

  @type name: string
  @param name: name of the node group to modify
  @type data: dict
  @param data: request body; may contain "alloc_policy"

  @rtype: L{opcodes.OpGroupSetParams}
  @return: Group modify opcode

  """
  policy = baserlib.CheckParameter(data, "alloc_policy", default=None)
  return opcodes.OpGroupSetParams(group_name=name, alloc_policy=policy)
632
class R_2_groups_name_modify(baserlib.R_Generic):
  """/2/groups/[group_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    return baserlib.SubmitJob(
      [_ParseModifyGroupRequest(self.items[0], self.request_body)])
649
def _ParseRenameGroupRequest(name, data, dry_run):
  """Parses a request for renaming a node group.

  @type name: string
  @param name: name of the node group to rename
  @type data: dict
  @param data: the body received by the rename request; must contain
      "new_name"
  @type dry_run: bool
  @param dry_run: whether to perform a dry run

  @rtype: L{opcodes.OpGroupRename}
  @return: Node group rename opcode

  """
  return opcodes.OpGroupRename(
    old_name=name,
    new_name=baserlib.CheckParameter(data, "new_name"),
    dry_run=dry_run)
670
class R_2_groups_name_rename(baserlib.R_Generic):
  """/2/groups/[group_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of a node group.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")
    rename_op = _ParseRenameGroupRequest(self.items[0], self.request_body,
                                         self.dryRun())
    return baserlib.SubmitJob([rename_op])
686
class R_2_groups_name_assign_nodes(baserlib.R_Generic):
  """/2/groups/[group_name]/assign-nodes resource.

  """
  def PUT(self):
    """Assigns nodes to a group.

    @return: a job id

    """
    # Static values override anything the client sends for these keys
    static_params = {
      "group_name": self.items[0],
      "dry_run": self.dryRun(),
      "force": self.useForce(),
      }
    op = baserlib.FillOpcode(opcodes.OpGroupAssignNodes, self.request_body,
                             static_params)
    return baserlib.SubmitJob([op])
705
706 707 -def _ParseInstanceCreateRequestVersion1(data, dry_run):
708 """Parses an instance creation request version 1. 709 710 @rtype: L{opcodes.OpInstanceCreate} 711 @return: Instance creation opcode 712 713 """ 714 # Disks 715 disks_input = baserlib.CheckParameter(data, "disks", exptype=list) 716 717 disks = [] 718 for idx, i in enumerate(disks_input): 719 baserlib.CheckType(i, dict, "Disk %d specification" % idx) 720 721 # Size is mandatory 722 try: 723 size = i[constants.IDISK_SIZE] 724 except KeyError: 725 raise http.HttpBadRequest("Disk %d specification wrong: missing disk" 726 " size" % idx) 727 728 disk = { 729 constants.IDISK_SIZE: size, 730 } 731 732 # Optional disk access mode 733 try: 734 disk_access = i[constants.IDISK_MODE] 735 except KeyError: 736 pass 737 else: 738 disk[constants.IDISK_MODE] = disk_access 739 740 disks.append(disk) 741 742 assert len(disks_input) == len(disks) 743 744 # Network interfaces 745 nics_input = baserlib.CheckParameter(data, "nics", exptype=list) 746 747 nics = [] 748 for idx, i in enumerate(nics_input): 749 baserlib.CheckType(i, dict, "NIC %d specification" % idx) 750 751 nic = {} 752 753 for field in constants.INIC_PARAMS: 754 try: 755 value = i[field] 756 except KeyError: 757 continue 758 759 nic[field] = value 760 761 nics.append(nic) 762 763 assert len(nics_input) == len(nics) 764 765 # HV/BE parameters 766 hvparams = baserlib.CheckParameter(data, "hvparams", default={}) 767 utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES) 768 769 beparams = baserlib.CheckParameter(data, "beparams", default={}) 770 utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES) 771 772 return opcodes.OpInstanceCreate( 773 mode=baserlib.CheckParameter(data, "mode"), 774 instance_name=baserlib.CheckParameter(data, "name"), 775 os_type=baserlib.CheckParameter(data, "os"), 776 osparams=baserlib.CheckParameter(data, "osparams", default={}), 777 force_variant=baserlib.CheckParameter(data, "force_variant", 778 default=False), 779 no_install=baserlib.CheckParameter(data, "no_install", 
default=False), 780 pnode=baserlib.CheckParameter(data, "pnode", default=None), 781 snode=baserlib.CheckParameter(data, "snode", default=None), 782 disk_template=baserlib.CheckParameter(data, "disk_template"), 783 disks=disks, 784 nics=nics, 785 src_node=baserlib.CheckParameter(data, "src_node", default=None), 786 src_path=baserlib.CheckParameter(data, "src_path", default=None), 787 start=baserlib.CheckParameter(data, "start", default=True), 788 wait_for_sync=True, 789 ip_check=baserlib.CheckParameter(data, "ip_check", default=True), 790 name_check=baserlib.CheckParameter(data, "name_check", default=True), 791 file_storage_dir=baserlib.CheckParameter(data, "file_storage_dir", 792 default=None), 793 file_driver=baserlib.CheckParameter(data, "file_driver", 794 default=constants.FD_LOOP), 795 source_handshake=baserlib.CheckParameter(data, "source_handshake", 796 default=None), 797 source_x509_ca=baserlib.CheckParameter(data, "source_x509_ca", 798 default=None), 799 source_instance_name=baserlib.CheckParameter(data, "source_instance_name", 800 default=None), 801 iallocator=baserlib.CheckParameter(data, "iallocator", default=None), 802 hypervisor=baserlib.CheckParameter(data, "hypervisor", default=None), 803 hvparams=hvparams, 804 beparams=beparams, 805 dry_run=dry_run, 806 )
807
808 809 -class R_2_instances(baserlib.R_Generic):
810 """/2/instances resource. 811 812 """
813 - def GET(self):
814 """Returns a list of all available instances. 815 816 """ 817 client = baserlib.GetClient() 818 819 use_locking = self.useLocking() 820 if self.useBulk(): 821 bulkdata = client.QueryInstances([], I_FIELDS, use_locking) 822 return baserlib.MapBulkFields(bulkdata, I_FIELDS) 823 else: 824 instancesdata = client.QueryInstances([], ["name"], use_locking) 825 instanceslist = [row[0] for row in instancesdata] 826 return baserlib.BuildUriList(instanceslist, "/2/instances/%s", 827 uri_fields=("id", "uri"))
828
830 """Parses an instance creation request version 0. 831 832 Request data version 0 is deprecated and should not be used anymore. 833 834 @rtype: L{opcodes.OpInstanceCreate} 835 @return: Instance creation opcode 836 837 """ 838 # Do not modify anymore, request data version 0 is deprecated 839 beparams = baserlib.MakeParamsDict(self.request_body, 840 constants.BES_PARAMETERS) 841 hvparams = baserlib.MakeParamsDict(self.request_body, 842 constants.HVS_PARAMETERS) 843 fn = self.getBodyParameter 844 845 # disk processing 846 disk_data = fn('disks') 847 if not isinstance(disk_data, list): 848 raise http.HttpBadRequest("The 'disks' parameter should be a list") 849 disks = [] 850 for idx, d in enumerate(disk_data): 851 if not isinstance(d, int): 852 raise http.HttpBadRequest("Disk %d specification wrong: should" 853 " be an integer" % idx) 854 disks.append({"size": d}) 855 856 # nic processing (one nic only) 857 nics = [{"mac": fn("mac", constants.VALUE_AUTO)}] 858 if fn("ip", None) is not None: 859 nics[0]["ip"] = fn("ip") 860 if fn("mode", None) is not None: 861 nics[0]["mode"] = fn("mode") 862 if fn("link", None) is not None: 863 nics[0]["link"] = fn("link") 864 if fn("bridge", None) is not None: 865 nics[0]["bridge"] = fn("bridge") 866 867 # Do not modify anymore, request data version 0 is deprecated 868 return opcodes.OpInstanceCreate( 869 mode=constants.INSTANCE_CREATE, 870 instance_name=fn('name'), 871 disks=disks, 872 disk_template=fn('disk_template'), 873 os_type=fn('os'), 874 pnode=fn('pnode', None), 875 snode=fn('snode', None), 876 iallocator=fn('iallocator', None), 877 nics=nics, 878 start=fn('start', True), 879 ip_check=fn('ip_check', True), 880 name_check=fn('name_check', True), 881 wait_for_sync=True, 882 hypervisor=fn('hypervisor', None), 883 hvparams=hvparams, 884 beparams=beparams, 885 file_storage_dir=fn('file_storage_dir', None), 886 file_driver=fn('file_driver', constants.FD_LOOP), 887 dry_run=bool(self.dryRun()), 888 )
889
890 - def POST(self):
891 """Create an instance. 892 893 @return: a job id 894 895 """ 896 if not isinstance(self.request_body, dict): 897 raise http.HttpBadRequest("Invalid body contents, not a dictionary") 898 899 # Default to request data version 0 900 data_version = self.getBodyParameter(_REQ_DATA_VERSION, 0) 901 902 if data_version == 0: 903 op = self._ParseVersion0CreateRequest() 904 elif data_version == 1: 905 op = _ParseInstanceCreateRequestVersion1(self.request_body, 906 self.dryRun()) 907 else: 908 raise http.HttpBadRequest("Unsupported request data version %s" % 909 data_version) 910 911 return baserlib.SubmitJob([op])
912
class R_2_instances_name(baserlib.R_Generic):
  """/2/instances/[instance_name] resource.

  """
  def GET(self):
    """Send information about an instance.

    """
    client = baserlib.GetClient()
    # HandleItemQueryErrors turns "unknown item" errors into 404s
    rows = baserlib.HandleItemQueryErrors(client.QueryInstances,
                                          names=[self.items[0]],
                                          fields=I_FIELDS,
                                          use_locking=self.useLocking())
    return baserlib.MapFields(I_FIELDS, rows[0])

  def DELETE(self):
    """Delete an instance.

    @return: a job id

    """
    op = opcodes.OpInstanceRemove(instance_name=self.items[0],
                                  ignore_failures=False,
                                  dry_run=bool(self.dryRun()))
    return baserlib.SubmitJob([op])
940
class R_2_instances_name_info(baserlib.R_Generic):
  """/2/instances/[instance_name]/info resource.

  """
  def GET(self):
    """Request detailed instance information.

    The optional "static" query parameter (0/1) requests configuration
    data only, without contacting the nodes.

    """
    want_static = bool(self._checkIntVariable("static", default=0))
    op = opcodes.OpInstanceQueryData(instances=[self.items[0]],
                                     static=want_static)
    return baserlib.SubmitJob([op])
956
class R_2_instances_name_reboot(baserlib.R_Generic):
  """/2/instances/[instance_name]/reboot resource.

  Implements an instance reboot.

  """
  def POST(self):
    """Reboot an instance.

    The URI takes type=[hard|soft|full] and
    ignore_secondaries=[False|True] parameters.

    """
    # "type" defaults to a hard reboot when not given
    reboot_type = self.queryargs.get('type',
                                     [constants.INSTANCE_REBOOT_HARD])[0]
    op = opcodes.OpInstanceReboot(
      instance_name=self.items[0],
      reboot_type=reboot_type,
      ignore_secondaries=bool(self._checkIntVariable('ignore_secondaries')),
      dry_run=bool(self.dryRun()))

    return baserlib.SubmitJob([op])
981
class R_2_instances_name_startup(baserlib.R_Generic):
  """/2/instances/[instance_name]/startup resource.

  Implements an instance startup.

  """
  def PUT(self):
    """Startup an instance.

    The URI takes force=[False|True] parameter to start the instance
    if even if secondary disks are failing.

    """
    op = opcodes.OpInstanceStartup(
      instance_name=self.items[0],
      force=bool(self._checkIntVariable('force')),
      dry_run=bool(self.dryRun()),
      no_remember=bool(self._checkIntVariable('no_remember')))

    return baserlib.SubmitJob([op])
1005
class R_2_instances_name_shutdown(baserlib.R_Generic):
  """/2/instances/[instance_name]/shutdown resource.

  Implements an instance shutdown.

  """
  def PUT(self):
    """Shutdown an instance.

    """
    op = opcodes.OpInstanceShutdown(
      instance_name=self.items[0],
      dry_run=bool(self.dryRun()),
      no_remember=bool(self._checkIntVariable('no_remember')))

    return baserlib.SubmitJob([op])
1024
def _ParseInstanceReinstallRequest(name, data):
  """Parses a request for reinstalling an instance.

  @type name: string
  @param name: instance name
  @type data: dict
  @param data: optional "os", "start" and "osparams" keys

  @return: list of opcodes implementing the reinstall (shutdown,
      reinstall, and — unless "start" is false — startup)

  """
  if not isinstance(data, dict):
    raise http.HttpBadRequest("Invalid body contents, not a dictionary")

  os_name = baserlib.CheckParameter(data, "os", default=None)
  want_start = baserlib.CheckParameter(data, "start", exptype=bool,
                                       default=True)
  os_params = baserlib.CheckParameter(data, "osparams", default=None)

  ops = [
    opcodes.OpInstanceShutdown(instance_name=name),
    opcodes.OpInstanceReinstall(instance_name=name, os_type=os_name,
                                osparams=os_params),
    ]

  if want_start:
    ops.append(opcodes.OpInstanceStartup(instance_name=name, force=False))

  return ops
1048
class R_2_instances_name_reinstall(baserlib.R_Generic):
  """/2/instances/[instance_name]/reinstall resource.

  Implements an instance reinstall.

  """
  def POST(self):
    """Reinstall an instance.

    The URI takes os=name and nostartup=[0|1] optional
    parameters. By default, the instance will be started
    automatically.

    """
    # Body parameters and legacy query parameters are mutually exclusive
    if self.request_body:
      if self.queryargs:
        raise http.HttpBadRequest("Can't combine query and body parameters")
      body = self.request_body
    elif self.queryargs:
      # Legacy interface, do not modify/extend
      body = {
        "os": self._checkStringVariable("os"),
        "start": not self._checkIntVariable("nostartup"),
        }
    else:
      body = {}

    return baserlib.SubmitJob(
      _ParseInstanceReinstallRequest(self.items[0], body))
1081
1082 1083 -class R_2_instances_name_replace_disks(baserlib.R_Generic):
1084 """/2/instances/[instance_name]/replace-disks resource. 1085 1086 """
1087 - def POST(self):
1088 """Replaces disks on an instance. 1089 1090 """ 1091 instance_name = self.items[0] 1092 remote_node = self._checkStringVariable("remote_node", default=None) 1093 mode = self._checkStringVariable("mode", default=None) 1094 raw_disks = self._checkStringVariable("disks", default=None) 1095 iallocator = self._checkStringVariable("iallocator", default=None) 1096 1097 if raw_disks: 1098 try: 1099 disks = [int(part) for part in raw_disks.split(",")] 1100 except ValueError, err: 1101 raise http.HttpBadRequest("Invalid disk index passed: %s" % str(err)) 1102 else: 1103 disks = [] 1104 1105 op = opcodes.OpInstanceReplaceDisks(instance_name=instance_name, 1106 remote_node=remote_node, 1107 mode=mode, 1108 disks=disks, 1109 iallocator=iallocator) 1110 1111 return baserlib.SubmitJob([op])
1112
class R_2_instances_name_activate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/activate-disks resource.

  """
  def PUT(self):
    """Activate disks for an instance.

    The "ignore_size" query argument can be set to ignore the
    currently recorded disk sizes.

    @return: a job id

    """
    op = opcodes.OpInstanceActivateDisks(
      instance_name=self.items[0],
      ignore_size=bool(self._checkIntVariable("ignore_size")))

    return baserlib.SubmitJob([op])
1131
class R_2_instances_name_deactivate_disks(baserlib.R_Generic):
  """/2/instances/[instance_name]/deactivate-disks resource.

  """
  def PUT(self):
    """Deactivate disks for an instance.

    @return: a job id

    """
    return baserlib.SubmitJob([
      opcodes.OpInstanceDeactivateDisks(instance_name=self.items[0]),
      ])
1146
class R_2_instances_name_prepare_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/prepare-export resource.

  """
  def PUT(self):
    """Prepares an export for an instance.

    The export mode is taken from the mandatory "mode" query argument.

    @return: a job id

    """
    op = opcodes.OpBackupPrepare(instance_name=self.items[0],
                                 mode=self._checkStringVariable("mode"))

    return baserlib.SubmitJob([op])
1165
def _ParseExportInstanceRequest(name, data):
  """Parses a request for an instance export.

  @type name: string
  @param name: Instance name
  @type data: dict
  @param data: Request body parameters
  @rtype: L{opcodes.OpBackupExport}
  @return: Instance export opcode

  """
  get = baserlib.CheckParameter

  # "destination" is the only mandatory parameter; the rest fall back to
  # sensible defaults
  return opcodes.OpBackupExport(
    instance_name=name,
    mode=get(data, "mode", default=constants.EXPORT_MODE_LOCAL),
    target_node=get(data, "destination"),
    shutdown=get(data, "shutdown", exptype=bool),
    remove_instance=get(data, "remove_instance", exptype=bool, default=False),
    x509_key_name=get(data, "x509_key_name", default=None),
    destination_x509_ca=get(data, "destination_x509_ca", default=None))
1191
class R_2_instances_name_export(baserlib.R_Generic):
  """/2/instances/[instance_name]/export resource.

  """
  def PUT(self):
    """Exports an instance.

    @return: a job id

    """
    # Use baserlib.CheckType for body validation, consistent with the
    # other body-taking resources (migrate, rename, modify)
    baserlib.CheckType(self.request_body, dict, "Body contents")

    op = _ParseExportInstanceRequest(self.items[0], self.request_body)

    return baserlib.SubmitJob([op])
1209
def _ParseMigrateInstanceRequest(name, data):
  """Parses a request for an instance migration.

  @type name: string
  @param name: Instance name
  @type data: dict
  @param data: Request body parameters
  @rtype: L{opcodes.OpInstanceMigrate}
  @return: Instance migration opcode

  """
  return opcodes.OpInstanceMigrate(
    instance_name=name,
    mode=baserlib.CheckParameter(data, "mode", default=None),
    cleanup=baserlib.CheckParameter(data, "cleanup", exptype=bool,
                                    default=False))
1224
class R_2_instances_name_migrate(baserlib.R_Generic):
  """/2/instances/[instance_name]/migrate resource.

  """
  def PUT(self):
    """Migrates an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    ops = [_ParseMigrateInstanceRequest(self.items[0], self.request_body)]

    return baserlib.SubmitJob(ops)
1241
def _ParseRenameInstanceRequest(name, data):
  """Parses a request for renaming an instance.

  @type name: string
  @param name: Current instance name
  @type data: dict
  @param data: Request body parameters
  @rtype: L{opcodes.OpInstanceRename}
  @return: Instance rename opcode

  """
  # "new_name" is mandatory; IP and name checks default to enabled
  return opcodes.OpInstanceRename(
    instance_name=name,
    new_name=baserlib.CheckParameter(data, "new_name"),
    ip_check=baserlib.CheckParameter(data, "ip_check", default=True),
    name_check=baserlib.CheckParameter(data, "name_check", default=True))
1256
class R_2_instances_name_rename(baserlib.R_Generic):
  """/2/instances/[instance_name]/rename resource.

  """
  def PUT(self):
    """Changes the name of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    ops = [_ParseRenameInstanceRequest(self.items[0], self.request_body)]

    return baserlib.SubmitJob(ops)
1273
def _ParseModifyInstanceRequest(name, data):
  """Parses a request for modifying an instance.

  @type name: string
  @param name: Instance name
  @type data: dict
  @param data: Request body parameters
  @rtype: L{opcodes.OpInstanceSetParams}
  @return: Instance modify opcode

  """
  check = baserlib.CheckParameter

  osparams = check(data, "osparams", default={})
  force = check(data, "force", default=False)
  nics = check(data, "nics", default=[])
  disks = check(data, "disks", default=[])
  disk_template = check(data, "disk_template", default=None)
  remote_node = check(data, "remote_node", default=None)
  os_name = check(data, "os_name", default=None)
  force_variant = check(data, "force_variant", default=False)

  # Hypervisor and backend parameters must contain only known keys with
  # correctly-typed values; the magic "default" value is also accepted
  hvparams = check(data, "hvparams", default={})
  utils.ForceDictType(hvparams, constants.HVS_PARAMETER_TYPES,
                      allowed_values=[constants.VALUE_DEFAULT])

  beparams = check(data, "beparams", default={})
  utils.ForceDictType(beparams, constants.BES_PARAMETER_TYPES,
                      allowed_values=[constants.VALUE_DEFAULT])

  return opcodes.OpInstanceSetParams(instance_name=name, hvparams=hvparams,
                                     beparams=beparams, osparams=osparams,
                                     force=force, nics=nics, disks=disks,
                                     disk_template=disk_template,
                                     remote_node=remote_node, os_name=os_name,
                                     force_variant=force_variant)
1306
class R_2_instances_name_modify(baserlib.R_Generic):
  """/2/instances/[instance_name]/modify resource.

  """
  def PUT(self):
    """Changes some parameters of an instance.

    @return: a job id

    """
    baserlib.CheckType(self.request_body, dict, "Body contents")

    ops = [_ParseModifyInstanceRequest(self.items[0], self.request_body)]

    return baserlib.SubmitJob(ops)
1323
class R_2_instances_name_disk_grow(baserlib.R_Generic):
  """/2/instances/[instance_name]/disk/[disk_index]/grow resource.

  """
  def POST(self):
    """Increases the size of an instance disk.

    @return: a job id

    """
    (instance_name, disk_index) = self.items[0:2]

    # Instance name and disk index come from the URI and override any
    # identically-named body parameters
    op = baserlib.FillOpcode(opcodes.OpInstanceGrowDisk, self.request_body, {
      "instance_name": instance_name,
      "disk": int(disk_index),
      })

    return baserlib.SubmitJob([op])
1341
class R_2_instances_name_console(baserlib.R_Generic):
  """/2/instances/[instance_name]/console resource.

  """
  GET_ACCESS = [rapi.RAPI_ACCESS_WRITE]

  def GET(self):
    """Request information for connecting to instance's console.

    @return: Serialized instance console description, see
             L{objects.InstanceConsole}

    """
    cl = baserlib.GetClient()

    # Query a single field of a single instance, unpacking both levels
    ((console, ), ) = cl.QueryInstances([self.items[0]], ["console"], False)

    if console is None:
      raise http.HttpServiceUnavailable("Instance console unavailable")

    assert isinstance(console, dict)

    return console
1365
class _R_Tags(baserlib.R_Generic):
  """Quasiclass for tagging resources.

  Manages tags. When inheriting this class you must define the
  TAG_LEVEL for it.

  """
  TAG_LEVEL = None

  def __init__(self, items, queryargs, req):
    """A tag resource constructor.

    We have to override the default to sort out cluster naming case.

    """
    baserlib.R_Generic.__init__(self, items, queryargs, req)

    if self.TAG_LEVEL == constants.TAG_CLUSTER:
      # Cluster-level tags are not bound to a named object
      self.name = None
    else:
      self.name = items[0]

  def GET(self):
    """Returns a list of tags.

    Example: ["tag1", "tag2", "tag3"]

    """
    # pylint: disable-msg=W0212
    return baserlib._Tags_GET(self.TAG_LEVEL, name=self.name)

  def PUT(self):
    """Add a set of tags.

    The request as a list of strings should be PUT to this URI. And
    you'll have back a job id.

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # Fixed duplicated "the" in the concatenated error message
      raise http.HttpBadRequest("Please specify tag(s) to add using the"
                                " 'tag' parameter")
    return baserlib._Tags_PUT(self.TAG_LEVEL,
                              self.queryargs['tag'], name=self.name,
                              dry_run=bool(self.dryRun()))

  def DELETE(self):
    """Delete a tag.

    In order to delete a set of tags, the DELETE
    request should be addressed to URI like:
    /tags?tag=[tag]&tag=[tag]

    """
    # pylint: disable-msg=W0212
    if 'tag' not in self.queryargs:
      # Refuse to delete all tags when none were named explicitly
      raise http.HttpBadRequest("Cannot delete all tags - please specify"
                                " tag(s) using the 'tag' parameter")
    return baserlib._Tags_DELETE(self.TAG_LEVEL,
                                 self.queryargs['tag'],
                                 name=self.name,
                                 dry_run=bool(self.dryRun()))
1430
class R_2_instances_name_tags(_R_Tags):
  """/2/instances/[instance_name]/tags resource.

  Manages tags of a single instance.

  """
  TAG_LEVEL = constants.TAG_INSTANCE
1439
class R_2_nodes_name_tags(_R_Tags):
  """/2/nodes/[node_name]/tags resource.

  Manages tags of a single node.

  """
  TAG_LEVEL = constants.TAG_NODE
1448
class R_2_tags(_R_Tags):
  """/2/tags resource.

  Manages cluster-wide tags.

  """
  TAG_LEVEL = constants.TAG_CLUSTER
1457