Source Code for Module ganeti.tools.burnin

   1  #!/usr/bin/python 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 Google Inc. 
   5  # All rights reserved. 
   6  # 
   7  # Redistribution and use in source and binary forms, with or without 
   8  # modification, are permitted provided that the following conditions are 
   9  # met: 
  10  # 
  11  # 1. Redistributions of source code must retain the above copyright notice, 
  12  # this list of conditions and the following disclaimer. 
  13  # 
  14  # 2. Redistributions in binary form must reproduce the above copyright 
  15  # notice, this list of conditions and the following disclaimer in the 
  16  # documentation and/or other materials provided with the distribution. 
  17  # 
  18  # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS 
  19  # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 
  20  # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  21  # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 
  22  # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  23  # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  24  # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  25  # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 
  26  # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 
  27  # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 
  28  # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
  29   
  30   
  31  """Burnin program 
  32   
  33  """ 
  34   
  35  import sys 
  36  import optparse 
  37  import time 
  38  import socket 
  39  import urllib 
  40  import random 
  41  import string # pylint: disable=W0402 
  42  from itertools import izip, islice, cycle 
  43  from cStringIO import StringIO 
  44  from operator import or_ 
  45   
  46  from ganeti import opcodes 
  47  from ganeti import constants 
  48  from ganeti import cli 
  49  from ganeti import errors 
  50  from ganeti import utils 
  51  from ganeti import hypervisor 
  52  from ganeti import compat 
  53  from ganeti import pathutils 
  54   
  55  from ganeti.confd import client as confd_client 
  56  from ganeti.runtime import (GetClient) 
  57   
  58   
  59  USAGE = ("\tburnin -o OS_NAME [options...] instance_name ...") 
  60   
  61  MAX_RETRIES = 3 
  62  LOG_HEADERS = { 
  63    0: "- ", 
  64    1: "* ", 
  65    2: "", 
  66    } 
  67   
  68  #: Disk templates supporting a single node 
  69  _SINGLE_NODE_DISK_TEMPLATES = compat.UniqueFrozenset([ 
  70    constants.DT_DISKLESS, 
  71    constants.DT_PLAIN, 
  72    constants.DT_FILE, 
  73    constants.DT_SHARED_FILE, 
  74    constants.DT_EXT, 
  75    constants.DT_RBD, 
  76    constants.DT_GLUSTER 
  77    ]) 
  78   
  79  _SUPPORTED_DISK_TEMPLATES = compat.UniqueFrozenset([ 
  80    constants.DT_DISKLESS, 
  81    constants.DT_DRBD8, 
  82    constants.DT_EXT, 
  83    constants.DT_FILE, 
  84    constants.DT_PLAIN, 
  85    constants.DT_RBD, 
  86    constants.DT_SHARED_FILE, 
  87    constants.DT_GLUSTER 
  88    ]) 
  89   
  90  #: Disk templates for which import/export is tested 
  91  _IMPEXP_DISK_TEMPLATES = (_SUPPORTED_DISK_TEMPLATES - frozenset([ 
  92    constants.DT_DISKLESS, 
  93    constants.DT_FILE, 
  94    constants.DT_SHARED_FILE, 
  95    constants.DT_GLUSTER 
  96    ])) 
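As a quick editorial illustration (not part of the module), the set difference above leaves exactly the templates for which import/export is tested: DRBD8, EXT, PLAIN and RBD. A minimal sketch using string stand-ins for the DT_* constants:

  supported = frozenset(["diskless", "drbd", "ext", "file", "plain",
                         "rbd", "sharedfile", "gluster"])
  impexp = supported - frozenset(["diskless", "file", "sharedfile", "gluster"])
  print sorted(impexp)   # ['drbd', 'ext', 'plain', 'rbd']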
  97  
  98  
  99  class InstanceDown(Exception):
 100    """The checked instance was not up"""
 101  
 102  
 103  class BurninFailure(Exception):
 104    """Failure detected during burning"""
 105  
 106  
 107  def Usage():
 108    """Shows program usage information and exits the program."""
 109  
 110    print >> sys.stderr, "Usage:"
 111    print >> sys.stderr, USAGE
 112    sys.exit(2)
 113  
 114  
 115  def Log(msg, *args, **kwargs):
 116    """Simple function that prints out its argument.
 117  
 118    """
 119    if args:
 120      msg = msg % args
 121    indent = kwargs.get("indent", 0)
 122    sys.stdout.write("%*s%s%s\n" % (2 * indent, "",
 123                                    LOG_HEADERS.get(indent, "  "), msg))
 124    sys.stdout.flush()
 125  
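For illustration (editorial note, not module code): each indent level adds two spaces of padding plus the matching LOG_HEADERS prefix, so typical burnin output looks like this (instance and step names below are made up):

  Log("Creating instances")                    # writes "- Creating instances"
  Log("instance %s", "inst1", indent=1)        # writes "  * instance inst1"
  Log("run %s", "replace-disks", indent=2)     # writes "    run replace-disks"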
 126  
 127  def Err(msg, exit_code=1):
 128    """Simple error logging that prints to stderr.
 129  
 130    """
 131    sys.stderr.write(msg + "\n")
 132    sys.stderr.flush()
 133    sys.exit(exit_code)
 134  
 135  
 136  def RandomString(size=8, chars=string.ascii_uppercase + string.digits):
 137    return ''.join(random.choice(chars) for x in range(size))
 138  
 139  
 140  class SimpleOpener(urllib.FancyURLopener):
 141    """A simple url opener"""
 142    # pylint: disable=W0221
 143  
 144    def prompt_user_passwd(self, host, realm, clear_cache=0):
 145      """No-interaction version of prompt_user_passwd."""
 146      # we follow parent class' API
 147      # pylint: disable=W0613
 148      return None, None
 149  
 150    def http_error_default(self, url, fp, errcode, errmsg, headers):
 151      """Custom error handling"""
 152      # make sure sockets are not left in CLOSE_WAIT, this is similar
 153      # but with a different exception to the BasicURLOpener class
 154      _ = fp.read() # throw away data
 155      fp.close()
 156      raise InstanceDown("HTTP error returned: code %s, msg %s" %
 157                         (errcode, errmsg))
158 159 160 OPTIONS = [ 161 cli.cli_option("-o", "--os", dest="os", default=None, 162 help="OS to use during burnin", 163 metavar="<OS>", 164 completion_suggest=cli.OPT_COMPL_ONE_OS), 165 cli.HYPERVISOR_OPT, 166 cli.OSPARAMS_OPT, 167 cli.cli_option("--disk-size", dest="disk_size", 168 help="Disk size (determines disk count)", 169 default="1G", type="string", metavar="<size,size,...>", 170 completion_suggest=("512M 1G 4G 1G,256M" 171 " 4G,1G,1G 10G").split()), 172 cli.cli_option("--disk-growth", dest="disk_growth", help="Disk growth", 173 default="128m", type="string", metavar="<size,size,...>"), 174 cli.cli_option("--mem-size", dest="mem_size", help="Memory size", 175 default=None, type="unit", metavar="<size>", 176 completion_suggest=("128M 256M 512M 1G 4G 8G" 177 " 12G 16G").split()), 178 cli.cli_option("--maxmem-size", dest="maxmem_size", help="Max Memory size", 179 default=256, type="unit", metavar="<size>", 180 completion_suggest=("128M 256M 512M 1G 4G 8G" 181 " 12G 16G").split()), 182 cli.cli_option("--minmem-size", dest="minmem_size", help="Min Memory size", 183 default=128, type="unit", metavar="<size>", 184 completion_suggest=("128M 256M 512M 1G 4G 8G" 185 " 12G 16G").split()), 186 cli.cli_option("--vcpu-count", dest="vcpu_count", help="VCPU count", 187 default=3, type="unit", metavar="<count>", 188 completion_suggest=("1 2 3 4").split()), 189 cli.DEBUG_OPT, 190 cli.VERBOSE_OPT, 191 cli.NOIPCHECK_OPT, 192 cli.NONAMECHECK_OPT, 193 cli.EARLY_RELEASE_OPT, 194 cli.cli_option("--no-replace1", dest="do_replace1", 195 help="Skip disk replacement with the same secondary", 196 action="store_false", default=True), 197 cli.cli_option("--no-replace2", dest="do_replace2", 198 help="Skip disk replacement with a different secondary", 199 action="store_false", default=True), 200 cli.cli_option("--no-failover", dest="do_failover", 201 help="Skip instance failovers", action="store_false", 202 default=True), 203 cli.cli_option("--no-migrate", dest="do_migrate", 204 help="Skip instance live migration", 205 action="store_false", default=True), 206 cli.cli_option("--no-move", dest="do_move", 207 help="Skip instance moves", action="store_false", 208 default=True), 209 cli.cli_option("--no-importexport", dest="do_importexport", 210 help="Skip instance export/import", action="store_false", 211 default=True), 212 cli.cli_option("--no-startstop", dest="do_startstop", 213 help="Skip instance stop/start", action="store_false", 214 default=True), 215 cli.cli_option("--no-reinstall", dest="do_reinstall", 216 help="Skip instance reinstall", action="store_false", 217 default=True), 218 cli.cli_option("--no-reboot", dest="do_reboot", 219 help="Skip instance reboot", action="store_false", 220 default=True), 221 cli.cli_option("--no-renamesame", dest="do_renamesame", 222 help="Skip instance rename to same name", action="store_false", 223 default=True), 224 cli.cli_option("--reboot-types", dest="reboot_types", 225 help="Specify the reboot types", default=None), 226 cli.cli_option("--no-activate-disks", dest="do_activate_disks", 227 help="Skip disk activation/deactivation", 228 action="store_false", default=True), 229 cli.cli_option("--no-add-disks", dest="do_addremove_disks", 230 help="Skip disk addition/removal", 231 action="store_false", default=True), 232 cli.cli_option("--no-add-nics", dest="do_addremove_nics", 233 help="Skip NIC addition/removal", 234 action="store_false", default=True), 235 cli.cli_option("--no-nics", dest="nics", 236 help="No network interfaces", action="store_const", 237 const=[], 
default=[{}]), 238 cli.cli_option("--no-confd", dest="do_confd_tests", 239 help="Skip confd queries", 240 action="store_false", default=True), 241 cli.cli_option("--rename", dest="rename", default=None, 242 help=("Give one unused instance name which is taken" 243 " to start the renaming sequence"), 244 metavar="<instance_name>"), 245 cli.cli_option("-t", "--disk-template", dest="disk_template", 246 choices=list(_SUPPORTED_DISK_TEMPLATES), 247 default=constants.DT_DRBD8, 248 help=("Disk template (default %s, otherwise one of %s)" % 249 (constants.DT_DRBD8, 250 utils.CommaJoin(_SUPPORTED_DISK_TEMPLATES)))), 251 cli.cli_option("-n", "--nodes", dest="nodes", default="", 252 help=("Comma separated list of nodes to perform" 253 " the burnin on (defaults to all nodes)"), 254 completion_suggest=cli.OPT_COMPL_MANY_NODES), 255 cli.cli_option("-I", "--iallocator", dest="iallocator", 256 default=None, type="string", 257 help=("Perform the allocation using an iallocator" 258 " instead of fixed node spread (node restrictions no" 259 " longer apply, therefore -n/--nodes must not be" 260 " used"), 261 completion_suggest=cli.OPT_COMPL_ONE_IALLOCATOR), 262 cli.cli_option("-p", "--parallel", default=False, action="store_true", 263 dest="parallel", 264 help=("Enable parallelization of some operations in" 265 " order to speed burnin or to test granular locking")), 266 cli.cli_option("--net-timeout", default=15, type="int", 267 dest="net_timeout", 268 help=("The instance check network timeout in seconds" 269 " (defaults to 15 seconds)"), 270 completion_suggest="15 60 300 900".split()), 271 cli.cli_option("-C", "--http-check", default=False, action="store_true", 272 dest="http_check", 273 help=("Enable checking of instance status via http," 274 " looking for /hostname.txt that should contain the" 275 " name of the instance")), 276 cli.cli_option("-K", "--keep-instances", default=False, 277 action="store_true", 278 dest="keep_instances", 279 help=("Leave instances on the cluster after burnin," 280 " for investigation in case of errors or simply" 281 " to use them")), 282 cli.REASON_OPT, 283 ] 284 285 # Mainly used for bash completion 286 ARGUMENTS = [cli.ArgInstance(min=1)]
 287  
 288  
 289  def _DoCheckInstances(fn):
 290    """Decorator for checking instances.
 291  
 292    """
 293    def wrapper(self, *args, **kwargs):
 294      val = fn(self, *args, **kwargs)
 295      for instance in self.instances:
 296        self._CheckInstanceAlive(instance) # pylint: disable=W0212
 297      return val
 298  
 299    return wrapper
 300  
 301  
 302  def _DoBatch(retry):
 303    """Decorator for possible batch operations.
 304  
 305    Must come after the _DoCheckInstances decorator (if any).
 306  
 307    @param retry: whether this is a retryable batch, will be
 308        passed to StartBatch
 309  
 310    """
 311    def wrap(fn):
 312      def batched(self, *args, **kwargs):
 313        self.StartBatch(retry)
 314        val = fn(self, *args, **kwargs)
 315        self.CommitQueue()
 316        return val
 317      return batched
 318  
 319    return wrap
 320  
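A short, purely illustrative sketch of how the two decorators are stacked on the Burner methods further below (SomeOpcode is a made-up name): _DoCheckInstances is applied outermost, so the batch is started, the queued opcodes committed, and only afterwards is every instance probed for liveness.

  @_DoCheckInstances
  @_DoBatch(False)
  def BurnSomething(self):
    for instance in self.instances:
      self.ExecOrQueue(instance, [SomeOpcode(instance_name=instance)])

  # Calling BurnSomething() therefore expands to roughly:
  #   self.StartBatch(False)
  #   <the loop, queueing or directly executing the opcodes>
  #   self.CommitQueue()
  #   self._CheckInstanceAlive(instance) for every instance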
 321  
 322  class FeedbackAccumulator(object):
 323    """Feedback accumulator class."""
 324  
 325    _feed_buf = StringIO()
 326    opts = None
 327  
 328    def ClearFeedbackBuf(self):
 329      """Clear the feedback buffer."""
 330      self._feed_buf.truncate(0)
 331  
 332    def GetFeedbackBuf(self):
 333      """Return the contents of the buffer."""
 334      return self._feed_buf.getvalue()
 335  
 336    def Feedback(self, msg):
 337      """Accumulate feedback in our buffer."""
 338      formatted_msg = "%s %s" % (time.ctime(utils.MergeTime(msg[0])), msg[2])
 339      self._feed_buf.write(formatted_msg + "\n")
 340      if self.opts.verbose:
 341        Log(formatted_msg, indent=3)
 342  
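Feedback() consumes the job log entries that cli.PollJob forwards; judging from how msg[0] and msg[2] are used above, each entry is a (timestamp, log_type, message) tuple whose timestamp is the split-seconds pair that utils.MergeTime joins. An illustrative entry and the line it produces (the exact tuple layout is an assumption):

  # Hypothetical job log entry as handed to Feedback():
  entry = ((1325026951, 457911), constants.ELOG_MESSAGE, "waiting for sync")
  # Feedback(entry) appends roughly
  #   "Tue Dec 27 23:42:31 2011 waiting for sync"
  # to the buffer, and echoes it via Log(..., indent=3) when --verbose is set.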
 343  
 344  class JobHandler(FeedbackAccumulator):
 345    """Class for handling Ganeti jobs."""
 346  
 347    queued_ops = []
 348    queue_retry = False
 349  
 350    def __init__(self):
 351      self.cl = cli.GetClient()
 352  
 353    def MaybeRetry(self, retry_count, msg, fn, *args):
 354      """Possibly retry a given function execution.
 355  
 356      @type retry_count: int
 357      @param retry_count: retry counter:
 358          - 0: non-retryable action
 359          - 1: last retry for a retryable action
 360          - MAX_RETRIES: original try for a retryable action
 361      @type msg: str
 362      @param msg: the kind of the operation
 363      @type fn: callable
 364      @param fn: the function to be called
 365  
 366      """
 367      try:
 368        val = fn(*args)
 369        if retry_count > 0 and retry_count < MAX_RETRIES:
 370          Log("Idempotent %s succeeded after %d retries",
 371              msg, MAX_RETRIES - retry_count)
 372        return val
 373      except Exception, err: # pylint: disable=W0703
 374        if retry_count == 0:
 375          Log("Non-idempotent %s failed, aborting", msg)
 376          raise
 377        elif retry_count == 1:
 378          Log("Idempotent %s repeated failure, aborting", msg)
 379          raise
 380        else:
 381          Log("Idempotent %s failed, retry #%d/%d: %s",
 382              msg, MAX_RETRIES - retry_count + 1, MAX_RETRIES, err)
 383          self.MaybeRetry(retry_count - 1, msg, fn, *args)
 384  
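To make the counting convention above concrete: callers pass MAX_RETRIES for retryable work and 0 for non-retryable work, and the counter steps down towards 1 on each failure. A self-contained, simplified sketch of that ladder (unlike the method above, the sketch returns the recursive result):

  MAX_RETRIES = 3

  def maybe_retry(retry_count, fn):
    # 0 means non-retryable, 1 means last allowed attempt.
    try:
      return fn()
    except Exception:
      if retry_count <= 1:
        raise
      return maybe_retry(retry_count - 1, fn)

  attempts = []

  def flaky():
    attempts.append(1)
    if len(attempts) < 3:
      raise RuntimeError("transient failure")
    return "ok"

  print maybe_retry(MAX_RETRIES, flaky)   # succeeds on the third attempt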
385 - def _ExecOp(self, *ops):
386 """Execute one or more opcodes and manage the exec buffer. 387 388 @return: if only opcode has been passed, we return its result; 389 otherwise we return the list of results 390 391 """ 392 job_id = cli.SendJob(ops, cl=self.cl) 393 results = cli.PollJob(job_id, cl=self.cl, feedback_fn=self.Feedback) 394 if len(ops) == 1: 395 return results[0] 396 else: 397 return results
398
399 - def ExecOp(self, retry, *ops):
400 """Execute one or more opcodes and manage the exec buffer. 401 402 @return: if only opcode has been passed, we return its result; 403 otherwise we return the list of results 404 405 """ 406 if retry: 407 rval = MAX_RETRIES 408 else: 409 rval = 0 410 cli.SetGenericOpcodeOpts(ops, self.opts) 411 return self.MaybeRetry(rval, "opcode", self._ExecOp, *ops)
412
413 - def ExecOrQueue(self, name, ops, post_process=None):
414 """Execute an opcode and manage the exec buffer.""" 415 if self.opts.parallel: 416 cli.SetGenericOpcodeOpts(ops, self.opts) 417 self.queued_ops.append((ops, name, post_process)) 418 else: 419 val = self.ExecOp(self.queue_retry, *ops) # pylint: disable=W0142 420 if post_process is not None: 421 post_process() 422 return val
423
424 - def StartBatch(self, retry):
425 """Start a new batch of jobs. 426 427 @param retry: whether this is a retryable batch 428 429 """ 430 self.queued_ops = [] 431 self.queue_retry = retry
432
433 - def CommitQueue(self):
434 """Execute all submitted opcodes in case of parallel burnin""" 435 if not self.opts.parallel or not self.queued_ops: 436 return 437 438 if self.queue_retry: 439 rval = MAX_RETRIES 440 else: 441 rval = 0 442 443 try: 444 results = self.MaybeRetry(rval, "jobset", self.ExecJobSet, 445 self.queued_ops) 446 finally: 447 self.queued_ops = [] 448 return results
449
450 - def ExecJobSet(self, jobs):
451 """Execute a set of jobs and return once all are done. 452 453 The method will return the list of results, if all jobs are 454 successful. Otherwise, OpExecError will be raised from within 455 cli.py. 456 457 """ 458 self.ClearFeedbackBuf() 459 jex = cli.JobExecutor(cl=self.cl, feedback_fn=self.Feedback) 460 for ops, name, _ in jobs: 461 jex.QueueJob(name, *ops) # pylint: disable=W0142 462 try: 463 results = jex.GetResults() 464 except Exception, err: # pylint: disable=W0703 465 Log("Jobs failed: %s", err) 466 raise BurninFailure() 467 468 fail = False 469 val = [] 470 for (_, name, post_process), (success, result) in zip(jobs, results): 471 if success: 472 if post_process: 473 try: 474 post_process() 475 except Exception, err: # pylint: disable=W0703 476 Log("Post process call for job %s failed: %s", name, err) 477 fail = True 478 val.append(result) 479 else: 480 fail = True 481 482 if fail: 483 raise BurninFailure() 484 485 return val
486
487 488 -class Burner(JobHandler):
489 """Burner class.""" 490
491 - def __init__(self):
492 """Constructor.""" 493 super(Burner, self).__init__() 494 495 self.url_opener = SimpleOpener() 496 self.nodes = [] 497 self.instances = [] 498 self.to_rem = [] 499 self.disk_count = self.disk_growth = self.disk_size = None 500 self.hvp = self.bep = None 501 self.ParseOptions() 502 self.disk_nodes = {} 503 self.instance_nodes = {} 504 self.GetState() 505 self.confd_reply = None
506
507 - def ParseOptions(self):
508 """Parses the command line options. 509 510 In case of command line errors, it will show the usage and exit the 511 program. 512 513 """ 514 parser = optparse.OptionParser(usage="\n%s" % USAGE, 515 version=("%%prog (ganeti) %s" % 516 constants.RELEASE_VERSION), 517 option_list=OPTIONS) 518 519 options, args = parser.parse_args() 520 if len(args) < 1 or options.os is None: 521 Usage() 522 523 if options.mem_size: 524 options.maxmem_size = options.mem_size 525 options.minmem_size = options.mem_size 526 elif options.minmem_size > options.maxmem_size: 527 Err("Maximum memory lower than minimum memory") 528 529 if options.disk_template not in _SUPPORTED_DISK_TEMPLATES: 530 Err("Unknown or unsupported disk template '%s'" % options.disk_template) 531 532 if options.disk_template == constants.DT_DISKLESS: 533 disk_size = disk_growth = [] 534 options.do_addremove_disks = False 535 else: 536 disk_size = [utils.ParseUnit(v) for v in options.disk_size.split(",")] 537 disk_growth = [utils.ParseUnit(v) 538 for v in options.disk_growth.split(",")] 539 if len(disk_growth) != len(disk_size): 540 Err("Wrong disk sizes/growth combination") 541 if ((disk_size and options.disk_template == constants.DT_DISKLESS) or 542 (not disk_size and options.disk_template != constants.DT_DISKLESS)): 543 Err("Wrong disk count/disk template combination") 544 545 self.disk_size = disk_size 546 self.disk_growth = disk_growth 547 self.disk_count = len(disk_size) 548 549 if options.nodes and options.iallocator: 550 Err("Give either the nodes option or the iallocator option, not both") 551 552 if options.http_check and not options.name_check: 553 Err("Can't enable HTTP checks without name checks") 554 555 self.opts = options 556 self.instances = args 557 self.bep = { 558 constants.BE_MINMEM: options.minmem_size, 559 constants.BE_MAXMEM: options.maxmem_size, 560 constants.BE_VCPUS: options.vcpu_count, 561 } 562 563 self.hypervisor = None 564 self.hvp = {} 565 if options.hypervisor: 566 self.hypervisor, self.hvp = options.hypervisor 567 568 if options.reboot_types is None: 569 options.reboot_types = constants.REBOOT_TYPES 570 else: 571 options.reboot_types = options.reboot_types.split(",") 572 rt_diff = set(options.reboot_types).difference(constants.REBOOT_TYPES) 573 if rt_diff: 574 Err("Invalid reboot types specified: %s" % utils.CommaJoin(rt_diff)) 575 576 socket.setdefaulttimeout(options.net_timeout)
577
578 - def GetState(self):
579 """Read the cluster state from the master daemon.""" 580 if self.opts.nodes: 581 names = self.opts.nodes.split(",") 582 else: 583 names = [] 584 try: 585 qcl = GetClient() 586 result = qcl.QueryNodes(names, ["name", "offline", "drained"], False) 587 except errors.GenericError, err: 588 err_code, msg = cli.FormatError(err) 589 Err(msg, exit_code=err_code) 590 finally: 591 qcl.Close() 592 self.nodes = [data[0] for data in result if not (data[1] or data[2])] 593 594 op_diagnose = opcodes.OpOsDiagnose(output_fields=["name", 595 "variants", 596 "hidden"], 597 names=[]) 598 result = self.ExecOp(True, op_diagnose) 599 600 if not result: 601 Err("Can't get the OS list") 602 603 found = False 604 for (name, variants, _) in result: 605 if self.opts.os in cli.CalculateOSNames(name, variants): 606 found = True 607 break 608 609 if not found: 610 Err("OS '%s' not found" % self.opts.os) 611 612 cluster_info = self.cl.QueryClusterInfo() 613 self.cluster_info = cluster_info 614 if not self.cluster_info: 615 Err("Can't get cluster info") 616 617 default_nic_params = self.cluster_info["nicparams"][constants.PP_DEFAULT] 618 self.cluster_default_nicparams = default_nic_params 619 if self.hypervisor is None: 620 self.hypervisor = self.cluster_info["default_hypervisor"] 621 self.hv_can_migrate = \ 622 hypervisor.GetHypervisorClass(self.hypervisor).CAN_MIGRATE
623
624 - def FindMatchingDisk(self, instance):
625 """Find a disk whose nodes match the instance's disk nodes.""" 626 instance_nodes = self.instance_nodes[instance] 627 for disk, disk_nodes in self.disk_nodes.iteritems(): 628 if instance_nodes == disk_nodes: 629 # Erase that disk from the dictionary so that we don't pick it again. 630 del self.disk_nodes[disk] 631 return disk 632 Err("Couldn't find matching detached disk for instance %s" % instance)
633 634 @_DoCheckInstances 635 @_DoBatch(False)
636 - def BurnCreateInstances(self):
637 """Create the given instances. 638 639 """ 640 self.to_rem = [] 641 mytor = izip(cycle(self.nodes), 642 islice(cycle(self.nodes), 1, None), 643 self.instances) 644 645 Log("Creating instances") 646 for pnode, snode, instance in mytor: 647 Log("instance %s", instance, indent=1) 648 if self.opts.iallocator: 649 pnode = snode = None 650 msg = "with iallocator %s" % self.opts.iallocator 651 elif self.opts.disk_template not in constants.DTS_INT_MIRROR: 652 snode = None 653 msg = "on %s" % pnode 654 else: 655 msg = "on %s, %s" % (pnode, snode) 656 657 Log(msg, indent=2) 658 659 op = opcodes.OpInstanceCreate(instance_name=instance, 660 disks=[{"size": size} 661 for size in self.disk_size], 662 disk_template=self.opts.disk_template, 663 nics=self.opts.nics, 664 mode=constants.INSTANCE_CREATE, 665 os_type=self.opts.os, 666 pnode=pnode, 667 snode=snode, 668 start=True, 669 ip_check=self.opts.ip_check, 670 name_check=self.opts.name_check, 671 wait_for_sync=True, 672 file_driver="loop", 673 file_storage_dir=None, 674 iallocator=self.opts.iallocator, 675 beparams=self.bep, 676 hvparams=self.hvp, 677 hypervisor=self.hypervisor, 678 osparams=self.opts.osparams, 679 ) 680 remove_instance = lambda name: lambda: self.to_rem.append(name) 681 self.ExecOrQueue(instance, [op], post_process=remove_instance(instance))
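The izip/islice/cycle combination above walks the node list with the secondary offset by one position from the primary, wrapping around as needed. A small runnable illustration with made-up node and instance names:

  from itertools import cycle, islice, izip

  nodes = ["node1", "node2", "node3"]
  instances = ["inst1", "inst2", "inst3", "inst4"]
  for pnode, snode, inst in izip(cycle(nodes),
                                 islice(cycle(nodes), 1, None), instances):
    print inst, pnode, snode
  # inst1 node1 node2
  # inst2 node2 node3
  # inst3 node3 node1
  # inst4 node1 node2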
682 683 @_DoBatch(False)
684 - def BurnModifyRuntimeMemory(self):
685 """Alter the runtime memory.""" 686 Log("Setting instance runtime memory") 687 for instance in self.instances: 688 Log("instance %s", instance, indent=1) 689 tgt_mem = self.bep[constants.BE_MINMEM] 690 op = opcodes.OpInstanceSetParams(instance_name=instance, 691 runtime_mem=tgt_mem) 692 Log("Set memory to %s MB", tgt_mem, indent=2) 693 self.ExecOrQueue(instance, [op])
694 695 @_DoBatch(False)
696 - def BurnGrowDisks(self):
697 """Grow both the os and the swap disks by the requested amount, if any.""" 698 Log("Growing disks") 699 for instance in self.instances: 700 Log("instance %s", instance, indent=1) 701 for idx, growth in enumerate(self.disk_growth): 702 if growth > 0: 703 op = opcodes.OpInstanceGrowDisk(instance_name=instance, disk=idx, 704 amount=growth, wait_for_sync=True, 705 ignore_ipolicy=True) 706 Log("increase disk/%s by %s MB", idx, growth, indent=2) 707 self.ExecOrQueue(instance, [op])
708 709 @_DoBatch(True)
710 - def BurnReplaceDisks1D8(self):
711 """Replace disks on primary and secondary for drbd8.""" 712 Log("Replacing disks on the same nodes") 713 early_release = self.opts.early_release 714 for instance in self.instances: 715 Log("instance %s", instance, indent=1) 716 ops = [] 717 for mode in constants.REPLACE_DISK_SEC, constants.REPLACE_DISK_PRI: 718 op = opcodes.OpInstanceReplaceDisks(instance_name=instance, 719 mode=mode, 720 disks=list(range(self.disk_count)), 721 early_release=early_release) 722 Log("run %s", mode, indent=2) 723 ops.append(op) 724 self.ExecOrQueue(instance, ops)
725 726 @_DoBatch(True)
727 - def BurnReplaceDisks2(self):
728 """Replace secondary node.""" 729 Log("Changing the secondary node") 730 mode = constants.REPLACE_DISK_CHG 731 732 mytor = izip(islice(cycle(self.nodes), 2, None), 733 self.instances) 734 for tnode, instance in mytor: 735 Log("instance %s", instance, indent=1) 736 if self.opts.iallocator: 737 tnode = None 738 msg = "with iallocator %s" % self.opts.iallocator 739 else: 740 msg = tnode 741 op = opcodes.OpInstanceReplaceDisks(instance_name=instance, 742 mode=mode, 743 remote_node=tnode, 744 iallocator=self.opts.iallocator, 745 disks=[], 746 early_release=self.opts.early_release) 747 Log("run %s %s", mode, msg, indent=2) 748 self.ExecOrQueue(instance, [op])
749 750 @_DoCheckInstances 751 @_DoBatch(False)
752 - def BurnFailover(self):
753 """Failover the instances.""" 754 Log("Failing over instances") 755 for instance in self.instances: 756 Log("instance %s", instance, indent=1) 757 op = opcodes.OpInstanceFailover(instance_name=instance, 758 ignore_consistency=False) 759 self.ExecOrQueue(instance, [op])
760 761 @_DoCheckInstances 762 @_DoBatch(False)
763 - def BurnMove(self):
764 """Move the instances.""" 765 Log("Moving instances") 766 mytor = izip(islice(cycle(self.nodes), 1, None), 767 self.instances) 768 for tnode, instance in mytor: 769 Log("instance %s", instance, indent=1) 770 op = opcodes.OpInstanceMove(instance_name=instance, 771 target_node=tnode) 772 self.ExecOrQueue(instance, [op])
773 774 @_DoBatch(False)
775 - def BurnMigrate(self):
776 """Migrate the instances.""" 777 Log("Migrating instances") 778 for instance in self.instances: 779 Log("instance %s", instance, indent=1) 780 op1 = opcodes.OpInstanceMigrate(instance_name=instance, mode=None, 781 cleanup=False) 782 783 op2 = opcodes.OpInstanceMigrate(instance_name=instance, mode=None, 784 cleanup=True) 785 Log("migration and migration cleanup", indent=2) 786 self.ExecOrQueue(instance, [op1, op2])
787 788 @_DoCheckInstances 789 @_DoBatch(False)
790 - def BurnImportExport(self):
791 """Export the instance, delete it, and import it back. 792 793 """ 794 Log("Exporting and re-importing instances") 795 mytor = izip(cycle(self.nodes), 796 islice(cycle(self.nodes), 1, None), 797 islice(cycle(self.nodes), 2, None), 798 self.instances) 799 800 qcl = GetClient() 801 for pnode, snode, enode, instance in mytor: 802 Log("instance %s", instance, indent=1) 803 # read the full name of the instance 804 ((full_name, ), ) = qcl.QueryInstances([instance], ["name"], False) 805 806 if self.opts.iallocator: 807 pnode = snode = None 808 import_log_msg = ("import from %s" 809 " with iallocator %s" % 810 (enode, self.opts.iallocator)) 811 elif self.opts.disk_template not in constants.DTS_INT_MIRROR: 812 snode = None 813 import_log_msg = ("import from %s to %s" % 814 (enode, pnode)) 815 else: 816 import_log_msg = ("import from %s to %s, %s" % 817 (enode, pnode, snode)) 818 819 exp_op = opcodes.OpBackupExport(instance_name=instance, 820 target_node=enode, 821 mode=constants.EXPORT_MODE_LOCAL, 822 shutdown=True) 823 rem_op = opcodes.OpInstanceRemove(instance_name=instance, 824 ignore_failures=True) 825 imp_dir = utils.PathJoin(pathutils.EXPORT_DIR, full_name) 826 imp_op = opcodes.OpInstanceCreate(instance_name=instance, 827 disks=[{"size": size} 828 for size in self.disk_size], 829 disk_template=self.opts.disk_template, 830 nics=self.opts.nics, 831 mode=constants.INSTANCE_IMPORT, 832 src_node=enode, 833 src_path=imp_dir, 834 pnode=pnode, 835 snode=snode, 836 start=True, 837 ip_check=self.opts.ip_check, 838 name_check=self.opts.name_check, 839 wait_for_sync=True, 840 file_storage_dir=None, 841 file_driver="loop", 842 iallocator=self.opts.iallocator, 843 beparams=self.bep, 844 hvparams=self.hvp, 845 osparams=self.opts.osparams, 846 ) 847 848 erem_op = opcodes.OpBackupRemove(instance_name=instance) 849 850 Log("export to node %s", enode, indent=2) 851 Log("remove instance", indent=2) 852 Log(import_log_msg, indent=2) 853 Log("remove export", indent=2) 854 self.ExecOrQueue(instance, [exp_op, rem_op, imp_op, erem_op]) 855 qcl.Close()
856 857 @staticmethod
858 - def StopInstanceOp(instance):
859 """Stop given instance.""" 860 return opcodes.OpInstanceShutdown(instance_name=instance)
861 862 @staticmethod
863 - def StartInstanceOp(instance):
864 """Start given instance.""" 865 return opcodes.OpInstanceStartup(instance_name=instance, force=False)
866 867 @staticmethod
868 - def RenameInstanceOp(instance, instance_new, name_check, ip_check):
869 """Rename instance.""" 870 return opcodes.OpInstanceRename(instance_name=instance, 871 new_name=instance_new, 872 name_check=name_check, 873 ip_check=ip_check)
874 875 @_DoCheckInstances 876 @_DoBatch(True)
877 - def BurnStopStart(self):
878 """Stop/start the instances.""" 879 Log("Stopping and starting instances") 880 for instance in self.instances: 881 Log("instance %s", instance, indent=1) 882 op1 = self.StopInstanceOp(instance) 883 op2 = self.StartInstanceOp(instance) 884 self.ExecOrQueue(instance, [op1, op2])
885 886 @_DoBatch(False)
887 - def BurnRemove(self):
888 """Remove the instances.""" 889 Log("Removing instances") 890 for instance in self.to_rem: 891 Log("instance %s", instance, indent=1) 892 op = opcodes.OpInstanceRemove(instance_name=instance, 893 ignore_failures=True) 894 self.ExecOrQueue(instance, [op])
895
896 - def BurnRename(self, name_check, ip_check):
897 """Rename the instances. 898 899 Note that this function will not execute in parallel, since we 900 only have one target for rename. 901 902 """ 903 Log("Renaming instances") 904 rename = self.opts.rename 905 for instance in self.instances: 906 Log("instance %s", instance, indent=1) 907 op_stop1 = self.StopInstanceOp(instance) 908 op_stop2 = self.StopInstanceOp(rename) 909 op_rename1 = self.RenameInstanceOp(instance, rename, name_check, ip_check) 910 op_rename2 = self.RenameInstanceOp(rename, instance, name_check, ip_check) 911 op_start1 = self.StartInstanceOp(rename) 912 op_start2 = self.StartInstanceOp(instance) 913 self.ExecOp(False, op_stop1, op_rename1, op_start1) 914 self._CheckInstanceAlive(rename) 915 self.ExecOp(False, op_stop2, op_rename2, op_start2) 916 self._CheckInstanceAlive(instance)
917 918 @_DoCheckInstances 919 @_DoBatch(True)
920 - def BurnReinstall(self):
921 """Reinstall the instances.""" 922 Log("Reinstalling instances") 923 for instance in self.instances: 924 Log("instance %s", instance, indent=1) 925 op1 = self.StopInstanceOp(instance) 926 op2 = opcodes.OpInstanceReinstall(instance_name=instance) 927 Log("reinstall without passing the OS", indent=2) 928 op3 = opcodes.OpInstanceReinstall(instance_name=instance, 929 os_type=self.opts.os) 930 Log("reinstall specifying the OS", indent=2) 931 op4 = self.StartInstanceOp(instance) 932 self.ExecOrQueue(instance, [op1, op2, op3, op4])
933 934 @_DoCheckInstances 935 @_DoBatch(True)
936 - def BurnReboot(self):
937 """Reboot the instances.""" 938 Log("Rebooting instances") 939 for instance in self.instances: 940 Log("instance %s", instance, indent=1) 941 ops = [] 942 for reboot_type in self.opts.reboot_types: 943 op = opcodes.OpInstanceReboot(instance_name=instance, 944 reboot_type=reboot_type, 945 ignore_secondaries=False) 946 Log("reboot with type '%s'", reboot_type, indent=2) 947 ops.append(op) 948 self.ExecOrQueue(instance, ops)
949 950 @_DoCheckInstances 951 @_DoBatch(True)
952 - def BurnRenameSame(self, name_check, ip_check):
953 """Rename the instances to their own name.""" 954 Log("Renaming the instances to their own name") 955 for instance in self.instances: 956 Log("instance %s", instance, indent=1) 957 op1 = self.StopInstanceOp(instance) 958 op2 = self.RenameInstanceOp(instance, instance, name_check, ip_check) 959 Log("rename to the same name", indent=2) 960 op4 = self.StartInstanceOp(instance) 961 self.ExecOrQueue(instance, [op1, op2, op4])
962 963 @_DoCheckInstances 964 @_DoBatch(True)
965 - def BurnActivateDisks(self):
966 """Activate and deactivate disks of the instances.""" 967 Log("Activating/deactivating disks") 968 for instance in self.instances: 969 Log("instance %s", instance, indent=1) 970 op_start = self.StartInstanceOp(instance) 971 op_act = opcodes.OpInstanceActivateDisks(instance_name=instance) 972 op_deact = opcodes.OpInstanceDeactivateDisks(instance_name=instance) 973 op_stop = self.StopInstanceOp(instance) 974 Log("activate disks when online", indent=2) 975 Log("activate disks when offline", indent=2) 976 Log("deactivate disks (when offline)", indent=2) 977 self.ExecOrQueue(instance, [op_act, op_stop, op_act, op_deact, op_start])
978 979 @_DoBatch(False)
980 - def BurnAddRemoveNICs(self):
981 """Add, change and remove an extra NIC for the instances.""" 982 Log("Adding and removing NICs") 983 for instance in self.instances: 984 Log("instance %s", instance, indent=1) 985 op_add = opcodes.OpInstanceSetParams( 986 instance_name=instance, nics=[(constants.DDM_ADD, {})]) 987 op_chg = opcodes.OpInstanceSetParams( 988 instance_name=instance, nics=[(constants.DDM_MODIFY, 989 -1, {"mac": constants.VALUE_GENERATE})]) 990 op_rem = opcodes.OpInstanceSetParams( 991 instance_name=instance, nics=[(constants.DDM_REMOVE, {})]) 992 Log("adding a NIC", indent=2) 993 Log("changing a NIC", indent=2) 994 Log("removing last NIC", indent=2) 995 self.ExecOrQueue(instance, [op_add, op_chg, op_rem])
996
997 - def ConfdCallback(self, reply):
998 """Callback for confd queries""" 999 if reply.type == confd_client.UPCALL_REPLY: 1000 if reply.server_reply.status != constants.CONFD_REPL_STATUS_OK: 1001 Err("Query %s gave non-ok status %s: %s" % (reply.orig_request, 1002 reply.server_reply.status, 1003 reply.server_reply)) 1004 if reply.orig_request.type == constants.CONFD_REQ_PING: 1005 Log("Ping: OK", indent=1) 1006 elif reply.orig_request.type == constants.CONFD_REQ_CLUSTER_MASTER: 1007 if reply.server_reply.answer == self.cluster_info["master"]: 1008 Log("Master: OK", indent=1) 1009 else: 1010 Err("Master: wrong: %s" % reply.server_reply.answer) 1011 elif reply.orig_request.type == constants.CONFD_REQ_NODE_ROLE_BYNAME: 1012 if reply.server_reply.answer == constants.CONFD_NODE_ROLE_MASTER: 1013 Log("Node role for master: OK", indent=1) 1014 else: 1015 Err("Node role for master: wrong: %s" % reply.server_reply.answer) 1016 elif reply.orig_request.type == constants.CONFD_REQ_INSTANCE_DISKS: 1017 self.confd_reply = reply.server_reply.answer
1018
1019 - def DoConfdRequestReply(self, req):
1020 self.confd_counting_callback.RegisterQuery(req.rsalt) 1021 self.confd_client.SendRequest(req, async=False) 1022 while not self.confd_counting_callback.AllAnswered(): 1023 if not self.confd_client.ReceiveReply(): 1024 Err("Did not receive all expected confd replies") 1025 break
1026
1027 - def BurnConfd(self):
1028 """Run confd queries for our instances. 1029 1030 The following confd queries are tested: 1031 - CONFD_REQ_PING: simple ping 1032 - CONFD_REQ_CLUSTER_MASTER: cluster master 1033 - CONFD_REQ_NODE_ROLE_BYNAME: node role, for the master 1034 1035 """ 1036 Log("Checking confd results") 1037 1038 filter_callback = confd_client.ConfdFilterCallback(self.ConfdCallback) 1039 counting_callback = confd_client.ConfdCountingCallback(filter_callback) 1040 self.confd_counting_callback = counting_callback 1041 1042 self.confd_client = confd_client.GetConfdClient(counting_callback) 1043 1044 req = confd_client.ConfdClientRequest(type=constants.CONFD_REQ_PING) 1045 self.DoConfdRequestReply(req) 1046 1047 req = confd_client.ConfdClientRequest( 1048 type=constants.CONFD_REQ_CLUSTER_MASTER) 1049 self.DoConfdRequestReply(req) 1050 1051 req = confd_client.ConfdClientRequest( 1052 type=constants.CONFD_REQ_NODE_ROLE_BYNAME, 1053 query=self.cluster_info["master"]) 1054 self.DoConfdRequestReply(req)
1055 1056 @_DoCheckInstances 1057 @_DoBatch(False)
1058 - def BurnAddDisks(self):
1059 """Add an extra disk to every instance and then detach it.""" 1060 Log("Adding and detaching disks") 1061 1062 # Instantiate a Confd client 1063 filter_callback = confd_client.ConfdFilterCallback(self.ConfdCallback) 1064 counting_callback = confd_client.ConfdCountingCallback(filter_callback) 1065 self.confd_counting_callback = counting_callback 1066 self.confd_client = confd_client.GetConfdClient(counting_callback) 1067 1068 # Iterate all instances, start them, add a disk with a unique name and 1069 # detach it. Do all disk operations with hotplugging (if possible). 1070 for instance in self.instances: 1071 Log("instance %s", instance, indent=1) 1072 1073 # Fetch disk info for an instance from the confd. The result of the query 1074 # will be stored in the confd_reply attribute of Burner. 1075 req = (confd_client.ConfdClientRequest( 1076 type=constants.CONFD_REQ_INSTANCE_DISKS, query=instance)) 1077 self.DoConfdRequestReply(req) 1078 1079 disk_name = RandomString() 1080 1081 nodes = [set(disk["nodes"]) for disk in self.confd_reply] 1082 nodes = reduce(or_, nodes) 1083 self.instance_nodes[instance] = nodes 1084 self.disk_nodes[disk_name] = nodes 1085 1086 op_stop = self.StopInstanceOp(instance) 1087 op_add = opcodes.OpInstanceSetParams( 1088 instance_name=instance, hotplug_if_possible=True, 1089 disks=[(constants.DDM_ADD, {"size": self.disk_size[0], 1090 "name": disk_name})]) 1091 op_detach = opcodes.OpInstanceSetParams( 1092 instance_name=instance, hotplug_if_possible=True, 1093 disks=[(constants.DDM_DETACH, {})]) 1094 op_start = self.StartInstanceOp(instance) 1095 Log("adding a disk with name %s" % disk_name, indent=2) 1096 Log("detaching last disk", indent=2) 1097 self.ExecOrQueue(instance, [op_start, op_add, op_detach, op_stop, 1098 op_start])
1099 1100 @_DoCheckInstances 1101 @_DoBatch(False)
1102 - def BurnRemoveDisks(self):
1103 """Attach a previously detached disk to an instance and then remove it.""" 1104 Log("Attaching and removing disks") 1105 1106 # Iterate all instances in random order, attach the detached disks, remove 1107 # them and then restart the instances. Do all disk operation with 1108 # hotplugging (if possible). 1109 instances_copy = list(self.instances) 1110 random.shuffle(instances_copy) 1111 for instance in instances_copy: 1112 Log("instance %s", instance, indent=1) 1113 1114 disk_name = self.FindMatchingDisk(instance) 1115 op_attach = opcodes.OpInstanceSetParams( 1116 instance_name=instance, hotplug_if_possible=True, 1117 disks=[(constants.DDM_ATTACH, {"name": disk_name})]) 1118 op_rem = opcodes.OpInstanceSetParams( 1119 instance_name=instance, hotplug_if_possible=True, 1120 disks=[(constants.DDM_REMOVE, {})]) 1121 op_stop = self.StopInstanceOp(instance) 1122 op_start = self.StartInstanceOp(instance) 1123 Log("attaching a disk with name %s" % disk_name, indent=2) 1124 Log("removing last disk", indent=2) 1125 self.ExecOrQueue(instance, [op_attach, op_rem, op_stop, op_start]) 1126 1127 # Disk nodes are useful only for this test. 1128 del self.disk_nodes 1129 del self.instance_nodes
1130
1131 - def _CheckInstanceAlive(self, instance):
1132 """Check if an instance is alive by doing http checks. 1133 1134 This will try to retrieve the url on the instance /hostname.txt 1135 and check that it contains the hostname of the instance. In case 1136 we get ECONNREFUSED, we retry up to the net timeout seconds, for 1137 any other error we abort. 1138 1139 """ 1140 if not self.opts.http_check: 1141 return 1142 end_time = time.time() + self.opts.net_timeout 1143 url = None 1144 while time.time() < end_time and url is None: 1145 try: 1146 url = self.url_opener.open("http://%s/hostname.txt" % instance) 1147 except IOError: 1148 # here we can have connection refused, no route to host, etc. 1149 time.sleep(1) 1150 if url is None: 1151 raise InstanceDown(instance, "Cannot contact instance") 1152 hostname = url.read().strip() 1153 url.close() 1154 if hostname != instance: 1155 raise InstanceDown(instance, ("Hostname mismatch, expected %s, got %s" % 1156 (instance, hostname)))
1157
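The -C/--http-check option therefore requires every burnin instance to answer requests for /hostname.txt with its own name. Purely as an illustration of that contract (not part of burnin), a standard-library responder one could run inside a test instance, assuming port 80 is free and the FQDN matches the instance name given to burnin:

  import BaseHTTPServer
  import socket

  class HostnameHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    def do_GET(self):
      name = socket.getfqdn()   # assumed to equal the burnin instance name
      self.send_response(200)
      self.send_header("Content-Type", "text/plain")
      self.end_headers()
      self.wfile.write(name + "\n")   # burnin strips whitespace before comparing

  BaseHTTPServer.HTTPServer(("", 80), HostnameHandler).serve_forever()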
1158 - def BurninCluster(self):
1159 """Test a cluster intensively. 1160 1161 This will create instances and then start/stop/failover them. 1162 It is safe for existing instances but could impact performance. 1163 1164 """ 1165 1166 Log("Testing global parameters") 1167 1168 if (len(self.nodes) == 1 and 1169 self.opts.disk_template not in _SINGLE_NODE_DISK_TEMPLATES): 1170 Err("When one node is available/selected the disk template must" 1171 " be one of %s" % utils.CommaJoin(_SINGLE_NODE_DISK_TEMPLATES)) 1172 1173 has_err = True 1174 try: 1175 self.BurnCreateInstances() 1176 1177 if self.opts.do_startstop: 1178 self.BurnStopStart() 1179 1180 if self.bep[constants.BE_MINMEM] < self.bep[constants.BE_MAXMEM]: 1181 self.BurnModifyRuntimeMemory() 1182 1183 if self.opts.do_replace1 and \ 1184 self.opts.disk_template in constants.DTS_INT_MIRROR: 1185 self.BurnReplaceDisks1D8() 1186 if (self.opts.do_replace2 and len(self.nodes) > 2 and 1187 self.opts.disk_template in constants.DTS_INT_MIRROR): 1188 self.BurnReplaceDisks2() 1189 1190 if (self.opts.disk_template in constants.DTS_GROWABLE and 1191 compat.any(n > 0 for n in self.disk_growth)): 1192 self.BurnGrowDisks() 1193 1194 if self.opts.do_failover and \ 1195 self.opts.disk_template in constants.DTS_MIRRORED: 1196 self.BurnFailover() 1197 1198 if self.opts.do_migrate: 1199 if self.opts.disk_template not in constants.DTS_MIRRORED: 1200 Log("Skipping migration (disk template %s does not support it)", 1201 self.opts.disk_template) 1202 elif not self.hv_can_migrate: 1203 Log("Skipping migration (hypervisor %s does not support it)", 1204 self.hypervisor) 1205 else: 1206 self.BurnMigrate() 1207 1208 if (self.opts.do_move and len(self.nodes) > 1 and 1209 self.opts.disk_template in [constants.DT_PLAIN, constants.DT_FILE]): 1210 self.BurnMove() 1211 1212 if (self.opts.do_importexport and 1213 self.opts.disk_template in _IMPEXP_DISK_TEMPLATES): 1214 self.BurnImportExport() 1215 1216 if self.opts.do_reinstall: 1217 self.BurnReinstall() 1218 1219 if self.opts.do_reboot: 1220 self.BurnReboot() 1221 1222 if self.opts.do_renamesame: 1223 self.BurnRenameSame(self.opts.name_check, self.opts.ip_check) 1224 1225 if self.opts.do_confd_tests: 1226 self.BurnConfd() 1227 1228 default_nic_mode = self.cluster_default_nicparams[constants.NIC_MODE] 1229 # Don't add/remove nics in routed mode, as we would need an ip to add 1230 # them with 1231 if self.opts.do_addremove_nics: 1232 if default_nic_mode == constants.NIC_MODE_BRIDGED: 1233 self.BurnAddRemoveNICs() 1234 else: 1235 Log("Skipping nic add/remove as the cluster is not in bridged mode") 1236 1237 if self.opts.do_activate_disks: 1238 self.BurnActivateDisks() 1239 1240 if self.opts.do_addremove_disks: 1241 self.BurnAddDisks() 1242 self.BurnRemoveDisks() 1243 1244 if self.opts.rename: 1245 self.BurnRename(self.opts.name_check, self.opts.ip_check) 1246 1247 has_err = False 1248 finally: 1249 if has_err: 1250 Log("Error detected: opcode buffer follows:\n\n") 1251 Log(self.GetFeedbackBuf()) 1252 Log("\n\n") 1253 if not self.opts.keep_instances: 1254 try: 1255 self.BurnRemove() 1256 except Exception, err: # pylint: disable=W0703 1257 if has_err: # already detected errors, so errors in removal 1258 # are quite expected 1259 Log("Note: error detected during instance remove: %s", err) 1260 else: # non-expected error 1261 raise 1262 1263 return constants.EXIT_SUCCESS
1264
1265 1266 -def Main():
1267 """Main function. 1268 1269 """ 1270 utils.SetupLogging(pathutils.LOG_BURNIN, sys.argv[0], 1271 debug=False, stderr_logging=True) 1272 1273 return Burner().BurninCluster()
1274