"""Burnin program

"""

import sys
import optparse
import time
import socket
import urllib
from itertools import izip, islice, cycle
from cStringIO import StringIO

from ganeti import opcodes
from ganeti import constants
from ganeti import cli
from ganeti import errors
from ganeti import utils
from ganeti import hypervisor
from ganeti import compat
from ganeti import pathutils

from ganeti.confd import client as confd_client
from ganeti.runtime import (GetClient)


USAGE = ("\tburnin -o OS_NAME [options...] instance_name ...")

MAX_RETRIES = 3
LOG_HEADERS = {
  0: "- ",
  1: "* ",
  2: "",
  }


_SINGLE_NODE_DISK_TEMPLATES = compat.UniqueFrozenset([
  constants.DT_DISKLESS,
  constants.DT_PLAIN,
  constants.DT_FILE,
  constants.DT_SHARED_FILE,
  constants.DT_EXT,
  constants.DT_RBD,
  constants.DT_GLUSTER
  ])

_SUPPORTED_DISK_TEMPLATES = compat.UniqueFrozenset([
  constants.DT_DISKLESS,
  constants.DT_DRBD8,
  constants.DT_EXT,
  constants.DT_FILE,
  constants.DT_PLAIN,
  constants.DT_RBD,
  constants.DT_SHARED_FILE,
  constants.DT_GLUSTER
  ])


_IMPEXP_DISK_TEMPLATES = (_SUPPORTED_DISK_TEMPLATES - frozenset([
  constants.DT_DISKLESS,
  constants.DT_FILE,
  constants.DT_SHARED_FILE,
  constants.DT_GLUSTER
  ]))


class InstanceDown(Exception):
  """The checked instance was not up"""


class BurninFailure(Exception):
  """Failure detected during burning"""


def Usage():
  """Shows program usage information and exits the program."""

  print >> sys.stderr, "Usage:"
  print >> sys.stderr, USAGE
  sys.exit(2)


def Log(msg, *args, **kwargs):
  """Simple function that prints out its argument.

  """
  if args:
    msg = msg % args
  indent = kwargs.get("indent", 0)
  sys.stdout.write("%*s%s%s\n" % (2 * indent, "",
                                  LOG_HEADERS.get(indent, "  "), msg))
  sys.stdout.flush()
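
# Example (a sketch): Log("instance %s", "web1", indent=1) prints
# "  * instance web1" -- two spaces per indent level, followed by the
# LOG_HEADERS prefix for that level.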


def Err(msg, exit_code=1):
  """Simple error logging that prints to stderr.

  """
  sys.stderr.write(msg + "\n")
  sys.stderr.flush()
  sys.exit(exit_code)


class SimpleOpener(urllib.FancyURLopener):
  """A simple url opener"""

  def prompt_user_passwd(self, host, realm):
    """No-interaction version of prompt_user_passwd."""
    # never prompt for credentials; let the request fail instead
    return None, None

  def http_error_default(self, url, fp, errcode, errmsg, headers):
    """Custom error handling"""
    # make sure sockets are not left in CLOSE_WAIT
    _ = fp.read()
    fp.close()
    raise InstanceDown("HTTP error returned: code %s, msg %s" %
                       (errcode, errmsg))


OPTIONS = [
  cli.cli_option("-o", "--os", dest="os", default=None,
                 help="OS to use during burnin",
                 metavar="<OS>",
                 completion_suggest=cli.OPT_COMPL_ONE_OS),
  cli.HYPERVISOR_OPT,
  cli.OSPARAMS_OPT,
  cli.cli_option("--disk-size", dest="disk_size",
                 help="Disk size (determines disk count)",
                 default="128m", type="string", metavar="<size,size,...>",
                 completion_suggest=("128M 512M 1G 4G 1G,256M"
                                     " 4G,1G,1G 10G").split()),
  cli.cli_option("--disk-growth", dest="disk_growth", help="Disk growth",
                 default="128m", type="string", metavar="<size,size,...>"),
  cli.cli_option("--mem-size", dest="mem_size", help="Memory size",
                 default=None, type="unit", metavar="<size>",
                 completion_suggest=("128M 256M 512M 1G 4G 8G"
                                     " 12G 16G").split()),
  cli.cli_option("--maxmem-size", dest="maxmem_size", help="Max Memory size",
                 default=256, type="unit", metavar="<size>",
                 completion_suggest=("128M 256M 512M 1G 4G 8G"
                                     " 12G 16G").split()),
  cli.cli_option("--minmem-size", dest="minmem_size", help="Min Memory size",
                 default=128, type="unit", metavar="<size>",
                 completion_suggest=("128M 256M 512M 1G 4G 8G"
                                     " 12G 16G").split()),
  cli.cli_option("--vcpu-count", dest="vcpu_count", help="VCPU count",
                 default=3, type="unit", metavar="<count>",
                 completion_suggest=("1 2 3 4").split()),
  cli.DEBUG_OPT,
  cli.VERBOSE_OPT,
  cli.NOIPCHECK_OPT,
  cli.NONAMECHECK_OPT,
  cli.EARLY_RELEASE_OPT,
  cli.cli_option("--no-replace1", dest="do_replace1",
                 help="Skip disk replacement with the same secondary",
                 action="store_false", default=True),
  cli.cli_option("--no-replace2", dest="do_replace2",
                 help="Skip disk replacement with a different secondary",
                 action="store_false", default=True),
  cli.cli_option("--no-failover", dest="do_failover",
                 help="Skip instance failovers", action="store_false",
                 default=True),
  cli.cli_option("--no-migrate", dest="do_migrate",
                 help="Skip instance live migration",
                 action="store_false", default=True),
  cli.cli_option("--no-move", dest="do_move",
                 help="Skip instance moves", action="store_false",
                 default=True),
  cli.cli_option("--no-importexport", dest="do_importexport",
                 help="Skip instance export/import", action="store_false",
                 default=True),
  cli.cli_option("--no-startstop", dest="do_startstop",
                 help="Skip instance stop/start", action="store_false",
                 default=True),
  cli.cli_option("--no-reinstall", dest="do_reinstall",
                 help="Skip instance reinstall", action="store_false",
                 default=True),
  cli.cli_option("--no-reboot", dest="do_reboot",
                 help="Skip instance reboot", action="store_false",
                 default=True),
  cli.cli_option("--no-renamesame", dest="do_renamesame",
                 help="Skip instance rename to same name",
                 action="store_false", default=True),
  cli.cli_option("--reboot-types", dest="reboot_types",
                 help="Specify the reboot types", default=None),
  cli.cli_option("--no-activate-disks", dest="do_activate_disks",
                 help="Skip disk activation/deactivation",
                 action="store_false", default=True),
  cli.cli_option("--no-add-disks", dest="do_addremove_disks",
                 help="Skip disk addition/removal",
                 action="store_false", default=True),
  cli.cli_option("--no-add-nics", dest="do_addremove_nics",
                 help="Skip NIC addition/removal",
                 action="store_false", default=True),
  cli.cli_option("--no-nics", dest="nics",
                 help="No network interfaces", action="store_const",
                 const=[], default=[{}]),
  cli.cli_option("--no-confd", dest="do_confd_tests",
                 help="Skip confd queries",
                 action="store_false", default=constants.ENABLE_CONFD),
  cli.cli_option("--rename", dest="rename", default=None,
                 help=("Give one unused instance name which is taken"
                       " to start the renaming sequence"),
                 metavar="<instance_name>"),
  cli.cli_option("-t", "--disk-template", dest="disk_template",
                 choices=list(_SUPPORTED_DISK_TEMPLATES),
                 default=constants.DT_DRBD8,
                 help=("Disk template (default %s, otherwise one of %s)" %
                       (constants.DT_DRBD8,
                        utils.CommaJoin(_SUPPORTED_DISK_TEMPLATES)))),
  cli.cli_option("-n", "--nodes", dest="nodes", default="",
                 help=("Comma separated list of nodes to perform"
                       " the burnin on (defaults to all nodes)"),
                 completion_suggest=cli.OPT_COMPL_MANY_NODES),
  cli.cli_option("-I", "--iallocator", dest="iallocator",
                 default=None, type="string",
                 help=("Perform the allocation using an iallocator"
                       " instead of fixed node spread (node restrictions no"
                       " longer apply, therefore -n/--nodes must not be"
                       " used)"),
                 completion_suggest=cli.OPT_COMPL_ONE_IALLOCATOR),
  cli.cli_option("-p", "--parallel", default=False, action="store_true",
                 dest="parallel",
                 help=("Enable parallelization of some operations in"
                       " order to speed burnin or to test granular locking")),
  cli.cli_option("--net-timeout", default=15, type="int",
                 dest="net_timeout",
                 help=("The instance check network timeout in seconds"
                       " (defaults to 15 seconds)"),
                 completion_suggest="15 60 300 900".split()),
  cli.cli_option("-C", "--http-check", default=False, action="store_true",
                 dest="http_check",
                 help=("Enable checking of instance status via http,"
                       " looking for /hostname.txt that should contain the"
                       " name of the instance")),
  cli.cli_option("-K", "--keep-instances", default=False,
                 action="store_true",
                 dest="keep_instances",
                 help=("Leave instances on the cluster after burnin,"
                       " for investigation in case of errors or simply"
                       " to use them")),
  cli.REASON_OPT,
  ]


ARGUMENTS = [cli.ArgInstance(min=1)]
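
# Example invocation (sketch; the OS and instance names are illustrative,
# not part of this file):
#   burnin -o debian-image -t drbd --disk-size=1G,256M instance1 instance2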


def _DoCheckInstances(fn):
  """Decorator for checking instances.

  """
  def wrapper(self, *args, **kwargs):
    val = fn(self, *args, **kwargs)
    for instance in self.instances:
      self._CheckInstanceAlive(instance)
    return val

  return wrapper


def _DoBatch(retry):
  """Decorator for possible batch operations.

  Must come after the _DoCheckInstances decorator (if any).

  @param retry: whether this is a retryable batch, will be
      passed to StartBatch

  """
  def wrap(fn):
    def batched(self, *args, **kwargs):
      self.StartBatch(retry)
      val = fn(self, *args, **kwargs)
      self.CommitQueue()
      return val
    return batched

  return wrap
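
# Stacking sketch: _DoCheckInstances must be the outer decorator, so the
# liveness checks run only after the whole batch has been committed:
#
#   @_DoCheckInstances
#   @_DoBatch(False)
#   def BurnSomething(self):
#     ...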


class Burner(object):
  """Burner class."""

  def __init__(self):
    """Constructor."""
    self.url_opener = SimpleOpener()
    self._feed_buf = StringIO()
    self.nodes = []
    self.instances = []
    self.to_rem = []
    self.queued_ops = []
    self.opts = None
    self.queue_retry = False
    self.disk_count = self.disk_growth = self.disk_size = None
    self.hvp = self.bep = None
    self.ParseOptions()
    self.cl = cli.GetClient()
    self.GetState()

  def ClearFeedbackBuf(self):
    """Clear the feedback buffer."""
    self._feed_buf.truncate(0)

  def GetFeedbackBuf(self):
    """Return the contents of the buffer."""
    return self._feed_buf.getvalue()

  def Feedback(self, msg):
    """Accumulate feedback in our buffer."""
    formatted_msg = "%s %s" % (time.ctime(utils.MergeTime(msg[0])), msg[2])
    self._feed_buf.write(formatted_msg + "\n")
    if self.opts.verbose:
      Log(formatted_msg, indent=3)

  def MaybeRetry(self, retry_count, msg, fn, *args):
    """Possibly retry a given function execution.

    @type retry_count: int
    @param retry_count: retry counter:
        - 0: non-retryable action
        - 1: last retry for a retryable action
        - MAX_RETRIES: original try for a retryable action
    @type msg: str
    @param msg: the kind of the operation
    @type fn: callable
    @param fn: the function to be called

    """
    try:
      val = fn(*args)
      if retry_count > 0 and retry_count < MAX_RETRIES:
        Log("Idempotent %s succeeded after %d retries",
            msg, MAX_RETRIES - retry_count)
      return val
    except Exception, err:
      if retry_count == 0:
        Log("Non-idempotent %s failed, aborting", msg)
        raise
      elif retry_count == 1:
        Log("Idempotent %s repeated failure, aborting", msg)
        raise
      else:
        Log("Idempotent %s failed, retry #%d/%d: %s",
            msg, MAX_RETRIES - retry_count + 1, MAX_RETRIES, err)
        # propagate the result of the retried call
        return self.MaybeRetry(retry_count - 1, msg, fn, *args)
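
  # Retry semantics (sketch): ExecOp below passes MAX_RETRIES for
  # retryable opcodes and 0 for non-retryable ones; each failed attempt
  # decrements the counter until the final attempt re-raises.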
380
382 """Execute one or more opcodes and manage the exec buffer.
383
384 @return: if only opcode has been passed, we return its result;
385 otherwise we return the list of results
386
387 """
388 job_id = cli.SendJob(ops, cl=self.cl)
389 results = cli.PollJob(job_id, cl=self.cl, feedback_fn=self.Feedback)
390 if len(ops) == 1:
391 return results[0]
392 else:
393 return results
394
395 - def ExecOp(self, retry, *ops):
396 """Execute one or more opcodes and manage the exec buffer.
397
398 @return: if only opcode has been passed, we return its result;
399 otherwise we return the list of results
400
401 """
402 if retry:
403 rval = MAX_RETRIES
404 else:
405 rval = 0
406 cli.SetGenericOpcodeOpts(ops, self.opts)
407 return self.MaybeRetry(rval, "opcode", self._ExecOp, *ops)

  def ExecOrQueue(self, name, ops, post_process=None):
    """Execute an opcode and manage the exec buffer."""
    if self.opts.parallel:
      cli.SetGenericOpcodeOpts(ops, self.opts)
      self.queued_ops.append((ops, name, post_process))
    else:
      val = self.ExecOp(self.queue_retry, *ops)
      if post_process is not None:
        post_process()
      return val

  def StartBatch(self, retry):
    """Start a new batch of jobs.

    @param retry: whether this is a retryable batch

    """
    self.queued_ops = []
    self.queue_retry = retry

  def CommitQueue(self):
    """Execute all submitted opcodes in case of parallel burnin"""
    if not self.opts.parallel or not self.queued_ops:
      return

    if self.queue_retry:
      rval = MAX_RETRIES
    else:
      rval = 0

    try:
      results = self.MaybeRetry(rval, "jobset", self.ExecJobSet,
                                self.queued_ops)
    finally:
      self.queued_ops = []
    return results

  def ExecJobSet(self, jobs):
    """Execute a set of jobs and return once all are done.

    The method will return the list of results, if all jobs are
    successful. Otherwise, OpExecError will be raised from within
    cli.py.

    """
    self.ClearFeedbackBuf()
    jex = cli.JobExecutor(cl=self.cl, feedback_fn=self.Feedback)
    for ops, name, _ in jobs:
      jex.QueueJob(name, *ops)
    try:
      results = jex.GetResults()
    except Exception, err:
      Log("Jobs failed: %s", err)
      raise BurninFailure()

    fail = False
    val = []
    for (_, name, post_process), (success, result) in zip(jobs, results):
      if success:
        if post_process:
          try:
            post_process()
          except Exception, err:
            Log("Post process call for job %s failed: %s", name, err)
            fail = True
        val.append(result)
      else:
        fail = True

    if fail:
      raise BurninFailure()

    return val

  def ParseOptions(self):
    """Parses the command line options.

    In case of command line errors, it will show the usage and exit the
    program.

    """
    parser = optparse.OptionParser(usage="\n%s" % USAGE,
                                   version=("%%prog (ganeti) %s" %
                                            constants.RELEASE_VERSION),
                                   option_list=OPTIONS)

    options, args = parser.parse_args()
    if len(args) < 1 or options.os is None:
      Usage()

    if options.mem_size:
      options.maxmem_size = options.mem_size
      options.minmem_size = options.mem_size
    elif options.minmem_size > options.maxmem_size:
      Err("Maximum memory lower than minimum memory")

    if options.disk_template not in _SUPPORTED_DISK_TEMPLATES:
      Err("Unknown or unsupported disk template '%s'" % options.disk_template)

    if options.disk_template == constants.DT_DISKLESS:
      disk_size = disk_growth = []
      options.do_addremove_disks = False
    else:
      disk_size = [utils.ParseUnit(v) for v in options.disk_size.split(",")]
      disk_growth = [utils.ParseUnit(v)
                     for v in options.disk_growth.split(",")]
      if len(disk_growth) != len(disk_size):
        Err("Wrong disk sizes/growth combination")
    if ((disk_size and options.disk_template == constants.DT_DISKLESS) or
        (not disk_size and options.disk_template != constants.DT_DISKLESS)):
      Err("Wrong disk count/disk template combination")

    self.disk_size = disk_size
    self.disk_growth = disk_growth
    self.disk_count = len(disk_size)

    if options.nodes and options.iallocator:
      Err("Give either the nodes option or the iallocator option, not both")

    if options.http_check and not options.name_check:
      Err("Can't enable HTTP checks without name checks")

    self.opts = options
    self.instances = args
    self.bep = {
      constants.BE_MINMEM: options.minmem_size,
      constants.BE_MAXMEM: options.maxmem_size,
      constants.BE_VCPUS: options.vcpu_count,
      }

    self.hypervisor = None
    self.hvp = {}
    if options.hypervisor:
      self.hypervisor, self.hvp = options.hypervisor

    if options.reboot_types is None:
      options.reboot_types = constants.REBOOT_TYPES
    else:
      options.reboot_types = options.reboot_types.split(",")
      rt_diff = set(options.reboot_types).difference(constants.REBOOT_TYPES)
      if rt_diff:
        Err("Invalid reboot types specified: %s" % utils.CommaJoin(rt_diff))

    socket.setdefaulttimeout(options.net_timeout)
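
  # Size parsing (sketch): utils.ParseUnit works in mebibytes, so
  # "--disk-size=1G,256M" yields disk_size == [1024, 256] and
  # disk_count == 2; --disk-growth must then supply two values as well.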
553
555 """Read the cluster state from the master daemon."""
556 if self.opts.nodes:
557 names = self.opts.nodes.split(",")
558 else:
559 names = []
560 try:
561 qcl = GetClient(query=True)
562 result = qcl.QueryNodes(names, ["name", "offline", "drained"], False)
563 except errors.GenericError, err:
564 err_code, msg = cli.FormatError(err)
565 Err(msg, exit_code=err_code)
566 finally:
567 qcl.Close()
568 self.nodes = [data[0] for data in result if not (data[1] or data[2])]
569
570 op_diagnose = opcodes.OpOsDiagnose(output_fields=["name",
571 "variants",
572 "hidden"],
573 names=[])
574 result = self.ExecOp(True, op_diagnose)
575
576 if not result:
577 Err("Can't get the OS list")
578
579 found = False
580 for (name, variants, _) in result:
581 if self.opts.os in cli.CalculateOSNames(name, variants):
582 found = True
583 break
584
585 if not found:
586 Err("OS '%s' not found" % self.opts.os)
587
588 cluster_info = self.cl.QueryClusterInfo()
589 self.cluster_info = cluster_info
590 if not self.cluster_info:
591 Err("Can't get cluster info")
592
593 default_nic_params = self.cluster_info["nicparams"][constants.PP_DEFAULT]
594 self.cluster_default_nicparams = default_nic_params
595 if self.hypervisor is None:
596 self.hypervisor = self.cluster_info["default_hypervisor"]
597 self.hv_can_migrate = \
598 hypervisor.GetHypervisorClass(self.hypervisor).CAN_MIGRATE

  @_DoCheckInstances
  @_DoBatch(False)
  def BurnCreateInstances(self):
    """Create the given instances.

    """
    self.to_rem = []
    mytor = izip(cycle(self.nodes),
                 islice(cycle(self.nodes), 1, None),
                 self.instances)

    Log("Creating instances")
    for pnode, snode, instance in mytor:
      Log("instance %s", instance, indent=1)
      if self.opts.iallocator:
        pnode = snode = None
        msg = "with iallocator %s" % self.opts.iallocator
      elif self.opts.disk_template not in constants.DTS_INT_MIRROR:
        snode = None
        msg = "on %s" % pnode
      else:
        msg = "on %s, %s" % (pnode, snode)

      Log(msg, indent=2)

      op = opcodes.OpInstanceCreate(instance_name=instance,
                                    disks=[{"size": size}
                                           for size in self.disk_size],
                                    disk_template=self.opts.disk_template,
                                    nics=self.opts.nics,
                                    mode=constants.INSTANCE_CREATE,
                                    os_type=self.opts.os,
                                    pnode=pnode,
                                    snode=snode,
                                    start=True,
                                    ip_check=self.opts.ip_check,
                                    name_check=self.opts.name_check,
                                    wait_for_sync=True,
                                    file_driver="loop",
                                    file_storage_dir=None,
                                    iallocator=self.opts.iallocator,
                                    beparams=self.bep,
                                    hvparams=self.hvp,
                                    hypervisor=self.hypervisor,
                                    osparams=self.opts.osparams,
                                    )
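      # Nested lambda so each post_process callback captures the current
      # instance name by value; it runs only after the job succeeds.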
      remove_instance = lambda name: lambda: self.to_rem.append(name)
      self.ExecOrQueue(instance, [op], post_process=remove_instance(instance))

  @_DoBatch(False)
  def BurnGrowDisks(self):
    """Grow both the os and the swap disks by the requested amount, if any."""
    Log("Growing disks")
    for instance in self.instances:
      Log("instance %s", instance, indent=1)
      for idx, growth in enumerate(self.disk_growth):
        if growth > 0:
          op = opcodes.OpInstanceGrowDisk(instance_name=instance, disk=idx,
                                          amount=growth, wait_for_sync=True)
          Log("increase disk/%s by %s MB", idx, growth, indent=2)
          self.ExecOrQueue(instance, [op])

  @_DoBatch(True)
  def BurnReplaceDisks2(self):
    """Replace secondary node."""
    Log("Changing the secondary node")
    mode = constants.REPLACE_DISK_CHG

    mytor = izip(islice(cycle(self.nodes), 2, None),
                 self.instances)
    for tnode, instance in mytor:
      Log("instance %s", instance, indent=1)
      if self.opts.iallocator:
        tnode = None
        msg = "with iallocator %s" % self.opts.iallocator
      else:
        msg = tnode
      op = opcodes.OpInstanceReplaceDisks(instance_name=instance,
                                          mode=mode,
                                          remote_node=tnode,
                                          iallocator=self.opts.iallocator,
                                          disks=[],
                                          early_release=
                                          self.opts.early_release)
      Log("run %s %s", mode, msg, indent=2)
      self.ExecOrQueue(instance, [op])

  @_DoCheckInstances
  @_DoBatch(False)
  def BurnImportExport(self):
    """Export the instance, delete it, and import it back.

    """
    Log("Exporting and re-importing instances")
    mytor = izip(cycle(self.nodes),
                 islice(cycle(self.nodes), 1, None),
                 islice(cycle(self.nodes), 2, None),
                 self.instances)

    qcl = GetClient(query=True)
    for pnode, snode, enode, instance in mytor:
      Log("instance %s", instance, indent=1)

      ((full_name, ), ) = qcl.QueryInstances([instance], ["name"], False)

      if self.opts.iallocator:
        pnode = snode = None
        import_log_msg = ("import from %s"
                          " with iallocator %s" %
                          (enode, self.opts.iallocator))
      elif self.opts.disk_template not in constants.DTS_INT_MIRROR:
        snode = None
        import_log_msg = ("import from %s to %s" %
                          (enode, pnode))
      else:
        import_log_msg = ("import from %s to %s, %s" %
                          (enode, pnode, snode))

      exp_op = opcodes.OpBackupExport(instance_name=instance,
                                      target_node=enode,
                                      mode=constants.EXPORT_MODE_LOCAL,
                                      shutdown=True)
      rem_op = opcodes.OpInstanceRemove(instance_name=instance,
                                        ignore_failures=True)
      imp_dir = utils.PathJoin(pathutils.EXPORT_DIR, full_name)
      imp_op = opcodes.OpInstanceCreate(instance_name=instance,
                                        disks=[{"size": size}
                                               for size in self.disk_size],
                                        disk_template=self.opts.disk_template,
                                        nics=self.opts.nics,
                                        mode=constants.INSTANCE_IMPORT,
                                        src_node=enode,
                                        src_path=imp_dir,
                                        pnode=pnode,
                                        snode=snode,
                                        start=True,
                                        ip_check=self.opts.ip_check,
                                        name_check=self.opts.name_check,
                                        wait_for_sync=True,
                                        file_storage_dir=None,
                                        file_driver="loop",
                                        iallocator=self.opts.iallocator,
                                        beparams=self.bep,
                                        hvparams=self.hvp,
                                        osparams=self.opts.osparams,
                                        )

      erem_op = opcodes.OpBackupRemove(instance_name=instance)

      Log("export to node %s", enode, indent=2)
      Log("remove instance", indent=2)
      Log(import_log_msg, indent=2)
      Log("remove export", indent=2)
      self.ExecOrQueue(instance, [exp_op, rem_op, imp_op, erem_op])
    qcl.Close()

  @staticmethod
  def StopInstanceOp(instance):
    """Stop given instance."""
    return opcodes.OpInstanceShutdown(instance_name=instance)

  @staticmethod
  def StartInstanceOp(instance):
    """Start given instance."""
    return opcodes.OpInstanceStartup(instance_name=instance, force=False)

  @staticmethod
  def RenameInstanceOp(instance, instance_new):
    """Rename instance."""
    return opcodes.OpInstanceRename(instance_name=instance,
                                    new_name=instance_new)

  def BurnRename(self):
    """Rename the instances.

    Note that this function will not execute in parallel, since we
    only have one target for rename.

    """
    Log("Renaming instances")
    rename = self.opts.rename
    for instance in self.instances:
      Log("instance %s", instance, indent=1)
      op_stop1 = self.StopInstanceOp(instance)
      op_stop2 = self.StopInstanceOp(rename)
      op_rename1 = self.RenameInstanceOp(instance, rename)
      op_rename2 = self.RenameInstanceOp(rename, instance)
      op_start1 = self.StartInstanceOp(rename)
      op_start2 = self.StartInstanceOp(instance)
      self.ExecOp(False, op_stop1, op_rename1, op_start1)
      self._CheckInstanceAlive(rename)
      self.ExecOp(False, op_stop2, op_rename2, op_start2)
      self._CheckInstanceAlive(instance)

  @_DoCheckInstances
  @_DoBatch(True)
  def BurnReboot(self):
    """Reboot the instances."""
    Log("Rebooting instances")
    for instance in self.instances:
      Log("instance %s", instance, indent=1)
      ops = []
      for reboot_type in self.opts.reboot_types:
        op = opcodes.OpInstanceReboot(instance_name=instance,
                                      reboot_type=reboot_type,
                                      ignore_secondaries=False)
        Log("reboot with type '%s'", reboot_type, indent=2)
        ops.append(op)
      self.ExecOrQueue(instance, ops)

  @_DoCheckInstances
  @_DoBatch(True)
  def BurnActivateDisks(self):
    """Activate and deactivate disks of the instances."""
    Log("Activating/deactivating disks")
    for instance in self.instances:
      Log("instance %s", instance, indent=1)
      op_start = self.StartInstanceOp(instance)
      op_act = opcodes.OpInstanceActivateDisks(instance_name=instance)
      op_deact = opcodes.OpInstanceDeactivateDisks(instance_name=instance)
      op_stop = self.StopInstanceOp(instance)
      Log("activate disks when online", indent=2)
      Log("activate disks when offline", indent=2)
      Log("deactivate disks (when offline)", indent=2)
      self.ExecOrQueue(instance, [op_act, op_stop, op_act, op_deact, op_start])

  @_DoCheckInstances
  @_DoBatch(False)
  def BurnAddRemoveDisks(self):
    """Add and remove an extra disk for the instances."""
    Log("Adding and removing disks")
    for instance in self.instances:
      Log("instance %s", instance, indent=1)
      op_add = opcodes.OpInstanceSetParams(
        instance_name=instance,
        disks=[(constants.DDM_ADD, {"size": self.disk_size[0]})])
      op_rem = opcodes.OpInstanceSetParams(
        instance_name=instance, disks=[(constants.DDM_REMOVE, {})])
      op_stop = self.StopInstanceOp(instance)
      op_start = self.StartInstanceOp(instance)
      Log("adding a disk", indent=2)
      Log("removing last disk", indent=2)
      self.ExecOrQueue(instance, [op_add, op_stop, op_rem, op_start])

  @_DoBatch(False)
  def BurnAddRemoveNICs(self):
    """Add, change and remove an extra NIC for the instances."""
    Log("Adding and removing NICs")
    for instance in self.instances:
      Log("instance %s", instance, indent=1)
      op_add = opcodes.OpInstanceSetParams(
        instance_name=instance, nics=[(constants.DDM_ADD, {})])
      op_chg = opcodes.OpInstanceSetParams(
        instance_name=instance, nics=[(constants.DDM_MODIFY,
                                       -1, {"mac": constants.VALUE_GENERATE})])
      op_rem = opcodes.OpInstanceSetParams(
        instance_name=instance, nics=[(constants.DDM_REMOVE, {})])
      Log("adding a NIC", indent=2)
      Log("changing a NIC", indent=2)
      Log("removing last NIC", indent=2)
      self.ExecOrQueue(instance, [op_add, op_chg, op_rem])


  def _DoConfdRequestReply(self, req):
    self.confd_counting_callback.RegisterQuery(req.rsalt)
    self.confd_client.SendRequest(req, async=False)
    while not self.confd_counting_callback.AllAnswered():
      if not self.confd_client.ReceiveReply():
        Err("Did not receive all expected confd replies")
        break


  def _CheckInstanceAlive(self, instance):
    """Check if an instance is alive by doing http checks.

    This will try to retrieve the url on the instance /hostname.txt
    and check that it contains the hostname of the instance. In case
    we get ECONNREFUSED, we retry up to the net timeout seconds, for
    any other error we abort.

    """
    if not self.opts.http_check:
      return
    end_time = time.time() + self.opts.net_timeout
    url = None
    while time.time() < end_time and url is None:
      try:
        url = self.url_opener.open("http://%s/hostname.txt" % instance)
      except IOError:
        # connection refused, no route to host, etc.; retry until the
        # timeout expires
        time.sleep(1)
    if url is None:
      raise InstanceDown(instance, "Cannot contact instance")
    hostname = url.read().strip()
    url.close()
    if hostname != instance:
      raise InstanceDown(instance, ("Hostname mismatch, expected %s, got %s" %
                                    (instance, hostname)))



def main():
  """Main function.

  """
  utils.SetupLogging(pathutils.LOG_BURNIN, sys.argv[0],
                     debug=False, stderr_logging=True)

  return Burner().BurninCluster()

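# Entry-point guard (a sketch; assumes burnin is invoked as a script and
# that main()'s return value is the desired process exit code):
if __name__ == "__main__":
  sys.exit(main())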