Package ganeti :: Module utils
[hide private]
[frames] | [no frames]

Source Code for Module ganeti.utils

   1  # 
   2  # 
   3   
   4  # Copyright (C) 2006, 2007, 2010 Google Inc. 
   5  # 
   6  # This program is free software; you can redistribute it and/or modify 
   7  # it under the terms of the GNU General Public License as published by 
   8  # the Free Software Foundation; either version 2 of the License, or 
   9  # (at your option) any later version. 
  10  # 
  11  # This program is distributed in the hope that it will be useful, but 
  12  # WITHOUT ANY WARRANTY; without even the implied warranty of 
  13  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU 
  14  # General Public License for more details. 
  15  # 
  16  # You should have received a copy of the GNU General Public License 
  17  # along with this program; if not, write to the Free Software 
  18  # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
  19  # 02110-1301, USA. 
  20   
  21   
  22  """Ganeti utility module. 
  23   
  24  This module holds functions that can be used in both daemons (all) and 
  25  the command line scripts. 
  26   
  27  """ 
  28   
  29   
  30  import os 
  31  import sys 
  32  import time 
  33  import subprocess 
  34  import re 
  35  import socket 
  36  import tempfile 
  37  import shutil 
  38  import errno 
  39  import pwd 
  40  import itertools 
  41  import select 
  42  import fcntl 
  43  import resource 
  44  import logging 
  45  import logging.handlers 
  46  import signal 
  47  import OpenSSL 
  48  import datetime 
  49  import calendar 
  50  import hmac 
  51  import collections 
  52   
  53  from cStringIO import StringIO 
  54   
  55  try: 
  56    # pylint: disable-msg=F0401 
  57    import ctypes 
  58  except ImportError: 
  59    ctypes = None 
  60   
  61  from ganeti import errors 
  62  from ganeti import constants 
  63  from ganeti import compat 
  64   
  65   
# presumably tracks locks currently held by this process (used by lock
# debugging helpers) — not referenced in this chunk; verify against callers
_locksheld = []

# Characters considered safe to pass to a shell without quoting
_re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')

# NOTE(review): looks like a switch for lock debugging — confirm usage
debug_locks = False

#: when set to True, L{RunCmd} is disabled
no_fork = False

# Kernel-provided source of freshly generated random UUIDs
_RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"

# NOTE(review): despite the name, this character class matches any
# alphanumeric character, not only hexadecimal digits
HEX_CHAR_RE = r"[a-zA-Z0-9]"
VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
# Matches "<header>: <salt>/<signature>" lines in X509 certificates
X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
                            (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
                             HEX_CHAR_RE, HEX_CHAR_RE),
                            re.S | re.I)

# Service names: 1 to 128 characters from [-_.a-zA-Z0-9]
_VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")

# Canonical textual UUID form (8-4-4-4-12 lowercase hex groups)
UUID_RE = re.compile('^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-'
                     '[a-f0-9]{4}-[a-f0-9]{12}$')

# Certificate verification results
(CERT_WARNING,
 CERT_ERROR) = range(1, 3)

# Flags for mlockall() (from bits/mman.h)
_MCL_CURRENT = 1
_MCL_FUTURE = 2

#: MAC checker regexp
_MAC_CHECK = re.compile("^([0-9a-f]{2}:){5}[0-9a-f]{2}$", re.I)
class RunResult(object):
  """Holds the result of running external programs.

  @type exit_code: int
  @ivar exit_code: the exit code of the program, or None (if the program
      didn't exit())
  @type signal: int or None
  @ivar signal: the signal that caused the program to finish, or None
      (if the program wasn't terminated by a signal)
  @type stdout: str
  @ivar stdout: the standard output of the program
  @type stderr: str
  @ivar stderr: the standard error of the program
  @type failed: boolean
  @ivar failed: True in case the program was
      terminated by a signal or exited with a non-zero exit code
  @ivar fail_reason: a string detailing the termination reason

  """
  __slots__ = ["exit_code", "signal", "stdout", "stderr",
               "failed", "fail_reason", "cmd"]

  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
    self.cmd = cmd
    self.exit_code = exit_code
    self.signal = signal_
    self.stdout = stdout
    self.stderr = stderr
    # Any deadly signal or a non-zero exit code counts as failure
    self.failed = (signal_ is not None or exit_code != 0)

    if signal_ is not None:
      self.fail_reason = "terminated by signal %s" % signal_
    elif exit_code is not None:
      self.fail_reason = "exited with exit code %s" % exit_code
    else:
      self.fail_reason = "unable to determine termination reason"

    if self.failed:
      logging.debug("Command '%s' failed (%s); output: %s",
                    self.cmd, self.fail_reason, self.output)

  def _CombinedOutput(self):
    """Returns the combined stdout and stderr for easier usage.

    """
    return self.stdout + self.stderr

  output = property(_CombinedOutput, None, None, "Return full output")
def _BuildCmdEnvironment(env, reset):
  """Builds the environment for an external program.

  @type env: dict or None
  @param env: extra variables layered on top of the base environment
  @type reset: bool
  @param reset: start from an empty environment instead of a copy of
      C{os.environ}
  @rtype: dict
  @return: the environment for the child process

  """
  if reset:
    result = {}
  else:
    result = os.environ.copy()
    # Force a neutral locale so tool output stays parseable
    result["LC_ALL"] = "C"

  if env is not None:
    result.update(env)

  return result
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False,
           interactive=False):
  """Execute a (shell) command.

  The command should not read from its standard input, as it will be
  closed.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type output: str
  @param output: if desired, the output of the command can be
      saved in a file instead of the RunResult instance; this
      parameter denotes the file name (if not None)
  @type cwd: string
  @param cwd: if specified, will be used as the working
      directory for the command; the default will be /
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @type interactive: boolean
  @param interactive: whether we pipe stdin, stdout and stderr
      (default behaviour) or run the command interactive
  @rtype: L{RunResult}
  @return: RunResult instance
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")

  # An interactive command keeps the caller's stdio, so its output cannot
  # also be captured into a file
  if output and interactive:
    raise errors.ProgrammerError("Parameters 'output' and 'interactive' can"
                                 " not be provided at the same time")

  if isinstance(cmd, basestring):
    # A plain string is handed to the shell as-is
    strcmd = cmd
    shell = True
  else:
    # A list is executed directly; the quoted form is for logging only
    cmd = [str(val) for val in cmd]
    strcmd = ShellQuoteArgs(cmd)
    shell = False

  if output:
    logging.debug("RunCmd %s, output file '%s'", strcmd, output)
  else:
    logging.debug("RunCmd %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, reset_env)

  try:
    if output is None:
      out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd, interactive)
    else:
      status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
      out = err = ""
  except OSError, err:
    if err.errno == errno.ENOENT:
      raise errors.OpExecError("Can't execute '%s': not found (%s)" %
                               (strcmd, err))
    else:
      raise

  # subprocess encodes death by signal as a negative status (-signum)
  if status >= 0:
    exitcode = status
    signal_ = None
  else:
    exitcode = None
    signal_ = -status

  return RunResult(exitcode, signal_, out, err, strcmd)
def SetupDaemonEnv(cwd="/", umask=077):
  """Setup a daemon's environment.

  This should be called between the first and second fork, due to
  setsid usage.

  @param cwd: the directory to which to chdir
  @param umask: the umask to setup

  """
  os.chdir(cwd)
  os.umask(umask)
  # Start a new session, detaching from the controlling terminal
  os.setsid()
def SetupDaemonFDs(output_file, output_fd):
  """Setups up a daemon's file descriptors.

  Redirects stdin to /dev/null, and stdout/stderr to the given file,
  the given descriptor, or /dev/null if neither is provided.

  @param output_file: if not None, the file to which to redirect
      stdout/stderr
  @param output_fd: if not None, the file descriptor for stdout/stderr

  """
  # check that at most one is defined
  assert [output_file, output_fd].count(None) >= 1

  # Open /dev/null (read-only, only for stdin)
  devnull_fd = os.open(os.devnull, os.O_RDONLY)

  if output_fd is not None:
    # Caller supplied a ready descriptor for stdout/stderr
    pass
  elif output_file is not None:
    # Open output file
    try:
      output_fd = os.open(output_file,
                          os.O_WRONLY | os.O_CREAT | os.O_APPEND, 0600)
    except EnvironmentError, err:
      raise Exception("Opening output file failed: %s" % err)
  else:
    # Neither given: discard all output
    output_fd = os.open(os.devnull, os.O_WRONLY)

  # Redirect standard I/O
  os.dup2(devnull_fd, 0)
  os.dup2(output_fd, 1)
  os.dup2(output_fd, 2)
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
                pidfile=None):
  """Start a daemon process after forking twice.

  @type cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: Additional environment variables
  @type cwd: string
  @param cwd: Working directory for the program
  @type output: string
  @param output: Path to file in which to save the output
  @type output_fd: int
  @param output_fd: File descriptor for output
  @type pidfile: string
  @param pidfile: Process ID file
  @rtype: int
  @return: Daemon process ID
  @raise errors.ProgrammerError: if we call this when forks are disabled

  """
  if no_fork:
    raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
                                 " disabled")

  # "output" and "output_fd" are mutually exclusive ways of capturing the
  # daemon's output
  if output and not (bool(output) ^ (output_fd is not None)):
    raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
                                 " specified")

  if isinstance(cmd, basestring):
    cmd = ["/bin/sh", "-c", cmd]

  strcmd = ShellQuoteArgs(cmd)

  if output:
    logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
  else:
    logging.debug("StartDaemon %s", strcmd)

  cmd_env = _BuildCmdEnvironment(env, False)

  # Create pipe for sending PID back
  (pidpipe_read, pidpipe_write) = os.pipe()
  try:
    try:
      # Create pipe for sending error messages
      (errpipe_read, errpipe_write) = os.pipe()
      try:
        try:
          # First fork
          pid = os.fork()
          if pid == 0:
            try:
              # Child process, won't return
              _StartDaemonChild(errpipe_read, errpipe_write,
                                pidpipe_read, pidpipe_write,
                                cmd, cmd_env, cwd,
                                output, output_fd, pidfile)
            finally:
              # Well, maybe child process failed
              os._exit(1) # pylint: disable-msg=W0212
        finally:
          # Parent keeps only the read end; closing the write end makes
          # the read below return EOF once the grandchild exec()s
          _CloseFDNoErr(errpipe_write)

        # Wait for daemon to be started (or an error message to
        # arrive) and read up to 100 KB as an error message
        errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
      finally:
        _CloseFDNoErr(errpipe_read)
    finally:
      _CloseFDNoErr(pidpipe_write)

    # Read up to 128 bytes for PID
    pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
  finally:
    _CloseFDNoErr(pidpipe_read)

  # Try to avoid zombies by waiting for child process
  try:
    os.waitpid(pid, 0)
  except OSError:
    pass

  if errormsg:
    raise errors.OpExecError("Error when starting daemon process: %r" %
                             errormsg)

  try:
    return int(pidtext)
  except (ValueError, TypeError), err:
    raise errors.OpExecError("Error while trying to parse PID %r: %s" %
                             (pidtext, err))
def _StartDaemonChild(errpipe_read, errpipe_write,
                      pidpipe_read, pidpipe_write,
                      args, env, cwd,
                      output, fd_output, pidfile):
  """Child process for starting daemon.

  Runs in the first child: detaches via L{SetupDaemonEnv}, forks again,
  and in the grandchild sets up file descriptors, writes the PID file
  and exec()s the daemon. Never returns; any error is reported through
  C{errpipe_write} and ends in C{os._exit(1)}.

  """
  try:
    # Close parent's side
    _CloseFDNoErr(errpipe_read)
    _CloseFDNoErr(pidpipe_read)

    # First child process
    SetupDaemonEnv()

    # And fork for the second time
    pid = os.fork()
    if pid != 0:
      # Exit first child process
      os._exit(0) # pylint: disable-msg=W0212

    # Make sure pipe is closed on execv* (and thereby notifies
    # original process)
    SetCloseOnExecFlag(errpipe_write, True)

    # List of file descriptors to be left open
    noclose_fds = [errpipe_write]

    # Open PID file
    if pidfile:
      fd_pidfile = WritePidFile(pidfile)

      # Keeping the file open to hold the lock
      noclose_fds.append(fd_pidfile)

      SetCloseOnExecFlag(fd_pidfile, False)
    else:
      fd_pidfile = None

    SetupDaemonFDs(output, fd_output)

    # Send daemon PID to parent
    RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))

    # Close all file descriptors except stdio and error message pipe
    CloseFDs(noclose_fds=noclose_fds)

    # Change working directory
    os.chdir(cwd)

    if env is None:
      os.execvp(args[0], args)
    else:
      os.execvpe(args[0], args, env)
  except: # pylint: disable-msg=W0702
    try:
      # Report errors to original process
      WriteErrorToFD(errpipe_write, str(sys.exc_info()[1]))
    except: # pylint: disable-msg=W0702
      # Ignore errors in error handling
      pass

    os._exit(1) # pylint: disable-msg=W0212
def WriteErrorToFD(fd, err):
  """Possibly write an error message to a fd.

  @type fd: None or int (file descriptor)
  @param fd: if not None, the error will be written to this fd
  @param err: string, the error message

  """
  if fd is None:
    # No descriptor to report to
    return

  # Never write an empty message; the reader treats data as failure
  message = err or "<unknown error>"

  RetryOnSignal(os.write, fd, message)
def _RunCmdPipe(cmd, env, via_shell, cwd, interactive):
  """Run a command and return its output.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type cwd: string
  @param cwd: the working directory for the program
  @type interactive: boolean
  @param interactive: Run command interactive (without piping)
  @rtype: tuple
  @return: (out, err, status)

  """
  poller = select.poll()

  stderr = subprocess.PIPE
  stdout = subprocess.PIPE
  stdin = subprocess.PIPE

  if interactive:
    # Inherit the caller's stdio instead of piping
    stderr = stdout = stdin = None

  child = subprocess.Popen(cmd, shell=via_shell,
                           stderr=stderr,
                           stdout=stdout,
                           stdin=stdin,
                           close_fds=True, env=env,
                           cwd=cwd)

  out = StringIO()
  err = StringIO()
  if not interactive:
    # The child must not wait for input; close its stdin right away
    child.stdin.close()
    poller.register(child.stdout, select.POLLIN)
    poller.register(child.stderr, select.POLLIN)
    # Maps child fd -> (accumulating buffer, pipe object)
    fdmap = {
      child.stdout.fileno(): (out, child.stdout),
      child.stderr.fileno(): (err, child.stderr),
      }
    for fd in fdmap:
      SetNonblockFlag(fd, True)

    # Drain both pipes until EOF has been seen on each
    while fdmap:
      pollresult = RetryOnSignal(poller.poll)

      for fd, event in pollresult:
        if event & select.POLLIN or event & select.POLLPRI:
          data = fdmap[fd][1].read()
          # no data from read signifies EOF (the same as POLLHUP)
          if not data:
            poller.unregister(fd)
            del fdmap[fd]
            continue
          fdmap[fd][0].write(data)
        if (event & select.POLLNVAL or event & select.POLLHUP or
            event & select.POLLERR):
          poller.unregister(fd)
          del fdmap[fd]

  out = out.getvalue()
  err = err.getvalue()

  status = child.wait()
  return out, err, status
def _RunCmdFile(cmd, env, via_shell, output, cwd):
  """Run a command and save its output to a file.

  @type  cmd: string or list
  @param cmd: Command to run
  @type env: dict
  @param env: The environment to use
  @type via_shell: bool
  @param via_shell: if we should run via the shell
  @type output: str
  @param output: the filename in which to save the output
  @type cwd: string
  @param cwd: the working directory for the program
  @rtype: int
  @return: the exit status

  """
  out_fh = open(output, "a")
  try:
    proc = subprocess.Popen(cmd, shell=via_shell,
                            stderr=subprocess.STDOUT,
                            stdout=out_fh,
                            stdin=subprocess.PIPE,
                            close_fds=True, env=env,
                            cwd=cwd)

    # The child must not wait for input on stdin
    proc.stdin.close()
    status = proc.wait()
  finally:
    out_fh.close()
  return status
def SetCloseOnExecFlag(fd, enable):
  """Sets or unsets the close-on-exec flag on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it.

  """
  current = fcntl.fcntl(fd, fcntl.F_GETFD)

  if enable:
    updated = current | fcntl.FD_CLOEXEC
  else:
    updated = current & ~fcntl.FD_CLOEXEC

  fcntl.fcntl(fd, fcntl.F_SETFD, updated)
def SetNonblockFlag(fd, enable):
  """Sets or unsets the O_NONBLOCK flag on on a file descriptor.

  @type fd: int
  @param fd: File descriptor
  @type enable: bool
  @param enable: Whether to set or unset it

  """
  current = fcntl.fcntl(fd, fcntl.F_GETFL)

  if enable:
    updated = current | os.O_NONBLOCK
  else:
    updated = current & ~os.O_NONBLOCK

  fcntl.fcntl(fd, fcntl.F_SETFL, updated)
def RetryOnSignal(fn, *args, **kwargs):
  """Calls a function again if it failed due to EINTR.

  @type fn: callable
  @param fn: the function to invoke; C{args} and C{kwargs} are passed on
  @return: whatever C{fn} returns on its first uninterrupted call

  """
  while True:
    try:
      return fn(*args, **kwargs)
    except EnvironmentError as err:
      # Interrupted system call: retry; anything else is a real error
      if err.errno != errno.EINTR:
        raise
    except (socket.error, select.error) as err:
      # In python 2.6 and above select.error is an IOError, so it's handled
      # above, in 2.5 and below it's not, and it's handled here.
      if not (err.args and err.args[0] == errno.EINTR):
        raise
def RunParts(dir_name, env=None, reset_env=False):
  """Run Scripts or programs in a directory

  @type dir_name: string
  @param dir_name: absolute path to a directory
  @type env: dict
  @param env: The environment to use
  @type reset_env: boolean
  @param reset_env: whether to reset or keep the default os environment
  @rtype: list of tuples
  @return: list of (name, (one of RUNDIR_STATUS), RunResult)

  """
  results = []

  try:
    entries = ListVisibleFiles(dir_name)
  except OSError as err:
    # An unreadable directory is not fatal; just report nothing ran
    logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
    return results

  for relname in sorted(entries):
    fname = PathJoin(dir_name, relname)
    # Only executable regular files matching the plugin mask are run
    runnable = (os.path.isfile(fname) and os.access(fname, os.X_OK) and
                constants.EXT_PLUGIN_MASK.match(relname) is not None)
    if not runnable:
      results.append((relname, constants.RUNPARTS_SKIP, None))
      continue

    try:
      result = RunCmd([fname], env=env, reset_env=reset_env)
    except Exception as err: # pylint: disable-msg=W0703
      results.append((relname, constants.RUNPARTS_ERR, str(err)))
    else:
      results.append((relname, constants.RUNPARTS_RUN, result))

  return results
def RemoveFile(filename):
  """Remove a file ignoring some errors.

  Remove a file, ignoring non-existing ones (ENOENT) or directories
  (EISDIR). Other errors are passed.

  @type filename: str
  @param filename: the file to be removed

  """
  try:
    os.unlink(filename)
  except OSError as err:
    if err.errno not in (errno.ENOENT, errno.EISDIR):
      raise
def RemoveDir(dirname):
  """Remove an empty directory.

  Remove a directory, ignoring non-existing ones.
  Other errors are passed. This includes the case,
  where the directory is not empty, so it can't be removed.

  @type dirname: str
  @param dirname: the empty directory to be removed

  """
  try:
    os.rmdir(dirname)
  except OSError as err:
    # A missing directory is fine; everything else is the caller's problem
    if err.errno != errno.ENOENT:
      raise
def RenameFile(old, new, mkdir=False, mkdir_mode=0o750):
  """Renames a file.

  @type old: string
  @param old: Original path
  @type new: string
  @param new: New path
  @type mkdir: bool
  @param mkdir: Whether to create target directory if it doesn't exist
  @type mkdir_mode: int
  @param mkdir_mode: Mode for newly created directories

  """
  try:
    return os.rename(old, new)
  except OSError as err:
    # In at least one use case of this function, the job queue, directory
    # creation is very rare. Checking for the directory before renaming is not
    # as efficient.
    if mkdir and err.errno == errno.ENOENT:
      # Target directory is missing: create it, then retry exactly once
      Makedirs(os.path.dirname(new), mode=mkdir_mode)
      return os.rename(old, new)
    raise
def Makedirs(path, mode=0o750):
  """Super-mkdir; create a leaf directory and all intermediate ones.

  This is a wrapper around C{os.makedirs} adding error handling not
  implemented before Python 2.5.

  """
  try:
    os.makedirs(path, mode)
  except OSError as err:
    # An already-existing path is acceptable; anything else is not
    if err.errno != errno.EEXIST or not os.path.exists(path):
      raise
def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.

  This function should be called after C{os.fork} in the child process to
  ensure it creates a newly seeded random generator. Otherwise it would
  generate the same random parts as the parent process. If several processes
  race for the creation of a temporary file, this could lead to one not getting
  a temporary name.

  """
  # pylint: disable-msg=W0212
  if not (hasattr(tempfile, "_once_lock") and
          hasattr(tempfile, "_name_sequence")):
    logging.critical("The tempfile module misses at least one of the"
                     " '_once_lock' and '_name_sequence' attributes")
    return

  tempfile._once_lock.acquire()
  try:
    # Dropping the sequence forces tempfile to re-seed it lazily
    tempfile._name_sequence = None
  finally:
    tempfile._once_lock.release()
def _FingerprintFile(filename):
  """Compute the fingerprint of a file.

  If the file does not exist, a None will be returned
  instead.

  @type filename: str
  @param filename: the filename to checksum
  @rtype: str or None
  @return: the hex digest of the sha checksum of the contents
      of the file, or None if it does not exist

  """
  if not (os.path.exists(filename) and os.path.isfile(filename)):
    return None

  f = open(filename)
  try:
    fp = compat.sha1_hash()
    # Hash the file in fixed-size chunks to bound memory usage
    while True:
      data = f.read(4096)
      if not data:
        break

      fp.update(data)
  finally:
    # Bug fix: the previous implementation leaked this file handle
    f.close()

  return fp.hexdigest()
def FingerprintFiles(files):
  """Compute fingerprints for a list of files.

  @type files: list
  @param files: the list of filename to fingerprint
  @rtype: dict
  @return: a dictionary filename: fingerprint, holding only
      existing files

  """
  # _FingerprintFile returns None for missing files; those are dropped
  pairs = [(path, _FingerprintFile(path)) for path in files]
  return dict((path, digest) for (path, digest) in pairs if digest)
def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.

  @type target: dict
  @param target: the dict to update
  @type key_types: dict
  @param key_types: dict mapping target dict keys to types
      in constants.ENFORCEABLE_TYPES
  @type allowed_values: list
  @keyword allowed_values: list of specially allowed values
  @raise errors.TypeEnforcementError: if a key is unknown or a value
      cannot be converted to its requested type

  """
  if allowed_values is None:
    allowed_values = []

  if not isinstance(target, dict):
    msg = "Expected dictionary, got '%s'" % target
    raise errors.TypeEnforcementError(msg)

  for key in target:
    if key not in key_types:
      msg = "Unknown key '%s'" % key
      raise errors.TypeEnforcementError(msg)

    # Specially allowed values bypass type enforcement entirely
    if target[key] in allowed_values:
      continue

    ktype = key_types[key]
    if ktype not in constants.ENFORCEABLE_TYPES:
      msg = "'%s' has non-enforceable type %s" % (key, ktype)
      raise errors.ProgrammerError(msg)

    if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
      # None is acceptable for a "maybe" string
      if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
        pass
      elif not isinstance(target[key], basestring):
        # A false boolean is coerced to the empty string
        if isinstance(target[key], bool) and not target[key]:
          target[key] = ''
        else:
          msg = "'%s' (value %s) is not a valid string" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_BOOL:
      if isinstance(target[key], basestring) and target[key]:
        # Non-empty strings must spell out one of the boolean constants
        if target[key].lower() == constants.VALUE_FALSE:
          target[key] = False
        elif target[key].lower() == constants.VALUE_TRUE:
          target[key] = True
        else:
          msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
          raise errors.TypeEnforcementError(msg)
      elif target[key]:
        target[key] = True
      else:
        target[key] = False
    elif ktype == constants.VTYPE_SIZE:
      try:
        target[key] = ParseUnit(target[key])
      except errors.UnitParseError, err:
        msg = "'%s' (value %s) is not a valid size. error: %s" % \
              (key, target[key], err)
        raise errors.TypeEnforcementError(msg)
    elif ktype == constants.VTYPE_INT:
      try:
        target[key] = int(target[key])
      except (ValueError, TypeError):
        msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
        raise errors.TypeEnforcementError(msg)
def _GetProcStatusPath(pid):
  """Returns the path for a PID's proc status file.

  @type pid: int
  @param pid: Process ID
  @rtype: string

  """
  return "/proc/%d/status" % (pid,)
def IsProcessAlive(pid):
  """Check if a given pid exists on the system.

  @note: zombie status is not handled, so zombie processes
      will be returned as alive
  @type pid: int
  @param pid: the process ID to check
  @rtype: boolean
  @return: True if the process exists

  """
  def _TryStat(name):
    """Stat the path once, translating errors for the retry helper."""
    try:
      os.stat(name)
      return True
    except EnvironmentError as err:
      if err.errno in (errno.ENOENT, errno.ENOTDIR):
        return False
      if err.errno == errno.EINVAL:
        raise RetryAgain(err)
      raise

  assert isinstance(pid, int), "pid must be an integer"
  if pid <= 0:
    return False

  # /proc in a multiprocessor environment can have strange behaviors.
  # Retry the os.stat a few times until we get a good result.
  try:
    return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
                 args=[_GetProcStatusPath(pid)])
  except RetryTimeout as err:
    err.RaiseInner()
def _ParseSigsetT(sigset):
  """Parse a rendered sigset_t value.

  This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
  function.

  @type sigset: string
  @param sigset: Rendered signal set from /proc/$pid/status
  @rtype: set
  @return: Set of all enabled signal numbers

  """
  result = set()

  # The rendered string puts the highest signals first, hence the
  # reversal; every hex digit covers four consecutive signal numbers
  base = 0
  for char in reversed(sigset):
    nibble = int(char, 16)
    for bit in range(4):
      if nibble & (1 << bit):
        result.add(base + bit + 1)
    base += 4

  return result
def _GetProcStatusField(pstatus, field):
  """Retrieves a field from the contents of a proc status file.

  @type pstatus: string
  @param pstatus: Contents of /proc/$pid/status
  @type field: string
  @param field: Name of field whose value should be returned
  @rtype: string

  """
  for line in pstatus.splitlines():
    (name, sep, value) = line.partition(":")
    # Lines without a colon are ignored; field names must match exactly
    if sep and name == field:
      return value.strip()

  return None
def IsProcessHandlingSignal(pid, signum, status_path=None):
  """Checks whether a process is handling a signal.

  @type pid: int
  @param pid: Process ID
  @type signum: int
  @param signum: Signal number
  @type status_path: string or None
  @param status_path: alternative status file to parse (mostly useful
      for testing); defaults to the process' own /proc status file
  @rtype: bool

  """
  if status_path is None:
    status_path = _GetProcStatusPath(pid)

  try:
    proc_status = ReadFile(status_path)
  except EnvironmentError as err:
    # In at least one case, reading /proc/$pid/status failed with ESRCH.
    if err.errno not in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL,
                         errno.ESRCH):
      raise
    return False

  sigcgt = _GetProcStatusField(proc_status, "SigCgt")
  if sigcgt is None:
    raise RuntimeError("%s is missing 'SigCgt' field" % status_path)

  # Now check whether signal is handled
  return signum in _ParseSigsetT(sigcgt)
def ReadPidFile(pidfile):
  """Read a pid from a file.

  @type  pidfile: string
  @param pidfile: path to the file containing the pid
  @rtype: int
  @return: The process id, if the file exists and contains a valid PID,
      otherwise 0

  """
  try:
    raw_data = ReadOneLineFile(pidfile)
  except EnvironmentError as err:
    # A missing file simply means no daemon is running; anything else is
    # worth logging, but still maps to PID 0
    if err.errno != errno.ENOENT:
      logging.exception("Can't read pid file")
    return 0

  try:
    return int(raw_data)
  except (TypeError, ValueError):
    logging.info("Can't parse pid file contents", exc_info=True)
    return 0
def ReadLockedPidFile(path):
  """Reads a locked PID file.

  This can be used together with L{StartDaemon}.

  @type path: string
  @param path: Path to PID file
  @return: PID as integer or, if file was unlocked or couldn't be opened, None

  """
  try:
    fd = os.open(path, os.O_RDONLY)
  except EnvironmentError, err:
    if err.errno == errno.ENOENT:
      # PID file doesn't exist
      return None
    raise

  try:
    try:
      # Try to acquire lock
      LockFile(fd)
    except errors.LockError:
      # Couldn't lock, daemon is running
      return int(os.read(fd, 100))
  finally:
    os.close(fd)

  # Lock was acquired, therefore no daemon holds the file
  return None
def MatchNameComponent(key, name_list, case_sensitive=True):
  """Try to match a name against a list.

  This function will try to match a name like test1 against a list
  like C{['test1.example.com', 'test2.example.com', ...]}. Against
  this list, I{'test1'} as well as I{'test1.example'} will match, but
  not I{'test1.ex'}. A multiple match will be considered as no match
  at all (e.g. I{'test1'} against C{['test1.example.com',
  'test1.example.org']}), except when the key fully matches an entry
  (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).

  @type key: str
  @param key: the name to be searched
  @type name_list: list
  @param name_list: the list of strings against which to search the key
  @type case_sensitive: boolean
  @param case_sensitive: whether to provide a case-sensitive match

  @rtype: None or str
  @return: None if there is no match I{or} if there are multiple matches,
      otherwise the element from the list which matches

  """
  # An exact list member always wins outright
  if key in name_list:
    return key

  re_flags = 0
  if not case_sensitive:
    re_flags |= re.IGNORECASE
    key = key.upper()

  # The key may be followed by a dot-separated suffix, nothing else
  name_re = re.compile(r"^%s(\..*)?$" % re.escape(key), re_flags)

  prefix_matches = []
  full_matches = []
  for name in name_list:
    if name_re.match(name) is not None:
      prefix_matches.append(name)
      if not case_sensitive and key == name.upper():
        full_matches.append(name)

  if len(full_matches) == 1:
    return full_matches[0]
  if len(prefix_matches) == 1:
    return prefix_matches[0]
  return None
def ValidateServiceName(name):
  """Validate the given service name.

  @type name: number or string
  @param name: Service name or port specification
  @return: the name, unchanged, if it is valid
  @raise errors.OpPrereqError: if the name is not valid

  """
  try:
    numport = int(name)
  except (ValueError, TypeError):
    # Non-numeric service name: must match the allowed character set
    valid = _VALID_SERVICE_NAME_RE.match(name) is not None
  else:
    # Numeric port (protocols other than TCP or UDP might need adjustments
    # here)
    valid = 0 <= numport < (1 << 16)

  if not valid:
    raise errors.OpPrereqError("Invalid service name '%s'" % name,
                               errors.ECODE_INVAL)

  return name
def ListVolumeGroups():
  """List volume groups and their size

  @rtype: dict
  @return:
      Dictionary with keys volume name and values
      the size of the volume

  """
  vgs_cmd = "vgs --noheadings --units m --nosuffix -o name,size"
  result = RunCmd(vgs_cmd)
  vg_sizes = {}
  if result.failed:
    # No LVM tools or no volume groups: report nothing
    return vg_sizes

  for line in result.stdout.splitlines():
    try:
      (name, size) = line.split()
      size = int(float(size))
    except (IndexError, ValueError) as err:
      logging.error("Invalid output from vgs (%s): %s", err, line)
      continue
    vg_sizes[name] = size

  return vg_sizes
def BridgeExists(bridge):
  """Check whether the given bridge exists in the system.

  @type bridge: str
  @param bridge: the bridge name to check
  @rtype: boolean
  @return: True if it does

  """
  # The kernel exposes a "bridge" subdirectory in sysfs for every
  # network device that is a bridge
  sysfs_path = "/sys/class/net/%s/bridge" % bridge
  return os.path.isdir(sysfs_path)
1173
def NiceSort(name_list):
  """Sort a list of strings based on digit and non-digit groupings.

  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}.

  The sort algorithm breaks each name in groups of either only-digits
  or no-digits. Only the first eight such groups are considered, and
  after that we just use what's left of the string.

  @type name_list: list
  @param name_list: the names to be sorted
  @rtype: list
  @return: a copy of the name list sorted with our algorithm

  """
  # Raw strings, so that "\D"/"\d" aren't treated as (invalid) string
  # escape sequences
  _SORTER_BASE = r"(\D+|\d+)"
  _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % ((_SORTER_BASE, ) * 8)
  _SORTER_RE = re.compile(_SORTER_FULL)
  _SORTER_NODIGIT = re.compile(r"^\D*$")

  def _TryInt(val):
    """Attempts to convert a variable to integer."""
    if val is None or _SORTER_NODIGIT.match(val):
      return val
    return int(val)

  to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
             for name in name_list]
  to_sort.sort()
  return [tup[1] for tup in to_sort]
def TryConvert(fn, val):
  """Try to convert a value ignoring errors.

  This function tries to apply function I{fn} to I{val}. If no
  C{ValueError} or C{TypeError} exceptions are raised, it will return
  the result, else it will return the original value. Any other
  exceptions are propagated to the caller.

  @type fn: callable
  @param fn: function to apply to the value
  @param val: the value to be converted
  @return: The converted value if the conversion was successful,
      otherwise the original value.

  """
  try:
    return fn(val)
  except (ValueError, TypeError):
    return val
1232
def IsValidShellParam(word):
  """Verifies is the given word is safe from the shell's p.o.v.

  This means that we can pass this to a command via the shell and be
  sure that it doesn't alter the command line and is passed as such to
  the actual command.

  Note that we are overly restrictive here, in order to be on the safe
  side.

  @type word: str
  @param word: the word to check
  @rtype: boolean
  @return: True if the word is 'safe'

  """
  # One or more characters from a conservative whitelist, nothing else
  return re.match("^[-a-zA-Z0-9._+/:%@]+$", word) is not None
1251
def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.

  This function will check all arguments in the args list so that they
  are valid shell parameters (i.e. they don't contain shell
  metacharacters). If everything is ok, it will return the result of
  template % args.

  @type template: str
  @param template: the string holding the template for the
      string formatting
  @rtype: str
  @return: the expanded command line
  @raise errors.ProgrammerError: if any argument contains characters
      not considered shell-safe

  """
  for arg in args:
    if not IsValidShellParam(arg):
      raise errors.ProgrammerError("Shell argument '%s' contains"
                                   " invalid characters" % arg)
  return template % args
1273
def FormatUnit(value, units):
  """Formats an incoming number of MiB with the appropriate unit.

  @type value: int
  @param value: integer representing the value in MiB (1048576)
  @type units: char
  @param units: the type of formatting we should do:
      - 'h' for automatic scaling
      - 'm' for MiBs
      - 'g' for GiBs
      - 't' for TiBs
  @rtype: str
  @return: the formatted value (with suffix)
  @raise errors.ProgrammerError: for an unknown unit specifier

  """
  if units not in ('m', 'g', 't', 'h'):
    raise errors.ProgrammerError("Invalid unit specified '%s'" % str(units))

  # In automatic mode a suffix is appended and the scale depends on the
  # magnitude of the value
  automatic = (units == 'h')

  if units == 'm' or (automatic and value < 1024):
    suffix = 'M' if automatic else ''
    return "%d%s" % (round(value, 0), suffix)

  if units == 'g' or (automatic and value < (1024 * 1024)):
    suffix = 'G' if automatic else ''
    return "%0.1f%s" % (round(float(value) / 1024, 1), suffix)

  suffix = 'T' if automatic else ''
  return "%0.1f%s" % (round(float(value) / 1024 / 1024, 1), suffix)
1309
def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.

  Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
  [UNIT]}. If no unit is specified, it defaults to MiB. Return value
  is always an int in MiB.

  @param input_string: the value to parse (also accepts plain numbers)
  @rtype: int
  @return: the parsed value in MiB, rounded up to a multiple of 4
  @raise errors.UnitParseError: if the input doesn't match the expected
      format or uses an unknown unit

  """
  # Raw string, so that "\d"/"\s" aren't treated as (invalid) string
  # escape sequences
  m = re.match(r'^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
  if not m:
    raise errors.UnitParseError("Invalid format")

  value = float(m.groups()[0])

  unit = m.groups()[1]
  if unit:
    lcunit = unit.lower()
  else:
    # No unit given, default to MiB
    lcunit = 'm'

  if lcunit in ('m', 'mb', 'mib'):
    # Value already in MiB
    pass

  elif lcunit in ('g', 'gb', 'gib'):
    value *= 1024

  elif lcunit in ('t', 'tb', 'tib'):
    value *= 1024 * 1024

  else:
    raise errors.UnitParseError("Unknown unit: %s" % unit)

  # Make sure we round up
  if int(value) < value:
    value += 1

  # Round up to the next multiple of 4
  value = int(value)
  if value % 4:
    value += 4 - value % 4

  return value
1354
def ParseCpuMask(cpu_mask):
  """Parse a CPU mask definition and return the list of CPU IDs.

  CPU mask format: comma-separated list of CPU IDs
  or dash-separated ID ranges
  Example: "0-2,5" -> "0,1,2,5"

  @type cpu_mask: str
  @param cpu_mask: CPU mask definition
  @rtype: list of int
  @return: list of CPU IDs
  @raise errors.ParseError: on malformed range definitions

  """
  if not cpu_mask:
    return []

  cpu_ids = []
  for range_def in cpu_mask.split(","):
    boundaries = range_def.split("-")
    if len(boundaries) > 2:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (only one hyphen allowed): %s" % range_def)
    try:
      lower = int(boundaries[0])
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for lower boundary of"
                              " CPU ID range: %s" % str(err))
    try:
      higher = int(boundaries[-1])
    except (ValueError, TypeError) as err:
      raise errors.ParseError("Invalid CPU ID value for higher boundary of"
                              " CPU ID range: %s" % str(err))
    if lower > higher:
      raise errors.ParseError("Invalid CPU ID range definition"
                              " (%d > %d): %s" % (lower, higher, range_def))
    # Both boundaries are inclusive
    cpu_ids.extend(range(lower, higher + 1))

  return cpu_ids
1393
def AddAuthorizedKey(file_obj, key):
  """Adds an SSH public key to an authorized_keys file.

  The key is appended only if no existing line contains the same
  whitespace-separated fields.

  @type file_obj: str or file handle
  @param file_obj: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  if isinstance(file_obj, basestring):
    f = open(file_obj, 'a+')
  else:
    f = file_obj

  try:
    found = False
    ends_with_newline = True
    for line in f:
      # Ignore whitespace changes
      if line.split() == key_fields:
        found = True
        break
      ends_with_newline = line.endswith("\n")

    if not found:
      # Make sure the previous (last) line is newline-terminated before
      # appending the new key
      if not ends_with_newline:
        f.write("\n")
      f.write(key.rstrip("\r\n"))
      f.write("\n")
      f.flush()
  finally:
    f.close()
1426
def RemoveAuthorizedKey(file_name, key):
  """Removes an SSH public key from an authorized_keys file.

  The comparison ignores whitespace differences; the file is rewritten
  atomically via a temporary file in the same directory.

  @type file_name: str
  @param file_name: path to authorized_keys file
  @type key: str
  @param key: string containing key

  """
  key_fields = key.split()

  fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
  try:
    out = os.fdopen(fd, 'w')
    try:
      f = open(file_name, 'r')
      try:
        # Keep every line whose fields differ from the key's fields
        for line in f:
          if line.split() != key_fields:
            out.write(line)

        out.flush()
        # Atomically replace the original file
        os.rename(tmpname, file_name)
      finally:
        f.close()
    finally:
      out.close()
  except:
    # Don't leave the temporary file behind on failure
    RemoveFile(tmpname)
    raise
1459
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
  """Sets the name of an IP address and hostname in /etc/hosts.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type ip: str
  @param ip: the IP address
  @type hostname: str
  @param hostname: the hostname to be added
  @type aliases: list
  @param aliases: the list of aliases to add for the hostname

  """
  # Ensure aliases are unique and don't repeat the primary hostname
  aliases = UniqueSequence([hostname] + aliases)[1:]

  def _KeepLine(line):
    # Keep comments, empty lines and entries for other IP addresses;
    # any existing entry for our IP is dropped and replaced below
    fields = line.split()
    return not (fields and not fields[0].startswith("#") and fields[0] == ip)

  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will
    # automatically close the descriptor, but we would still like to have
    # its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      out.writelines(filter(_KeepLine, ReadFile(file_name).splitlines(True)))
      if aliases:
        out.write("%s\t%s %s\n" % (ip, hostname, " ".join(aliases)))
      else:
        out.write("%s\t%s\n" % (ip, hostname))
      out.flush()
    finally:
      out.close()

  WriteFile(file_name, fn=_WriteEtcHosts, mode=0o644)
def AddHostToEtcHosts(hostname, ip):
  """Wrapper around SetEtcHostsEntry.

  @type hostname: str
  @param hostname: a hostname that will be resolved and added to
      L{constants.ETC_HOSTS}
  @type ip: str
  @param ip: The ip address of the host

  """
  # The short name (first dot-separated component) becomes the only alias
  short_name = hostname.split(".")[0]
  SetEtcHostsEntry(constants.ETC_HOSTS, ip, hostname, [short_name])
1510
def RemoveEtcHostsEntry(file_name, hostname):
  """Removes a hostname from /etc/hosts.

  IP addresses without names are removed from the file.

  @type file_name: str
  @param file_name: path to the file to modify (usually C{/etc/hosts})
  @type hostname: str
  @param hostname: the hostname to be removed

  """
  def _WriteEtcHosts(fd):
    # Duplicating file descriptor because os.fdopen's result will
    # automatically close the descriptor, but we would still like to have
    # its functionality.
    out = os.fdopen(os.dup(fd), "w")
    try:
      for line in ReadFile(file_name).splitlines(True):
        fields = line.split()
        if len(fields) > 1 and not fields[0].startswith("#"):
          # Strip all occurrences of the hostname from the name list
          names = [name for name in fields[1:] if name != hostname]
          if names != fields[1:]:
            # The hostname was present; rewrite the entry only if other
            # names remain, otherwise drop it entirely
            if names:
              out.write("%s %s\n" % (fields[0], " ".join(names)))
            continue

        out.write(line)

      out.flush()
    finally:
      out.close()

  WriteFile(file_name, fn=_WriteEtcHosts, mode=0o644)
def RemoveHostFromEtcHosts(hostname):
  """Wrapper around RemoveEtcHostsEntry.

  Removes both the full hostname and its short name from
  L{constants.ETC_HOSTS}.

  @type hostname: str
  @param hostname: hostname whose full and short name will be removed

  """
  for name in (hostname, hostname.split(".")[0]):
    RemoveEtcHostsEntry(constants.ETC_HOSTS, name)
1559
def TimestampForFilename():
  """Returns the current time formatted for filenames.

  The format doesn't contain colons as some shells and applications
  treat them as separators.

  @rtype: str
  @return: the current time, formatted as C{YYYY-MM-DD_HH_MM_SS}

  """
  fmt = "%Y-%m-%d_%H_%M_%S"
  return time.strftime(fmt)
1569
def CreateBackup(file_name):
  """Creates a backup of a file.

  @type file_name: str
  @param file_name: file to be backed up
  @rtype: str
  @return: the path to the newly created backup
  @raise errors.ProgrammerError: for invalid file names

  """
  if not os.path.isfile(file_name):
    raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
                                 file_name)

  # Backup name carries the original name plus a timestamp; mkstemp appends
  # a random suffix to guarantee uniqueness
  prefix = ("%s.backup-%s." %
            (os.path.basename(file_name), TimestampForFilename()))
  dir_name = os.path.dirname(file_name)

  with open(file_name, 'rb') as fsrc:
    (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
    with os.fdopen(fd, 'wb') as fdst:
      logging.debug("Backing up %s at %s", file_name, backup_name)
      shutil.copyfileobj(fsrc, fdst)

  return backup_name
1602
def ShellQuote(value):
  """Quotes shell argument according to POSIX.

  @type value: str
  @param value: the argument to be quoted
  @rtype: str
  @return: the quoted value

  """
  if _re_shell_unquoted.match(value):
    # Safe characters only, no quoting needed
    return value
  # Wrap in single quotes; embedded single quotes become '\''
  return "'%s'" % value.replace("'", "'\\''")
1617
def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.

  @type args: list
  @param args: list of arguments to be quoted
  @rtype: str
  @return: the quoted arguments concatenated with spaces

  """
  return ' '.join(ShellQuote(arg) for arg in args)
1629
class ShellWriter:
  """Helper class to write scripts with indentation.

  """
  # String repeated once per indentation level
  INDENT_STR = "  "

  def __init__(self, fh):
    """Initializes this class.

    @param fh: file handle the script is written to

    """
    self._fh = fh
    self._indent = 0

  def IncIndent(self):
    """Increase indentation level by 1.

    """
    self._indent += 1

  def DecIndent(self):
    """Decrease indentation level by 1.

    """
    # Never allow dedenting past the left margin
    assert self._indent > 0
    self._indent -= 1

  def Write(self, txt, *args):
    """Write line to output file.

    @type txt: str
    @param txt: line contents, or a format string if C{args} is given

    """
    assert self._indent >= 0

    contents = txt % args if args else txt

    self._fh.write(self._indent * self.INDENT_STR)
    self._fh.write(contents)
    self._fh.write("\n")
1671
def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.

  @type path: str
  @param path: the directory to enumerate
  @rtype: list
  @return: the list of all files not starting with a dot
  @raise errors.ProgrammerError: if C{path} is not an absolute and
      normalized path

  """
  if not IsNormAbsPath(path):
    raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
                                 " absolute/normalized: '%s'" % path)
  # Dot-files are considered hidden
  return [name for name in os.listdir(path) if not name.startswith(".")]
1688
def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.

  The user can be passed either as a string (denoting the name) or as
  an integer (denoting the user id). If the user is not found, the
  'default' argument is returned, which defaults to None.

  @param user: user name or numeric user id
  @param default: value returned when the user cannot be found
  @return: the user's home directory, or C{default} on lookup failure
  @raise errors.ProgrammerError: if C{user} is neither a string nor an
      integer

  """
  # Dispatch on the argument type; names are looked up by name, numbers
  # by uid
  if isinstance(user, basestring):
    lookup = pwd.getpwnam
  elif isinstance(user, (int, long)):
    lookup = pwd.getpwuid
  else:
    raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
                                 type(user))

  try:
    return lookup(user).pw_dir
  except KeyError:
    return default
1709
def NewUUID():
  """Returns a random UUID.

  @note: This is a Linux-specific method as it uses the /proc
      filesystem.
  @rtype: str

  """
  # The kernel generates a fresh UUID on every read of this file
  uuid_str = ReadFile(_RANDOM_UUID_FILE, size=128)
  return uuid_str.rstrip("\n")
1720
def GenerateSecret(numbytes=20):
  """Generates a random secret.

  This will generate a pseudo-random secret returning an hex string
  (so that it can be used where an ASCII string is needed).

  @param numbytes: the number of bytes which will be represented by the
      returned string (defaulting to 20, the length of a SHA1 hash)
  @rtype: str
  @return: an hex representation of the pseudo-random sequence

  """
  raw = os.urandom(numbytes)
  return raw.encode('hex')
1735
def EnsureDirs(dirs):
  """Make required directories, if they don't exist.

  @param dirs: list of tuples (dir_name, dir_mode)
  @type dirs: list of (string, integer)
  @raise errors.GenericError: if creation or chmod fails, or the path
      exists but is not a directory

  """
  def _EnsureOne(dir_name, dir_mode):
    # Create the directory; an already-existing one is fine and gets its
    # mode adjusted below
    try:
      os.mkdir(dir_name, dir_mode)
    except EnvironmentError as err:
      if err.errno != errno.EEXIST:
        raise errors.GenericError("Cannot create needed directory"
                                  " '%s': %s" % (dir_name, err))
    # Explicit chmod, since mkdir's mode argument is affected by umask
    try:
      os.chmod(dir_name, dir_mode)
    except EnvironmentError as err:
      raise errors.GenericError("Cannot change directory permissions on"
                                " '%s': %s" % (dir_name, err))
    if not os.path.isdir(dir_name):
      raise errors.GenericError("%s is not a directory" % dir_name)

  for dir_name, dir_mode in dirs:
    _EnsureOne(dir_name, dir_mode)
1758
def ReadFile(file_name, size=-1):
  """Reads a file.

  @type file_name: str
  @param file_name: the path of the file to read
  @type size: int
  @param size: Read at most size bytes (if negative, entire file)
  @rtype: str
  @return: the (possibly partial) content of the file

  """
  with open(file_name, "r") as fh:
    return fh.read(size)
1774
def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
  """(Over)write a file atomically.

  The file_name and either fn (a function taking one argument, the
  file descriptor, and which should write the data to it) or data (the
  contents of the file) must be passed. The other arguments are
  optional and allow setting the file mode, owner and group, and the
  mtime/atime of the file.

  If the function doesn't raise an exception, it has succeeded and the
  target file has the new contents. If the function has raised an
  exception, an existing target file should be unmodified and the
  temporary file should be removed.

  @type file_name: str
  @param file_name: the target filename
  @type fn: callable
  @param fn: content writing function, called with
      file descriptor as parameter
  @type data: str
  @param data: contents of the file
  @type mode: int
  @param mode: file mode
  @type uid: int
  @param uid: the owner of the file
  @type gid: int
  @param gid: the group of the file
  @type atime: int
  @param atime: a custom access time to be set on the file
  @type mtime: int
  @param mtime: a custom modification time to be set on the file
  @type close: boolean
  @param close: whether to close file after writing it
  @type dry_run: boolean
  @param dry_run: if True, do everything except the final rename over the
      target file (the temporary file is still removed)
  @type backup: boolean
  @param backup: if True and the target exists, make a timestamped backup
      copy of it first
  @type prewrite: callable
  @param prewrite: function to be called before writing content
  @type postwrite: callable
  @param postwrite: function to be called after writing content

  @rtype: None or int
  @return: None if the 'close' parameter evaluates to True,
      otherwise the file descriptor

  @raise errors.ProgrammerError: if any of the arguments are not valid

  """
  if not os.path.isabs(file_name):
    raise errors.ProgrammerError("Path passed to WriteFile is not"
                                 " absolute: '%s'" % file_name)

  # Exactly one of fn/data must be given
  if [fn, data].count(None) != 1:
    raise errors.ProgrammerError("fn or data required")

  # atime and mtime are passed to os.utime() together, so either both or
  # neither must be set
  if [atime, mtime].count(None) == 1:
    raise errors.ProgrammerError("Both atime and mtime must be either"
                                 " set or None")

  if backup and not dry_run and os.path.isfile(file_name):
    CreateBackup(file_name)

  # Write to a temporary file in the same directory and rename it over the
  # target afterwards, so readers always see either the old or the complete
  # new contents (rename is atomic within a filesystem)
  dir_name, base_name = os.path.split(file_name)
  fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
  do_remove = True
  # here we need to make sure we remove the temp file, if any error
  # leaves it in place
  try:
    # Ownership and mode are set before writing the content, so the file
    # never exists with final data but wrong permissions
    if uid != -1 or gid != -1:
      os.chown(new_name, uid, gid)
    # NOTE(review): truthiness check, so a mode of 0 would be skipped here
    if mode:
      os.chmod(new_name, mode)
    if callable(prewrite):
      prewrite(fd)
    if data is not None:
      os.write(fd, data)
    else:
      fn(fd)
    if callable(postwrite):
      postwrite(fd)
    # Flush content to disk before publishing the file via rename
    os.fsync(fd)
    if atime is not None and mtime is not None:
      os.utime(new_name, (atime, mtime))
    if not dry_run:
      os.rename(new_name, file_name)
      # The temporary file has become the target, nothing left to clean up
      do_remove = False
  finally:
    if close:
      os.close(fd)
      result = None
    else:
      result = fd
    if do_remove:
      RemoveFile(new_name)

  return result
1873
def GetFileID(path=None, fd=None):
  """Returns the file 'id', i.e. the dev/inode and mtime information.

  Either the path to the file or the fd must be given.

  @param path: the file path
  @param fd: a file descriptor
  @return: a tuple of (device number, inode number, mtime)
  @raise errors.ProgrammerError: unless exactly one of path/fd is given

  """
  if [path, fd].count(None) != 1:
    raise errors.ProgrammerError("One and only one of fd/path must be given")

  if fd is not None:
    st = os.fstat(fd)
  else:
    st = os.stat(path)

  return (st.st_dev, st.st_ino, st.st_mtime)
1894
def VerifyFileID(fi_disk, fi_ours):
  """Verifies that two file IDs are matching.

  Differences in the inode/device are not accepted, but an older
  timestamp for fi_disk is accepted.

  @param fi_disk: tuple (dev, inode, mtime) representing the actual
      file data
  @param fi_ours: tuple (dev, inode, mtime) representing the last
      written file data
  @rtype: boolean

  """
  (disk_dev, disk_ino, disk_mtime) = fi_disk
  (our_dev, our_ino, our_mtime) = fi_ours

  # Device and inode must match exactly; the on-disk timestamp may only
  # be older than (or equal to) the one we recorded
  if (disk_dev, disk_ino) != (our_dev, our_ino):
    return False
  return disk_mtime <= our_mtime
1913
def SafeWriteFile(file_name, file_id, **kwargs):
  """Wrapper over L{WriteFile} that locks the target file.

  By keeping the target file locked during WriteFile, we ensure that
  cooperating writers will safely serialise access to the file.

  @type file_name: str
  @param file_name: the target filename
  @type file_id: tuple
  @param file_id: a result from L{GetFileID}
  @raise errors.LockError: if C{file_id} is given and the on-disk file
      no longer matches it

  """
  # O_CREAT so that locking also works when the file doesn't exist yet
  fd = os.open(file_name, os.O_RDONLY | os.O_CREAT)
  try:
    LockFile(fd)
    if file_id is not None and not VerifyFileID(GetFileID(fd=fd), file_id):
      raise errors.LockError("Cannot overwrite file %s, it has been modified"
                             " since last written" % file_name)
    return WriteFile(file_name, **kwargs)
  finally:
    os.close(fd)
1938
def ReadOneLineFile(file_name, strict=False):
  """Return the first non-empty line from a file.

  @type file_name: str
  @param file_name: the file to read
  @type strict: boolean
  @param strict: if True, abort if the file has more than one
      non-empty line
  @rtype: str
  @return: the first non-empty line
  @raise errors.GenericError: if the file is empty, or (in strict mode)
      contains more than one non-empty line

  """
  interesting = [line for line in ReadFile(file_name).splitlines() if line]
  if not interesting:
    raise errors.GenericError("No data in one-liner file %s" % file_name)
  if strict and len(interesting) > 1:
    raise errors.GenericError("Too many lines in one-liner file %s" %
                              file_name)
  return interesting[0]
1956
def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.

  The seq argument should be a sorted list of positive integers. The
  first time the index of an element is smaller than the element
  value, the index will be returned.

  The base argument is used to start at a different offset,
  i.e. C{[3, 4, 6]} with I{offset=3} will return 5.

  Example: C{[0, 1, 3]} will return I{2}.

  @type seq: sequence
  @param seq: the sequence to be analyzed.
  @type base: int
  @param base: use this value as the base index of the sequence
  @rtype: int
  @return: the first non-used index in the sequence

  """
  for offset, value in enumerate(seq):
    assert value >= base, "Passed element is higher than base offset"
    # A value larger than its expected slot means that slot is free
    if value > offset + base:
      return offset + base
  return None
1984
def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Immediately returns at the first interruption.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  # Error-like conditions are always of interest, even if not requested
  check = (event | select.POLLPRI |
           select.POLLNVAL | select.POLLHUP | select.POLLERR)

  if timeout is not None:
    # Poller object expects milliseconds
    timeout *= 1000

  poller = select.poll()
  poller.register(fdobj, event)
  try:
    # TODO: If the main thread receives a signal and we have no timeout, we
    # could wait forever. This should check a global "quit" flag or something
    # every so often.
    io_events = poller.poll(timeout)
  except select.error as err:
    # A signal interruption is treated like a timeout
    if err.args[0] != errno.EINTR:
      raise
    io_events = []

  if io_events:
    (_, revents) = io_events[0]
    if revents & check:
      return revents

  return None
2023
class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """
  def __init__(self, timeout):
    # Remaining time; updated by the retry machinery via UpdateTimeout
    self.timeout = timeout

  def Poll(self, fdobj, event):
    """Poll once, asking the retry machinery to try again on timeout."""
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if result is None:
      raise RetryAgain()
    return result

  def UpdateTimeout(self, timeout):
    """Record the time remaining until the overall deadline."""
    self.timeout = timeout
2046
def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.

  Retries until the timeout is expired, even if interrupted.

  @type fdobj: integer or object supporting a fileno() method
  @param fdobj: entity to wait for events on
  @type event: integer
  @param event: ORed condition (see select module)
  @type timeout: float or None
  @param timeout: Timeout in seconds
  @rtype: int or None
  @return: None for timeout, otherwise occurred conditions

  """
  if timeout is None:
    # No deadline: keep polling until some condition shows up
    result = None
    while result is None:
      result = SingleWaitForFdCondition(fdobj, event, timeout)
    return result

  # With a deadline, let the retry machinery keep track of the time
  # remaining across interruptions
  retrywaiter = FdConditionWaiterHelper(timeout)
  try:
    return Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
                 args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
  except RetryTimeout:
    return None
2075
def UniqueSequence(seq):
  """Returns a list with unique elements.

  Element order is preserved.

  @type seq: sequence
  @param seq: the sequence with the source elements
  @rtype: list
  @return: list of unique elements from seq

  """
  seen = set()
  result = []
  for item in seq:
    if item not in seen:
      seen.add(item)
      result.append(item)
  return result
2090
def NormalizeAndValidateMac(mac):
  """Normalizes and check if a MAC address is valid.

  Checks whether the supplied MAC address is formally correct, only
  accepts colon separated format. Normalize it to all lower.

  @type mac: str
  @param mac: the MAC to be validated
  @rtype: str
  @return: returns the normalized and validated MAC.

  @raise errors.OpPrereqError: If the MAC isn't valid

  """
  if _MAC_CHECK.match(mac) is None:
    raise errors.OpPrereqError("Invalid MAC address specified: %s" %
                               mac, errors.ECODE_INVAL)

  # Normalize to lower case for consistent comparisons
  return mac.lower()
2111
def TestDelay(duration):
  """Sleep for a fixed amount of time.

  @type duration: float
  @param duration: the sleep duration
  @rtype: tuple
  @return: (False, error message) for a negative duration,
      (True, None) after sleeping otherwise

  """
  if duration >= 0:
    time.sleep(duration)
    return True, None
  return False, "Invalid sleep duration"
2126
2127 2128 -def _CloseFDNoErr(fd, retries=5):
2129 """Close a file descriptor ignoring errors. 2130 2131 @type fd: int 2132 @param fd: the file descriptor 2133 @type retries: int 2134 @param retries: how many retries to make, in case we get any 2135 other error than EBADF 2136 2137 """ 2138 try: 2139 os.close(fd) 2140 except OSError, err: 2141 if err.errno != errno.EBADF: 2142 if retries > 0: 2143 _CloseFDNoErr(fd, retries - 1)
2144 # else either it's closed already or we're out of retries, so we
2145 # ignore this and go on 2146 2147 2148 -def CloseFDs(noclose_fds=None):
2149 """Close file descriptors. 2150 2151 This closes all file descriptors above 2 (i.e. except 2152 stdin/out/err). 2153 2154 @type noclose_fds: list or None 2155 @param noclose_fds: if given, it denotes a list of file descriptor 2156 that should not be closed 2157 2158 """ 2159 # Default maximum for the number of available file descriptors. 2160 if 'SC_OPEN_MAX' in os.sysconf_names: 2161 try: 2162 MAXFD = os.sysconf('SC_OPEN_MAX') 2163 if MAXFD < 0: 2164 MAXFD = 1024 2165 except OSError: 2166 MAXFD = 1024 2167 else: 2168 MAXFD = 1024 2169 maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] 2170 if (maxfd == resource.RLIM_INFINITY): 2171 maxfd = MAXFD 2172 2173 # Iterate through and close all file descriptors (except the standard ones) 2174 for fd in range(3, maxfd): 2175 if noclose_fds and fd in noclose_fds: 2176 continue 2177 _CloseFDNoErr(fd)
2178
def Mlockall(_ctypes=ctypes):
  """Lock current process' virtual address space into RAM.

  This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
  see mlock(2) for more details. This function requires ctypes module.

  @param _ctypes: testing parameter; the ctypes module to use (passing
      None simulates a missing ctypes module)
  @raises errors.NoCtypesError: if ctypes module is not found

  """
  if _ctypes is None:
    raise errors.NoCtypesError()

  # Load libc by soname; on Linux/glibc this is always "libc.so.6"
  libc = _ctypes.cdll.LoadLibrary("libc.so.6")
  if libc is None:
    logging.error("Cannot set memory lock, ctypes cannot load libc")
    return

  # Some older version of the ctypes module don't have built-in functionality
  # to access the errno global variable, where function error codes are stored.
  # By declaring this variable as a pointer to an integer we can then access
  # its value correctly, should the mlockall call fail, in order to see what
  # the actual error code was.
  # pylint: disable-msg=W0212
  libc.__errno_location.restype = _ctypes.POINTER(_ctypes.c_int)

  # _MCL_CURRENT/_MCL_FUTURE are module-level constants (presumably the
  # MCL_CURRENT/MCL_FUTURE flag values from <sys/mman.h> -- confirm at their
  # definition); a non-zero return value indicates failure
  if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
    # pylint: disable-msg=W0212
    logging.error("Cannot set memory lock: %s",
                  os.strerror(libc.__errno_location().contents.value))
    return

  logging.debug("Memory lock set")
2212
def Daemonize(logfile):
  """Daemonize the current process.

  This detaches the current process from the controlling terminal and
  runs it in the background as a daemon, using the classic double-fork
  technique.

  @type logfile: str
  @param logfile: the logfile to which we should redirect stdout/stderr
  @rtype: int
  @return: the write end of the error-reporting pipe; note that only the
      daemon process (the second child) ever returns from this function,
      both parents exit via C{os._exit}

  """
  # pylint: disable-msg=W0212
  # yes, we really want os._exit

  # TODO: do another attempt to merge Daemonize and StartDaemon, or at
  # least abstract the pipe functionality between them

  # Create pipe for sending error messages
  (rpipe, wpipe) = os.pipe()

  # this might fail
  pid = os.fork()
  if (pid == 0):  # The first child.
    SetupDaemonEnv()

    # this might fail
    pid = os.fork() # Fork a second child.
    if (pid == 0):  # The second child.
      # This is the daemon process; it only keeps the write end of the
      # pipe, used to report startup errors to the original process
      _CloseFDNoErr(rpipe)
    else:
      # exit() or _exit()? See below.
      os._exit(0) # Exit parent (the first child) of the second child.
  else:
    # Original process: keep only the read end and wait for the daemon
    _CloseFDNoErr(wpipe)
    # Wait for daemon to be started (or an error message to
    # arrive) and read up to 100 KB as an error message
    errormsg = RetryOnSignal(os.read, rpipe, 100 * 1024)
    if errormsg:
      sys.stderr.write("Error when starting daemon process: %r\n" % errormsg)
      rcode = 1
    else:
      rcode = 0
    # os._exit skips atexit/cleanup handlers, which belong to the daemon
    os._exit(rcode) # Exit parent of the first child.

  SetupDaemonFDs(logfile, None)
  return wpipe
2261
def DaemonPidFileName(name):
  """Compute a ganeti pid file absolute path

  @type name: str
  @param name: the daemon name
  @rtype: str
  @return: the full path to the pidfile corresponding to the given
    daemon name

  """
  basename = "%s.pid" % name
  return PathJoin(constants.RUN_GANETI_DIR, basename)
2274
def EnsureDaemon(name):
  """Check for and start daemon if not alive.

  @type name: str
  @param name: the daemon name
  @rtype: bool
  @return: True if the daemon is running (or was started), False if the
    start attempt failed

  """
  result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
  if not result.failed:
    return True

  logging.error("Can't start daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
2287
def StopDaemon(name):
  """Stop daemon

  @type name: str
  @param name: the daemon name
  @rtype: bool
  @return: True if the stop command succeeded, False otherwise

  """
  result = RunCmd([constants.DAEMON_UTIL, "stop", name])
  if not result.failed:
    return True

  logging.error("Can't stop daemon '%s', failure %s, output: %s",
                name, result.fail_reason, result.output)
  return False
2300
def WritePidFile(pidfile):
  """Write the current process pidfile.

  The file is opened without truncation and locked before being
  written, so that an existing live daemon's lock is never dropped.

  @type pidfile: str
  @param pidfile: the path to the file to be written
  @raise errors.LockError: if the pid file already exists and
    points to a live process
  @rtype: int
  @return: the file descriptor of the lock file; do not close this unless
    you want to unlock the pid file

  """
  # We don't rename nor truncate the file to not drop locks under
  # existing processes
  fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)

  # Lock the PID file (and fail if not possible to do so). Any code
  # wanting to send a signal to the daemon should try to lock the PID
  # file before reading it. If acquiring the lock succeeds, the daemon is
  # no longer running and the signal should not be sent.
  LockFile(fd_pidfile)

  os.write(fd_pidfile, "%d\n" % os.getpid())

  return fd_pidfile
2327
def RemovePidFile(name):
  """Remove the current process pidfile.

  Any errors are ignored.

  @type name: str
  @param name: the daemon name used to derive the pidfile name

  """
  # TODO: we could check here that the file contains our pid
  try:
    RemoveFile(DaemonPidFileName(name))
  except: # pylint: disable-msg=W0702
    # removal is best-effort only
    pass
2344
def KillProcess(pid, signal_=signal.SIGTERM, timeout=30,
                waitpid=False):
  """Kill a process given by its pid.

  @type pid: int
  @param pid: The PID to terminate.
  @type signal_: int
  @param signal_: The signal to send, by default SIGTERM
  @type timeout: int
  @param timeout: The timeout after which, if the process is still alive,
    a SIGKILL will be sent. If not positive, no such checking
    will be done
  @type waitpid: boolean
  @param waitpid: If true, we should waitpid on this process after
    sending signals, since it's our own child and otherwise it
    would remain as zombie
  @raise errors.ProgrammerError: if C{pid} is not strictly positive

  """
  def _helper(pid, signal_, wait):
    """Simple helper to encapsulate the kill/waitpid sequence"""
    if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
      try:
        # non-blocking reap; the process may not have exited yet
        os.waitpid(pid, os.WNOHANG)
      except OSError:
        pass

  if pid <= 0:
    # kill with pid=0 == suicide
    raise errors.ProgrammerError("Invalid pid given '%s'" % pid)

  if not IsProcessAlive(pid):
    return

  # first attempt with the caller-supplied signal
  _helper(pid, signal_, waitpid)

  if timeout <= 0:
    return

  def _CheckProcess():
    # succeeds (returns) once the process is gone or reaped; raising
    # RetryAgain asks the Retry loop to poll again
    if not IsProcessAlive(pid):
      return

    try:
      (result_pid, _) = os.waitpid(pid, os.WNOHANG)
    except OSError:
      raise RetryAgain()

    if result_pid > 0:
      return

    raise RetryAgain()

  try:
    # Wait up to $timeout seconds
    Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
  except RetryTimeout:
    pass

  if IsProcessAlive(pid):
    # Kill process if it's still alive
    _helper(pid, signal.SIGKILL, waitpid)
def FindFile(name, search_path, test=os.path.exists):
  """Look for a filesystem object in a given path.

  This is an abstract method to search for filesystem object (files,
  dirs) under a given search path.

  @type name: str
  @param name: the name to look for
  @type search_path: str
  @param search_path: location to start at
  @type test: callable
  @param test: a function taking one argument that should return True
    if the a given object is valid; the default value is
    os.path.exists, causing only existing files to be returned
  @rtype: str or None
  @return: full path to the object if found, None otherwise

  """
  # reject names not matching the external plugin mask
  if constants.EXT_PLUGIN_MASK.match(name) is None:
    logging.critical("Invalid value passed for external script name: '%s'",
                     name)
    return None

  for candidate_dir in search_path:
    # FIXME: investigate switch to PathJoin
    candidate = os.path.sep.join([candidate_dir, name])
    # accept only entries passing the user test which still resolve to
    # the requested basename
    if test(candidate) and os.path.basename(candidate) == name:
      return candidate

  return None
2441
def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.

  The function will check if a given volume group is in the list of
  volume groups and has a minimum size.

  @type vglist: dict
  @param vglist: dictionary of volume group names and their size
  @type vgname: str
  @param vgname: the volume group we should check
  @type minsize: int
  @param minsize: the minimum size we accept
  @rtype: None or str
  @return: None for success, otherwise the error message

  """
  try:
    vgsize = vglist[vgname]
  except KeyError:
    return "volume group '%s' missing" % vgname

  if vgsize < minsize:
    return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
            (vgname, minsize, vgsize))

  return None
2466
def SplitTime(value):
  """Splits time as floating point number into a tuple.

  @param value: Time in seconds
  @type value: int or float
  @return: Tuple containing (seconds, microseconds)

  """
  total_micro = int(value * 1000000)
  seconds = total_micro // 1000000
  microseconds = total_micro - (seconds * 1000000)

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  return (int(seconds), int(microseconds))
2484
def MergeTime(timetuple):
  """Merges a tuple into time as a floating point number.

  Counterpart of L{SplitTime}.

  @param timetuple: Time as tuple, (seconds, microseconds)
  @type timetuple: tuple
  @return: Time as a floating point number expressed in seconds

  """
  (seconds, microseconds) = timetuple

  assert 0 <= seconds, \
    "Seconds must be larger than or equal to 0, but are %s" % seconds
  assert 0 <= microseconds <= 999999, \
    "Microseconds must be 0-999999, but are %s" % microseconds

  # note: multiplication by 0.000001 kept as-is to preserve the exact
  # float result of the original implementation
  fractional = float(microseconds) * 0.000001
  return float(seconds) + fractional
2502
class LogFileHandler(logging.FileHandler):
  """Log handler that doesn't fallback to stderr.

  When an error occurs while writing on the logfile, logging.FileHandler tries
  to log on stderr. This doesn't work in ganeti since stderr is redirected to
  the logfile. This class avoids failures reporting errors to /dev/console.

  """
  def __init__(self, filename, mode="a", encoding=None):
    """Open the specified file and use it as the stream for logging.

    Also open /dev/console to report errors while logging.

    @type filename: str
    @param filename: path of the log file
    @type mode: str
    @param mode: file open mode, append by default
    @type encoding: str or None
    @param encoding: encoding passed through to logging.FileHandler

    """
    logging.FileHandler.__init__(self, filename, mode, encoding)
    # kept open for the lifetime of the handler; used only as the
    # fallback destination in handleError
    self.console = open(constants.DEV_CONSOLE, "a")

  def handleError(self, record): # pylint: disable-msg=C0103
    """Handle errors which occur during an emit() call.

    Try to handle errors with FileHandler method, if it fails write to
    /dev/console.

    """
    try:
      logging.FileHandler.handleError(self, record)
    except Exception: # pylint: disable-msg=W0703
      try:
        self.console.write("Cannot log message:\n%s\n" % self.format(record))
      except Exception: # pylint: disable-msg=W0703
        # Log handler tried everything it could, now just give up
        pass
2536
def SetupLogging(logfile, debug=0, stderr_logging=False, program="",
                 multithreaded=False, syslog=constants.SYSLOG_USAGE,
                 console_logging=False):
  """Configures the logging module.

  @type logfile: str
  @param logfile: the filename to which we should log
  @type debug: integer
  @param debug: if greater than zero, enable debug messages, otherwise
      only those at C{INFO} and above level
  @type stderr_logging: boolean
  @param stderr_logging: whether we should also log to the standard error
  @type program: str
  @param program: the name under which we should log messages
  @type multithreaded: boolean
  @param multithreaded: if True, will add the thread name to the log file
  @type syslog: string
  @param syslog: one of 'no', 'yes', 'only':
      - if no, syslog is not used
      - if yes, syslog is used (in addition to file-logging)
      - if only, only syslog is used
  @type console_logging: boolean
  @param console_logging: if True, will use a FileHandler which falls back to
      the system console if logging fails
  @raise EnvironmentError: if we can't open the log file and
      syslog/stderr logging is disabled

  """
  fmt = "%(asctime)s: " + program + " pid=%(process)d"
  sft = program + "[%(process)d]:"
  if multithreaded:
    fmt += "/%(threadName)s"
    sft += " (%(threadName)s)"
  if debug:
    fmt += " %(module)s:%(lineno)s"
    # no debug info for syslog loggers
  fmt += " %(levelname)s %(message)s"
  # yes, we do want the textual level, as remote syslog will probably
  # lose the error level, and it's easier to grep for it
  sft += " %(levelname)s %(message)s"
  formatter = logging.Formatter(fmt)
  sys_fmt = logging.Formatter(sft)

  root_logger = logging.getLogger("")
  root_logger.setLevel(logging.NOTSET)

  # Remove all previously setup handlers. Iterate over a copy of the
  # list: removeHandler() mutates root_logger.handlers, and removing
  # elements from the list being iterated would skip every other handler.
  for handler in root_logger.handlers[:]:
    handler.close()
    root_logger.removeHandler(handler)

  if stderr_logging:
    stderr_handler = logging.StreamHandler()
    stderr_handler.setFormatter(formatter)
    if debug:
      stderr_handler.setLevel(logging.NOTSET)
    else:
      stderr_handler.setLevel(logging.CRITICAL)
    root_logger.addHandler(stderr_handler)

  if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
    facility = logging.handlers.SysLogHandler.LOG_DAEMON
    syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
                                                    facility)
    syslog_handler.setFormatter(sys_fmt)
    # Never enable debug over syslog
    syslog_handler.setLevel(logging.INFO)
    root_logger.addHandler(syslog_handler)

  if syslog != constants.SYSLOG_ONLY:
    # this can fail, if the logging directories are not setup or we have
    # a permisssion problem; in this case, it's best to log but ignore
    # the error if stderr_logging is True, and if false we re-raise the
    # exception since otherwise we could run but without any logs at all
    try:
      if console_logging:
        logfile_handler = LogFileHandler(logfile)
      else:
        logfile_handler = logging.FileHandler(logfile)
      logfile_handler.setFormatter(formatter)
      if debug:
        logfile_handler.setLevel(logging.DEBUG)
      else:
        logfile_handler.setLevel(logging.INFO)
      root_logger.addHandler(logfile_handler)
    except EnvironmentError:
      if stderr_logging or syslog == constants.SYSLOG_YES:
        logging.exception("Failed to enable logging to file '%s'", logfile)
      else:
        # we need to re-raise the exception
        raise
2629
def IsNormAbsPath(path):
  """Check whether a path is absolute and also normalized

  This avoids things like /dir/../../other/path to be valid.

  @type path: str
  @param path: the path to check
  @rtype: bool
  @return: True if C{path} is absolute and already in normalized form

  """
  if not os.path.isabs(path):
    return False
  return os.path.normpath(path) == path
2638
def PathJoin(*args):
  """Safe-join a list of path components.

  Requirements:
    - the first argument must be an absolute path
    - no component in the path must have backtracking (e.g. /../),
      since we check for normalization at the end

  @param args: the path components to be joined
  @raise ValueError: for invalid paths

  """
  # ensure we're having at least one path passed in
  assert args
  root = args[0]
  # the first component must already be an absolute, normalized path
  if not IsNormAbsPath(root):
    raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
  result = os.path.join(*args)
  # the joined result must still be normalized (no backtracking)
  if not IsNormAbsPath(result):
    raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
  # and must not have escaped the original root
  common = os.path.commonprefix([root, result])
  if common != root:
    raise ValueError("Error: path joining resulted in different prefix"
                     " (%s != %s)" % (common, root))
  return result
2668
def TailFile(fname, lines=20):
  """Return the last lines from a file.

  @note: this function will only read and parse the last 4KB of
      the file; if the lines are very long, it could be that less
      than the requested number of lines are returned

  @param fname: the file name
  @type lines: int
  @param lines: the (maximum) number of lines to return

  """
  handle = open(fname, "r")
  try:
    # seek to the end, then back up at most 4KB before reading
    handle.seek(0, 2)
    start = max(0, handle.tell() - 4096)
    handle.seek(start, 0)
    tail_data = handle.read()
  finally:
    handle.close()

  return tail_data.splitlines()[-lines:]
2694
def FormatTimestampWithTZ(secs):
  """Formats a Unix timestamp with the local timezone.

  @type secs: number
  @param secs: Unix timestamp (seconds since the epoch)
  @rtype: str
  @return: timestamp formatted as "YYYY-MM-DD HH:MM:SS ZONE"

  """
  # NOTE(review): time.gmtime() converts to UTC, but the %Z directive
  # renders the *local* timezone name for the struct_time it is given,
  # so the printed zone abbreviation may not match the printed (UTC)
  # time — confirm whether localtime() or a hardcoded "UTC" suffix was
  # intended here
  return time.strftime("%F %T %Z", time.gmtime(secs))
2701
2702 2703 -def _ParseAsn1Generalizedtime(value):
2704 """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL. 2705 2706 @type value: string 2707 @param value: ASN1 GENERALIZEDTIME timestamp 2708 2709 """ 2710 m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value) 2711 if m: 2712 # We have an offset 2713 asn1time = m.group(1) 2714 hours = int(m.group(2)) 2715 minutes = int(m.group(3)) 2716 utcoffset = (60 * hours) + minutes 2717 else: 2718 if not value.endswith("Z"): 2719 raise ValueError("Missing timezone") 2720 asn1time = value[:-1] 2721 utcoffset = 0 2722 2723 parsed = time.strptime(asn1time, "%Y%m%d%H%M%S") 2724 2725 tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset) 2726 2727 return calendar.timegm(tt.utctimetuple())
2728
def GetX509CertValidity(cert):
  """Returns the validity period of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @rtype: tuple
  @return: (not_before, not_after), each a Unix timestamp or None when
    the running pyOpenSSL version cannot provide it

  """
  # The get_notBefore and get_notAfter functions are only supported in
  # pyOpenSSL 0.7 and above, hence the guarded attribute lookups
  get_notbefore_fn = getattr(cert, "get_notBefore", None)
  if get_notbefore_fn is None:
    not_before = None
  else:
    not_before_asn1 = get_notbefore_fn()
    if not_before_asn1 is None:
      not_before = None
    else:
      not_before = _ParseAsn1Generalizedtime(not_before_asn1)

  get_notafter_fn = getattr(cert, "get_notAfter", None)
  if get_notafter_fn is None:
    not_after = None
  else:
    not_after_asn1 = get_notafter_fn()
    if not_after_asn1 is None:
      not_after = None
    else:
      not_after = _ParseAsn1Generalizedtime(not_after_asn1)

  return (not_before, not_after)
2764
def _VerifyCertificateInner(expired, not_before, not_after, now,
                            warn_days, error_days):
  """Verifies certificate validity.

  @type expired: bool
  @param expired: Whether pyOpenSSL considers the certificate as expired
  @type not_before: number or None
  @param not_before: Unix timestamp before which certificate is not valid
  @type not_after: number or None
  @param not_after: Unix timestamp after which certificate is invalid
  @type now: number
  @param now: Current time as Unix timestamp
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported
  @rtype: tuple
  @return: (status, message) where status is None, CERT_WARNING or CERT_ERROR

  """
  if expired:
    msg = "Certificate is expired"

    # add as much of the validity period as we actually know
    if not_before is not None and not_after is not None:
      msg += (" (valid from %s to %s)" %
              (FormatTimestampWithTZ(not_before),
               FormatTimestampWithTZ(not_after)))
    elif not_before is not None:
      msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
    elif not_after is not None:
      msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)

    return (CERT_ERROR, msg)

  if not_before is not None and not_before > now:
    return (CERT_WARNING,
            "Certificate not yet valid (valid from %s)" %
            FormatTimestampWithTZ(not_before))

  if not_after is not None:
    remaining_days = int((not_after - now) / (24 * 3600))

    msg = "Certificate expires in about %d days" % remaining_days

    if error_days is not None and remaining_days <= error_days:
      return (CERT_ERROR, msg)

    if warn_days is not None and remaining_days <= warn_days:
      return (CERT_WARNING, msg)

  return (None, None)
2815
def VerifyX509Certificate(cert, warn_days, error_days):
  """Verifies a certificate for LUVerifyCluster.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type warn_days: number or None
  @param warn_days: How many days before expiration a warning should be reported
  @type error_days: number or None
  @param error_days: How many days before expiration an error should be reported
  @rtype: tuple
  @return: (status, message) as produced by L{_VerifyCertificateInner}

  """
  # Depending on the pyOpenSSL version, this can just return (None, None)
  (not_before, not_after) = GetX509CertValidity(cert)

  expired = cert.has_expired()
  return _VerifyCertificateInner(expired, not_before, not_after,
                                 time.time(), warn_days, error_days)
2833
def SignX509Certificate(cert, key, salt):
  """Sign a X509 certificate.

  An RFC822-like signature header is added in front of the certificate.

  @type cert: OpenSSL.crypto.X509
  @param cert: X509 certificate object
  @type key: string
  @param key: Key for HMAC
  @type salt: string
  @param salt: Salt for HMAC
  @rtype: string
  @return: Serialized and signed certificate in PEM format

  """
  if not VALID_X509_SIGNATURE_SALT.match(salt):
    raise errors.GenericError("Invalid salt: %r" % salt)

  # Dumping as PEM here ensures the certificate is in a sane format
  cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  signature = Sha1Hmac(key, cert_pem, salt=salt)
  return ("%s: %s/%s\n\n%s" %
          (constants.X509_CERT_SIGNATURE_HEADER, salt, signature, cert_pem))
2860
def _ExtractX509CertificateSignature(cert_pem):
  """Helper function to extract signature from X509 certificate.

  @type cert_pem: string
  @param cert_pem: certificate in PEM format, preceded by the signature
    header line
  @rtype: tuple
  @return: (salt, signature) extracted from the header
  @raise errors.GenericError: if no signature header is found

  """
  # Only the header lines before the PEM body may carry the signature
  for raw_line in cert_pem.splitlines():
    if raw_line.startswith("---"):
      # reached the certificate itself without finding a signature
      break

    match = X509_SIGNATURE.match(raw_line.strip())
    if match is not None:
      return (match.group("salt"), match.group("sign"))

  raise errors.GenericError("X509 certificate signature is missing")
2876
def LoadSignedX509Certificate(cert_pem, key):
  """Verifies a signed X509 certificate.

  @type cert_pem: string
  @param cert_pem: Certificate in PEM format and with signature header
  @type key: string
  @param key: Key for HMAC
  @rtype: tuple; (OpenSSL.crypto.X509, string)
  @return: X509 certificate object and salt
  @raise errors.GenericError: if the signature is missing or invalid

  """
  (salt, signature) = _ExtractX509CertificateSignature(cert_pem)

  # Load the certificate, then dump it again so the HMAC is verified
  # over a canonical PEM representation
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
  sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)

  if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
    raise errors.GenericError("X509 certificate signature is invalid")

  return (cert, salt)
2901
def Sha1Hmac(key, text, salt=None):
  """Calculates the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: the text to digest
  @type salt: string or None
  @param salt: optional salt prepended to the text before hashing
  @rtype: string
  @return: hex-encoded HMAC-SHA1 digest

  """
  if salt:
    message = salt + text
  else:
    message = text

  return hmac.new(key, message, compat.sha1).hexdigest()
2919
def VerifySha1Hmac(key, text, digest, salt=None):
  """Verifies the HMAC-SHA1 digest of a text.

  HMAC is defined in RFC2104.

  @type key: string
  @param key: Secret key
  @type text: string
  @param text: the text whose digest is verified
  @type digest: string
  @param digest: Expected digest
  @type salt: string or None
  @param salt: optional salt, must match the one used for signing
  @rtype: bool
  @return: Whether HMAC-SHA1 digest matches

  """
  expected = Sha1Hmac(key, text, salt=salt)
  # case-insensitive hex comparison
  return expected.lower() == digest.lower()
2936
def SafeEncode(text):
  """Return a 'safe' version of a source string.

  This function mangles the input string and returns a version that
  should be safe to display/encode as ASCII. To this end, we first
  convert it to ASCII using the 'backslashreplace' encoding which
  should get rid of any non-ASCII chars, and then we process it
  through a loop copied from the string repr sources in the python; we
  don't use string_escape anymore since that escape single quotes and
  backslashes too, and that is too much; and that escaping is not
  stable, i.e. string_escape(string_escape(x)) != string_escape(x).

  @type text: str or unicode
  @param text: input data
  @rtype: str
  @return: a safe version of text

  """
  if isinstance(text, unicode):
    # only if unicode; if str already, we handle it below
    text = text.encode('ascii', 'backslashreplace')
  resu = ""
  for char in text:
    c = ord(char)
    if char == '\t':
      resu += r'\t'
    elif char == '\n':
      resu += r'\n'
    elif char == '\r':
      # previously this appended r'\'r' (the three characters backslash,
      # quote, "r") due to a typo; carriage returns must escape to \r
      resu += r'\r'
    elif c < 32 or c >= 127: # non-printable
      resu += "\\x%02x" % (c & 0xff)
    else:
      resu += char
  return resu
2973
def UnescapeAndSplit(text, sep=","):
  """Split and unescape a string based on a given separator.

  This function splits a string based on a separator where the
  separator itself can be escaped in order to be an element of the
  elements. The escaping rules are:
    - a plain separator splits the elements
    - a backslash directly before the separator makes the separator a
      literal part of the element
    - a doubled backslash before the separator stands for one literal
      backslash followed by a real separator

  @type text: string
  @param text: the string to split
  @type sep: string
  @param sep: the separator
  @rtype: list of strings
  @return: the split and unescaped elements

  """
  # split naively first; pieces whose tail carries an odd number of
  # backslashes had an escaped separator and are glued back together
  pieces = text.split(sep)
  joined = []
  while pieces:
    current = pieces.pop(0)
    trailing_backslashes = len(current) - len(current.rstrip("\\"))
    if trailing_backslashes % 2 == 1:
      # the backslashes are kept here (all of them); they're reduced by
      # the substitution below
      joined.append(current + sep + pieces.pop(0))
    else:
      joined.append(current)
  # finally, replace backslash-something with something
  return [re.sub(r"\\(.)", r"\1", piece) for piece in joined]
3015
def CommaJoin(names):
  """Nicely join a set of identifiers.

  @param names: set, list or tuple
  @return: a string with the formatted results

  """
  return ", ".join(str(val) for val in names)
3025
def FindMatch(data, name):
  """Tries to find an item in a dictionary matching a name.

  Callers have to ensure the data names aren't contradictory (e.g. a regexp
  that matches a string). If the name isn't a direct key, all regular
  expression objects in the dictionary are matched against it.

  @type data: dict
  @param data: Dictionary containing data
  @type name: string
  @param name: Name to look for
  @rtype: tuple; (value in dictionary, matched groups as list)
  @return: None when nothing matches

  """
  if name in data:
    return (data[name], [])

  for (key, value) in data.items():
    # keys that aren't plain strings are expected to be compiled
    # regular expression objects
    matcher = getattr(key, "match", None)
    if matcher is not None:
      found = matcher(name)
      if found:
        return (value, list(found.groups()))

  return None
3052
def BytesToMebibyte(value):
  """Converts bytes to mebibytes.

  @type value: int
  @param value: Value in bytes
  @rtype: int
  @return: Value in mebibytes, rounded to the nearest integer

  """
  mebibytes = value / (1024.0 * 1024.0)
  return int(round(mebibytes, 0))
3064
def CalculateDirectorySize(path):
  """Calculates the size of a directory recursively.

  @type path: string
  @param path: Path to directory
  @rtype: int
  @return: Size in mebibytes

  """
  total_bytes = 0

  for (curpath, _, files) in os.walk(path):
    # lstat: symlinks are counted by themselves, not followed
    total_bytes += sum(os.lstat(PathJoin(curpath, filename)).st_size
                       for filename in files)

  return BytesToMebibyte(total_bytes)
3083
def GetMounts(filename=constants.PROC_MOUNTS):
  """Returns the list of mounted filesystems.

  This function is Linux-specific.

  @param filename: path of mounts file (/proc/mounts by default)
  @rtype: list of tuples
  @return: list of mount entries (device, mountpoint, fstype, options)

  """
  # TODO(iustin): investigate non-Linux options (e.g. via mount output)
  entries = []
  for line in ReadFile(filename).splitlines():
    # fifth field (extra data) is deliberately discarded
    (device, mountpoint, fstype, options, _) = line.split(None, 4)
    entries.append((device, mountpoint, fstype, options))

  return entries
3103
def GetFilesystemStats(path):
  """Returns the total and free space on a filesystem.

  @type path: string
  @param path: Path on filesystem to be examined
  @rtype: int
  @return: tuple of (Total space, Free space) in mebibytes

  """
  stats = os.statvfs(path)

  # f_bavail: blocks available to unprivileged users
  free = BytesToMebibyte(stats.f_bavail * stats.f_frsize)
  total = BytesToMebibyte(stats.f_blocks * stats.f_frsize)
  return (total, free)
3119
def RunInSeparateProcess(fn, *args):
  """Runs a function in a separate process.

  Note: Only boolean return values are supported.

  @type fn: callable
  @param fn: Function to be called
  @rtype: bool
  @return: Function's result
  @raise errors.GenericError: if the child was killed by a signal or
    exited with a code other than 0 or 1

  """
  pid = os.fork()
  if pid == 0:
    # Child process
    try:
      # In case the function uses temporary files
      ResetTempfileModule()

      # Call function
      result = int(bool(fn(*args)))
      assert result in (0, 1)
    except: # pylint: disable-msg=W0702
      logging.exception("Error while calling function in separate process")
      # 0 and 1 are reserved for the return value
      result = 33

    # the exit code carries the boolean result back to the parent
    os._exit(result) # pylint: disable-msg=W0212

  # Parent process

  # Avoid zombies and check exit code
  (_, status) = os.waitpid(pid, 0)

  if os.WIFSIGNALED(status):
    exitcode = None
    signum = os.WTERMSIG(status)
  else:
    exitcode = os.WEXITSTATUS(status)
    signum = None

  if not (exitcode in (0, 1) and signum is None):
    raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
                              (exitcode, signum))

  return bool(exitcode)
3166
def IgnoreProcessNotFound(fn, *args, **kwargs):
  """Ignores ESRCH when calling a process-related function.

  ESRCH is raised when a process is not found.

  @type fn: callable
  @param fn: the process-related function to call (e.g. os.kill)
  @rtype: bool
  @return: Whether process was found

  """
  try:
    fn(*args, **kwargs)
  except EnvironmentError, err:
    # Ignore ESRCH
    if err.errno == errno.ESRCH:
      return False
    raise

  return True
3186
def IgnoreSignals(fn, *args, **kwargs):
  """Tries to call a function ignoring failures due to EINTR.

  @type fn: callable
  @param fn: the function to call
  @return: the function's result, or None if the call was interrupted
    by a signal (EINTR); any other error is re-raised

  """
  try:
    return fn(*args, **kwargs)
  except EnvironmentError, err:
    if err.errno == errno.EINTR:
      return None
    else:
      raise
  except (select.error, socket.error), err:
    # In python 2.6 and above select.error is an IOError, so it's handled
    # above, in 2.5 and below it's not, and it's handled here.
    if err.args and err.args[0] == errno.EINTR:
      return None
    else:
      raise
3206
def LockFile(fd):
  """Locks a file using POSIX locks.

  The lock is exclusive and non-blocking: if somebody else already
  holds it, we fail immediately instead of waiting.

  @type fd: int
  @param fd: the file descriptor we need to lock
  @raise errors.LockError: if the file is already locked

  """
  try:
    fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
  except IOError, err:
    # EAGAIN means the lock is held elsewhere; other errors propagate
    if err.errno == errno.EAGAIN:
      raise errors.LockError("File already locked")
    raise
3221
def FormatTime(val):
  """Formats a time value.

  @type val: float or None
  @param val: the timestamp as returned by time.time()
  @return: a string value or N/A if we don't have a valid timestamp

  """
  if not isinstance(val, (int, float)):
    # covers None and any other non-numeric input
    return "N/A"

  # these two codes works on Linux, but they are not guaranteed on all
  # platforms
  return time.strftime("%F %T", time.localtime(val))
3236
def FormatSeconds(secs):
  """Formats seconds for easier reading.

  @type secs: number
  @param secs: Number of seconds
  @rtype: string
  @return: Formatted seconds (e.g. "2d 9h 19m 49s")

  """
  secs = round(secs, 0)
  chunks = []

  if secs > 0:
    # Negative values would be a bit tricky
    for (suffix, span) in (("d", 24 * 60 * 60), ("h", 60 * 60), ("m", 60)):
      (count, secs) = divmod(secs, span)
      # leading zero-valued units are suppressed
      if count or chunks:
        chunks.append("%d%s" % (count, suffix))

  chunks.append("%ds" % secs)

  return " ".join(chunks)
3261
def ReadWatcherPauseFile(filename, now=None, remove_after=3600):
  """Reads the watcher pause file.

  @type filename: string
  @param filename: Path to watcher pause file
  @type now: None, float or int
  @param now: Current time as Unix timestamp
  @type remove_after: int
  @param remove_after: Remove watcher pause file after specified amount of
    seconds past the pause end time
  @rtype: int or None
  @return: the pause end time if a valid, still-active pause is set,
    otherwise None; invalid or long-outdated files are removed as a
    side effect

  """
  if now is None:
    now = time.time()

  try:
    value = ReadFile(filename)
  except IOError, err:
    # a missing file simply means no pause is set
    if err.errno != errno.ENOENT:
      raise
    value = None

  if value is not None:
    try:
      value = int(value)
    except ValueError:
      logging.warning(("Watcher pause file (%s) contains invalid value,"
                       " removing it"), filename)
      RemoveFile(filename)
      value = None

  if value is not None:
    # Remove file if it's outdated
    if now > (value + remove_after):
      RemoveFile(filename)
      value = None

    elif now > value:
      # pause has ended but the file is kept until remove_after passes
      value = None

  return value
3304
class RetryTimeout(Exception):
  """Retry loop timed out.

  Any arguments which the retried function passed to L{RetryAgain} are
  preserved here. If such an argument was an exception, the
  L{RaiseInner} helper method will re-raise it.

  """
  def RaiseInner(self):
    """Re-raise a preserved inner exception, or this timeout itself.

    """
    if self.args and isinstance(self.args[0], Exception):
      raise self.args[0]
    raise RetryTimeout(*self.args)
3319
class RetryAgain(Exception):
  """Exception raised by a retried function to request another attempt.

  Arguments given to RetryAgain are kept and, should the retry loop time
  out, become the arguments of the resulting L{RetryTimeout}. Passing an
  exception as argument allows L{RetryTimeout.RaiseInner} to reraise it.

  """
3329
3330 3331 -class _RetryDelayCalculator(object):
3332 """Calculator for increasing delays. 3333 3334 """ 3335 __slots__ = [ 3336 "_factor", 3337 "_limit", 3338 "_next", 3339 "_start", 3340 ] 3341
3342 - def __init__(self, start, factor, limit):
3343 """Initializes this class. 3344 3345 @type start: float 3346 @param start: Initial delay 3347 @type factor: float 3348 @param factor: Factor for delay increase 3349 @type limit: float or None 3350 @param limit: Upper limit for delay or None for no limit 3351 3352 """ 3353 assert start > 0.0 3354 assert factor >= 1.0 3355 assert limit is None or limit >= 0.0 3356 3357 self._start = start 3358 self._factor = factor 3359 self._limit = limit 3360 3361 self._next = start
3362
3363 - def __call__(self):
3364 """Returns current delay and calculates the next one. 3365 3366 """ 3367 current = self._next 3368 3369 # Update for next run 3370 if self._limit is None or self._next < self._limit: 3371 self._next = min(self._limit, self._next * self._factor) 3372 3373 return current
3374 3375 3376 #: Special delay to specify whole remaining timeout 3377 RETRY_REMAINING_TIME = object()
def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
          _time_fn=time.time):
  """Call a function repeatedly until it succeeds.

  The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
  anymore. Between calls a delay, specified by C{delay}, is inserted. After a
  total of C{timeout} seconds, this function throws L{RetryTimeout}.

  C{delay} can be one of the following:
    - callable returning the delay length as a float
    - Tuple of (start, factor, limit)
    - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
      useful when overriding L{wait_fn} to wait for an external event)
    - A static delay as a number (int or float)

  @type fn: callable
  @param fn: Function to be called
  @param delay: Either a callable (returning the delay), a tuple of (start,
                factor, limit) (see L{_RetryDelayCalculator}),
                L{RETRY_REMAINING_TIME} or a number (int or float)
  @type timeout: float
  @param timeout: Total timeout
  @type args: list or None
  @param args: Positional arguments passed to C{fn} on every attempt
  @type wait_fn: callable
  @param wait_fn: Waiting function
  @param _time_fn: Time function, only meant to be overridden by tests
  @return: Return value of function
  @raise RetryTimeout: When C{timeout} expires; carries the arguments of the
    last L{RetryAgain}

  """
  assert callable(fn)
  assert callable(wait_fn)
  assert callable(_time_fn)

  if args is None:
    args = []

  end_time = _time_fn() + timeout

  if callable(delay):
    # External function to calculate delay
    calc_delay = delay

  elif isinstance(delay, (tuple, list)):
    # Increasing delay with optional upper boundary
    (start, factor, limit) = delay
    calc_delay = _RetryDelayCalculator(start, factor, limit)

  elif delay is RETRY_REMAINING_TIME:
    # Always use the remaining time
    calc_delay = None

  else:
    # Static delay
    calc_delay = lambda: delay

  assert calc_delay is None or callable(calc_delay)

  while True:
    retry_args = []
    try:
      # pylint: disable-msg=W0142
      return fn(*args)
    except RetryAgain, err:
      # Remember the arguments so a later timeout can carry them over
      retry_args = err.args
    except RetryTimeout:
      raise errors.ProgrammerError("Nested retry loop detected that didn't"
                                   " handle RetryTimeout")

    remaining_time = end_time - _time_fn()

    if remaining_time < 0.0:
      # Timeout expired; propagate the last RetryAgain's arguments
      # pylint: disable-msg=W0142
      raise RetryTimeout(*retry_args)

    assert remaining_time >= 0.0

    if calc_delay is None:
      # RETRY_REMAINING_TIME: sleep (or wait for event) until the deadline
      wait_fn(remaining_time)
    else:
      current_delay = calc_delay()
      if current_delay > 0.0:
        wait_fn(current_delay)
3460
def GetClosedTempfile(*args, **kwargs):
  """Creates a temporary file, closes it and returns only its path.

  All arguments are passed through to L{tempfile.mkstemp}.

  """
  fd_and_path = tempfile.mkstemp(*args, **kwargs)
  # The caller only needs the path; release the descriptor right away
  _CloseFDNoErr(fd_and_path[0])
  return fd_and_path[1]
3469
def GenerateSelfSignedX509Cert(common_name, validity):
  """Generates a self-signed X509 certificate.

  @type common_name: string
  @param common_name: commonName value
  @type validity: int
  @param validity: Validity for certificate in seconds
  @rtype: tuple
  @return: PEM-encoded private key and PEM-encoded certificate

  """
  # RSA key serving as the certificate's key pair
  pkey = OpenSSL.crypto.PKey()
  pkey.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)

  # Self-signed certificate: issuer equals subject and it is signed with
  # its own key
  x509 = OpenSSL.crypto.X509()
  if common_name:
    x509.get_subject().CN = common_name
  x509.set_serial_number(1)
  x509.gmtime_adj_notBefore(0)
  x509.gmtime_adj_notAfter(validity)
  x509.set_issuer(x509.get_subject())
  x509.set_pubkey(pkey)
  x509.sign(pkey, constants.X509_CERT_SIGN_DIGEST)

  pem_key = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, pkey)
  pem_cert = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                             x509)

  return (pem_key, pem_cert)
3499
3500 3501 -def GenerateSelfSignedSslCert(filename, common_name=constants.X509_CERT_CN, 3502 validity=constants.X509_CERT_DEFAULT_VALIDITY):
3503 """Legacy function to generate self-signed X509 certificate. 3504 3505 @type filename: str 3506 @param filename: path to write certificate to 3507 @type common_name: string 3508 @param common_name: commonName value 3509 @type validity: int 3510 @param validity: validity of certificate in number of days 3511 3512 """ 3513 # TODO: Investigate using the cluster name instead of X505_CERT_CN for 3514 # common_name, as cluster-renames are very seldom, and it'd be nice if RAPI 3515 # and node daemon certificates have the proper Subject/Issuer. 3516 (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name, 3517 validity * 24 * 60 * 60) 3518 3519 WriteFile(filename, mode=0400, data=key_pem + cert_pem)
3520
class FileLock(object):
  """Utility class for file locks.

  Wraps C{fcntl.flock}-based locking; the lock is released when L{Close}
  is called or the instance is garbage-collected.

  """
  def __init__(self, fd, filename):
    """Constructor for FileLock.

    @type fd: file
    @param fd: File object
    @type filename: str
    @param filename: Path of the file opened at I{fd}

    """
    self.fd = fd
    self.filename = filename

  @classmethod
  def Open(cls, filename):
    """Creates and opens a file to be used as a file-based lock.

    @type filename: string
    @param filename: path to the file to be locked

    """
    # Using "os.open" is necessary to allow both opening existing file
    # read/write and creating if not existing. Vanilla "open" will truncate an
    # existing file -or- allow creating if not existing.
    return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
               filename)

  def __del__(self):
    # Release the lock even if the caller forgot to call Close()
    self.Close()

  def Close(self):
    """Close the file and release the lock.

    """
    # hasattr guards against a partially-constructed instance
    if hasattr(self, "fd") and self.fd:
      self.fd.close()
      self.fd = None

  def _flock(self, flag, blocking, timeout, errmsg):
    """Wrapper for fcntl.flock.

    @type flag: int
    @param flag: operation flag
    @type blocking: bool
    @param blocking: whether the operation should be done in blocking mode.
    @type timeout: None or float
    @param timeout: for how long the operation should be retried (implies
                    non-blocking mode).
    @type errmsg: string
    @param errmsg: error message in case operation fails.
    @raise errors.LockError: if a timeout was given and it expired

    """
    assert self.fd, "Lock was closed"
    assert timeout is None or timeout >= 0, \
      "If specified, timeout must be positive"
    assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"

    # When a timeout is used, LOCK_NB must always be set
    if not (timeout is None and blocking):
      flag |= fcntl.LOCK_NB

    if timeout is None:
      self._Lock(self.fd, flag, timeout)
    else:
      # Retry with an increasing delay (start 0.1s, factor 1.2, cap 1.0s)
      # until the lock is acquired or the timeout expires
      try:
        Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
              args=(self.fd, flag, timeout))
      except RetryTimeout:
        raise errors.LockError(errmsg)

  @staticmethod
  def _Lock(fd, flag, timeout):
    """Performs one C{fcntl.flock} attempt.

    When called with a timeout, EAGAIN (lock held by somebody else) is
    translated into L{RetryAgain} so L{Retry} keeps trying; any other
    error is logged and propagated.

    """
    try:
      fcntl.flock(fd, flag)
    except IOError, err:
      if timeout is not None and err.errno == errno.EAGAIN:
        raise RetryAgain()

      logging.exception("fcntl.flock failed")
      raise

  def Exclusive(self, blocking=False, timeout=None):
    """Locks the file in exclusive mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
                     can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
                    (in blocking mode)

    """
    self._flock(fcntl.LOCK_EX, blocking, timeout,
                "Failed to lock %s in exclusive mode" % self.filename)

  def Shared(self, blocking=False, timeout=None):
    """Locks the file in shared mode.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
                     can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
                    (in blocking mode)

    """
    self._flock(fcntl.LOCK_SH, blocking, timeout,
                "Failed to lock %s in shared mode" % self.filename)

  def Unlock(self, blocking=True, timeout=None):
    """Unlocks the file.

    According to C{flock(2)}, unlocking can also be a nonblocking
    operation::

      To make a non-blocking request, include LOCK_NB with any of the above
      operations.

    @type blocking: boolean
    @param blocking: whether to block and wait until we
                     can lock the file or return immediately
    @type timeout: int or None
    @param timeout: if not None, the duration to wait for the lock
                    (in blocking mode)

    """
    self._flock(fcntl.LOCK_UN, blocking, timeout,
                "Failed to unlock %s" % self.filename)
3653
class LineSplitter:
  """Splits data chunks into lines separated by newline.

  Instances provide a file-like interface.

  """
  def __init__(self, line_fn, *args):
    """Initializes this class.

    @type line_fn: callable
    @param line_fn: Function called for each line, first parameter is line
    @param args: Extra arguments for L{line_fn}

    """
    assert callable(line_fn)

    if args:
      # Python 2.4 doesn't have functools.partial yet
      self._line_fn = \
        lambda line: line_fn(line, *args) # pylint: disable-msg=W0142
    else:
      self._line_fn = line_fn

    self._lines = collections.deque()
    self._buffer = ""

  def write(self, data):
    # Everything up to the last newline is complete; the remainder stays
    # buffered until more data (or close) arrives
    pieces = (self._buffer + data).split("\n")
    self._buffer = pieces[-1]
    self._lines.extend(pieces[:-1])

  def flush(self):
    while self._lines:
      line = self._lines.popleft()
      self._line_fn(line.rstrip("\r\n"))

  def close(self):
    self.flush()
    if self._buffer:
      # Trailing data without a final newline is still reported as a line
      self._line_fn(self._buffer)
3694
def SignalHandled(signums):
  """Decorator installing signal handlers around a function call.

  The decorated function is invoked with a 'signal_handlers' keyword
  argument holding a dict that maps each intercepted signal number to the
  L{SignalHandler} instance watching it. Stacking the decorator (with
  different signals) is safe, as an existing dict is reused; the handlers
  are always reset afterwards.

  @type signums: list
  @param signums: signals to intercept

  """
  def wrap(fn):
    def sig_function(*args, **kwargs):
      existing = kwargs.get('signal_handlers')
      assert existing is None or isinstance(existing, dict), \
        "Wrong signal_handlers parameter in original function call"
      if existing is not None:
        signal_handlers = existing
      else:
        signal_handlers = {}
      kwargs['signal_handlers'] = signal_handlers
      sighandler = SignalHandler(signums)
      try:
        for sig in signums:
          signal_handlers[sig] = sighandler
        return fn(*args, **kwargs)
      finally:
        # Restore previous handlers even when fn raised
        sighandler.Reset()
    return sig_function
  return wrap
class SignalWakeupFd(object):
  """Wakeup file descriptor for signal notification.

  Wraps C{signal.set_wakeup_fd} with a pipe so that the arrival of a
  signal can be detected by select/poll on L{fileno}. Degrades to a no-op
  on Python versions without C{set_wakeup_fd}.

  """
  try:
    # This is only supported in Python 2.5 and above (some distributions
    # backported it to Python 2.4)
    _set_wakeup_fd_fn = signal.set_wakeup_fd
  except AttributeError:
    # Not supported

    def _SetWakeupFd(self, _): # pylint: disable-msg=R0201
      # Mimic set_wakeup_fd's "no previous fd" return value
      return -1
  else:

    def _SetWakeupFd(self, fd):
      return self._set_wakeup_fd_fn(fd)

  def __init__(self):
    """Initializes this class.

    """
    (read_fd, write_fd) = os.pipe()

    # Once these succeeded, the file descriptors will be closed automatically.
    # Buffer size 0 is important, otherwise .read() with a specified length
    # might buffer data and the file descriptors won't be marked readable.
    self._read_fh = os.fdopen(read_fd, "r", 0)
    self._write_fh = os.fdopen(write_fd, "w", 0)

    # Remember the previous wakeup fd so Reset() can restore it
    self._previous = self._SetWakeupFd(self._write_fh.fileno())

    # Utility functions
    self.fileno = self._read_fh.fileno
    self.read = self._read_fh.read

  def Reset(self):
    """Restores the previous wakeup file descriptor.

    """
    # hasattr guards against a partially-constructed instance
    if hasattr(self, "_previous") and self._previous is not None:
      self._SetWakeupFd(self._previous)
      self._previous = None

  def Notify(self):
    """Notifies the wakeup file descriptor.

    """
    self._write_fh.write("\0")

  def __del__(self):
    """Called before object deletion.

    """
    # Ensure the process-global wakeup fd is restored on destruction
    self.Reset()
3783
class SignalHandler(object):
  """Generic signal handler class.

  It automatically restores the original handler when deconstructed or
  when L{Reset} is called. You can either pass your own handler
  function in or query the L{called} attribute to detect whether the
  signal was sent.

  @type signum: list
  @ivar signum: the signals we handle
  @type called: boolean
  @ivar called: tracks whether any of the signals have been raised

  """
  def __init__(self, signum, handler_fn=None, wakeup=None):
    """Constructs a new SignalHandler instance.

    @type signum: int or list of ints
    @param signum: Single signal number or set of signal numbers
    @type handler_fn: callable
    @param handler_fn: Signal handling function
    @param wakeup: If set, its C{Notify} method is called on every signal
      (e.g. a L{SignalWakeupFd} instance)

    """
    assert handler_fn is None or callable(handler_fn)

    self.signum = set(signum)
    self.called = False

    self._handler_fn = handler_fn
    self._wakeup = wakeup

    # Map of signal number to the handler that was installed before us
    self._previous = {}
    try:
      for signum in self.signum:
        # Setup handler
        prev_handler = signal.signal(signum, self._HandleSignal)
        try:
          self._previous[signum] = prev_handler
        except:
          # Restore previous handler
          signal.signal(signum, prev_handler)
          raise
    except:
      # Reset all handlers
      self.Reset()
      # Here we have a race condition: a handler may have already been called,
      # but there's not much we can do about it at this point.
      raise

  def __del__(self):
    # Restore any handlers still installed when the object goes away
    self.Reset()

  def Reset(self):
    """Restore previous handler.

    This will reset all the signals to their previous handlers.

    """
    # NOTE: in Python 2, dict.items() returns a list, so removing entries
    # while iterating over it is safe here
    for signum, prev_handler in self._previous.items():
      signal.signal(signum, prev_handler)
      # If successful, remove from dict
      del self._previous[signum]

  def Clear(self):
    """Unsets the L{called} flag.

    This function can be used in case a signal may arrive several times.

    """
    self.called = False

  def _HandleSignal(self, signum, frame):
    """Actual signal handling function.

    """
    # This is not nice and not absolutely atomic, but it appears to be the only
    # solution in Python -- there are no atomic types.
    self.called = True

    if self._wakeup:
      # Notify whoever is interested in signals
      self._wakeup.Notify()

    if self._handler_fn:
      self._handler_fn(signum, frame)
3870
class FieldSet(object):
  """A simple field set.

  Among the features are:
    - checking if a string is among a list of static string or regex objects
    - checking if a whole list of string matches
    - returning the matching groups from a regex match

  Internally, all fields are held as regular expression objects.

  """
  def __init__(self, *items):
    """Initializes this class.

    @param items: Field patterns; each is anchored (C{^...$}) and compiled
      to a regular expression

    """
    self.items = [re.compile("^%s$" % value) for value in items]

  def Extend(self, other_set):
    """Extend the field set with the items from another one.

    @type other_set: L{FieldSet}
    @param other_set: Field set whose items are added to this one

    """
    self.items.extend(other_set.items)

  def Matches(self, field):
    """Checks if a field matches the current set

    @type field: str
    @param field: the string to match
    @return: either None or a regular expression match object

    """
    # Plain loop instead of itertools.ifilter, which only exists in
    # Python 2; the first matching expression wins
    for val in self.items:
      m = val.match(field)
      if m:
        return m
    return None

  def NonMatching(self, items):
    """Returns the list of fields not matching the current set

    @type items: list
    @param items: the list of fields to check
    @rtype: list
    @return: list of non-matching fields

    """
    return [val for val in items if not self.Matches(val)]
3912