"""Ganeti utility module.

This module holds functions that can be used in both daemons (all) and
the command line scripts.

"""
28
29
30 import os
31 import sys
32 import time
33 import subprocess
34 import re
35 import socket
36 import tempfile
37 import shutil
38 import errno
39 import pwd
40 import itertools
41 import select
42 import fcntl
43 import resource
44 import logging
45 import logging.handlers
46 import signal
47 import OpenSSL
48 import datetime
49 import calendar
50 import hmac
51 import collections
52
53 from cStringIO import StringIO
54
try:
  import ctypes
except ImportError:
  ctypes = None
60
61 from ganeti import errors
62 from ganeti import constants
63 from ganeti import compat
64 from ganeti import netutils
65
66
67 _locksheld = []
68 _re_shell_unquoted = re.compile('^[-.,=:/_+@A-Za-z0-9]+$')
69
70 debug_locks = False
71
72
73 no_fork = False
74
75 _RANDOM_UUID_FILE = "/proc/sys/kernel/random/uuid"
76
77 HEX_CHAR_RE = r"[a-zA-Z0-9]"
78 VALID_X509_SIGNATURE_SALT = re.compile("^%s+$" % HEX_CHAR_RE, re.S)
79 X509_SIGNATURE = re.compile(r"^%s:\s*(?P<salt>%s+)/(?P<sign>%s+)$" %
80 (re.escape(constants.X509_CERT_SIGNATURE_HEADER),
81 HEX_CHAR_RE, HEX_CHAR_RE),
82 re.S | re.I)
83
84 _VALID_SERVICE_NAME_RE = re.compile("^[-_.a-zA-Z0-9]{1,128}$")
85
86
87 (CERT_WARNING,
88 CERT_ERROR) = range(1, 3)
89
90
91 _MCL_CURRENT = 1
92 _MCL_FUTURE = 2
93
94
95 _MAC_CHECK = re.compile("^([0-9a-f]{2}:){5}[0-9a-f]{2}$", re.I)
class RunResult(object):
  """Holds the result of running external programs.
100
101 @type exit_code: int
102 @ivar exit_code: the exit code of the program, or None (if the program
103 didn't exit())
104 @type signal: int or None
105 @ivar signal: the signal that caused the program to finish, or None
106 (if the program wasn't terminated by a signal)
107 @type stdout: str
108 @ivar stdout: the standard output of the program
109 @type stderr: str
110 @ivar stderr: the standard error of the program
111 @type failed: boolean
112 @ivar failed: True in case the program was
113 terminated by a signal or exited with a non-zero exit code
114 @ivar fail_reason: a string detailing the termination reason
115
116 """
117 __slots__ = ["exit_code", "signal", "stdout", "stderr",
118 "failed", "fail_reason", "cmd"]
119
120
  def __init__(self, exit_code, signal_, stdout, stderr, cmd):
122 self.cmd = cmd
123 self.exit_code = exit_code
124 self.signal = signal_
125 self.stdout = stdout
126 self.stderr = stderr
127 self.failed = (signal_ is not None or exit_code != 0)
128
129 if self.signal is not None:
130 self.fail_reason = "terminated by signal %s" % self.signal
131 elif self.exit_code is not None:
132 self.fail_reason = "exited with exit code %s" % self.exit_code
133 else:
134 self.fail_reason = "unable to determine termination reason"
135
136 if self.failed:
137 logging.debug("Command '%s' failed (%s); output: %s",
138 self.cmd, self.fail_reason, self.output)
139
  def _GetOutput(self):
    """Returns the combined stdout and stderr for easier usage.
142
143 """
144 return self.stdout + self.stderr
145
146 output = property(_GetOutput, None, None, "Return full output")
147
def _BuildCmdEnvironment(env, reset):
  """Builds the environment for an external program.
151
152 """
153 if reset:
154 cmd_env = {}
155 else:
156 cmd_env = os.environ.copy()
157 cmd_env["LC_ALL"] = "C"
158
159 if env is not None:
160 cmd_env.update(env)
161
162 return cmd_env
163
164
def RunCmd(cmd, env=None, output=None, cwd="/", reset_env=False):
166 """Execute a (shell) command.
167
168 The command should not read from its standard input, as it will be
169 closed.
170
171 @type cmd: string or list
172 @param cmd: Command to run
173 @type env: dict
174 @param env: Additional environment variables
175 @type output: str
176 @param output: if desired, the output of the command can be
177 saved in a file instead of the RunResult instance; this
178 parameter denotes the file name (if not None)
179 @type cwd: string
180 @param cwd: if specified, will be used as the working
181 directory for the command; the default will be /
182 @type reset_env: boolean
183 @param reset_env: whether to reset or keep the default os environment
184 @rtype: L{RunResult}
185 @return: RunResult instance
186 @raise errors.ProgrammerError: if we call this when forks are disabled
187
188 """
189 if no_fork:
190 raise errors.ProgrammerError("utils.RunCmd() called with fork() disabled")
191
192 if isinstance(cmd, basestring):
193 strcmd = cmd
194 shell = True
195 else:
196 cmd = [str(val) for val in cmd]
197 strcmd = ShellQuoteArgs(cmd)
198 shell = False
199
200 if output:
201 logging.debug("RunCmd %s, output file '%s'", strcmd, output)
202 else:
203 logging.debug("RunCmd %s", strcmd)
204
205 cmd_env = _BuildCmdEnvironment(env, reset_env)
206
207 try:
208 if output is None:
209 out, err, status = _RunCmdPipe(cmd, cmd_env, shell, cwd)
210 else:
211 status = _RunCmdFile(cmd, cmd_env, shell, output, cwd)
212 out = err = ""
213 except OSError, err:
214 if err.errno == errno.ENOENT:
215 raise errors.OpExecError("Can't execute '%s': not found (%s)" %
216 (strcmd, err))
217 else:
218 raise
219
220 if status >= 0:
221 exitcode = status
222 signal_ = None
223 else:
224 exitcode = None
225 signal_ = -status
226
227 return RunResult(exitcode, signal_, out, err, strcmd)
228
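# Usage sketch (illustrative only, not part of the original module): typical
# consumption of the RunResult returned by RunCmd; the command and log message
# below are arbitrary examples.
def _ExampleRunCmdUsage():
  result = RunCmd(["/bin/ls", "/tmp"])
  if result.failed:
    logging.error("Command %s failed (%s); output: %s",
                  result.cmd, result.fail_reason, result.output)
    return None
  return result.stdout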
229
def StartDaemon(cmd, env=None, cwd="/", output=None, output_fd=None,
                pidfile=None):
232 """Start a daemon process after forking twice.
233
234 @type cmd: string or list
235 @param cmd: Command to run
236 @type env: dict
237 @param env: Additional environment variables
238 @type cwd: string
239 @param cwd: Working directory for the program
240 @type output: string
241 @param output: Path to file in which to save the output
242 @type output_fd: int
243 @param output_fd: File descriptor for output
244 @type pidfile: string
245 @param pidfile: Process ID file
246 @rtype: int
247 @return: Daemon process ID
248 @raise errors.ProgrammerError: if we call this when forks are disabled
249
250 """
251 if no_fork:
252 raise errors.ProgrammerError("utils.StartDaemon() called with fork()"
253 " disabled")
254
255 if output and not (bool(output) ^ (output_fd is not None)):
256 raise errors.ProgrammerError("Only one of 'output' and 'output_fd' can be"
257 " specified")
258
259 if isinstance(cmd, basestring):
260 cmd = ["/bin/sh", "-c", cmd]
261
262 strcmd = ShellQuoteArgs(cmd)
263
264 if output:
265 logging.debug("StartDaemon %s, output file '%s'", strcmd, output)
266 else:
267 logging.debug("StartDaemon %s", strcmd)
268
269 cmd_env = _BuildCmdEnvironment(env, False)
270
271
272 (pidpipe_read, pidpipe_write) = os.pipe()
273 try:
274 try:
275
276 (errpipe_read, errpipe_write) = os.pipe()
277 try:
278 try:
279
280 pid = os.fork()
281 if pid == 0:
282 try:
283
284 _StartDaemonChild(errpipe_read, errpipe_write,
285 pidpipe_read, pidpipe_write,
286 cmd, cmd_env, cwd,
287 output, output_fd, pidfile)
288 finally:
289
290 os._exit(1)
291 finally:
292 _CloseFDNoErr(errpipe_write)
293
294
295
296 errormsg = RetryOnSignal(os.read, errpipe_read, 100 * 1024)
297 finally:
298 _CloseFDNoErr(errpipe_read)
299 finally:
300 _CloseFDNoErr(pidpipe_write)
301
302
303 pidtext = RetryOnSignal(os.read, pidpipe_read, 128)
304 finally:
305 _CloseFDNoErr(pidpipe_read)
306
307
308 try:
309 os.waitpid(pid, 0)
310 except OSError:
311 pass
312
313 if errormsg:
314 raise errors.OpExecError("Error when starting daemon process: %r" %
315 errormsg)
316
317 try:
318 return int(pidtext)
319 except (ValueError, TypeError), err:
320 raise errors.OpExecError("Error while trying to parse PID %r: %s" %
321 (pidtext, err))
322
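# Usage sketch (illustrative only): starting a long-running helper as a daemon
# with its output and PID file captured; the command and paths are arbitrary
# examples, not values used by Ganeti itself.
def _ExampleStartDaemonUsage():
  daemon_pid = StartDaemon(["/usr/bin/env", "sleep", "300"],
                           output="/tmp/example-daemon.log",
                           pidfile="/tmp/example-daemon.pid")
  logging.info("Started example daemon with PID %d", daemon_pid)
  return daemon_pid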
323
def _StartDaemonChild(errpipe_read, errpipe_write,
                      pidpipe_read, pidpipe_write,
                      args, env, cwd,
                      output, fd_output, pidfile):
328 """Child process for starting daemon.
329
330 """
331 try:
332
333 _CloseFDNoErr(errpipe_read)
334 _CloseFDNoErr(pidpipe_read)
335
336
337 os.chdir("/")
338 os.umask(077)
339 os.setsid()
340
341
342 pid = os.fork()
343 if pid != 0:
344
345 os._exit(0)
346
347
348 SetCloseOnExecFlag(errpipe_write, True)
349
350
351 noclose_fds = [errpipe_write]
352
353
354 if pidfile:
355 try:
356
357
358 fd_pidfile = os.open(pidfile, os.O_WRONLY | os.O_CREAT, 0600)
359
360
361
362
363
364 LockFile(fd_pidfile)
365
366 os.write(fd_pidfile, "%d\n" % os.getpid())
367 except Exception, err:
368 raise Exception("Creating and locking PID file failed: %s" % err)
369
370
371 noclose_fds.append(fd_pidfile)
372
373 SetCloseOnExecFlag(fd_pidfile, False)
374 else:
375 fd_pidfile = None
376
377
378 fd_devnull = os.open(os.devnull, os.O_RDWR)
379
380 assert not output or (bool(output) ^ (fd_output is not None))
381
382 if fd_output is not None:
383 pass
384 elif output:
385
386 try:
387
388 fd_output = os.open(output, os.O_WRONLY | os.O_CREAT, 0600)
389 except EnvironmentError, err:
390 raise Exception("Opening output file failed: %s" % err)
391 else:
392 fd_output = fd_devnull
393
394
395 os.dup2(fd_devnull, 0)
396 os.dup2(fd_output, 1)
397 os.dup2(fd_output, 2)
398
399
400 RetryOnSignal(os.write, pidpipe_write, str(os.getpid()))
401
402
403 CloseFDs(noclose_fds=noclose_fds)
404
405
406 os.chdir(cwd)
407
408 if env is None:
409 os.execvp(args[0], args)
410 else:
411 os.execvpe(args[0], args, env)
412 except:
413 try:
414
415 buf = str(sys.exc_info()[1])
416
417 RetryOnSignal(os.write, errpipe_write, buf)
418 except:
419
420 pass
421
422 os._exit(1)
423
def _RunCmdPipe(cmd, env, via_shell, cwd):
  """Run a command and return its output.
427
428 @type cmd: string or list
429 @param cmd: Command to run
430 @type env: dict
431 @param env: The environment to use
432 @type via_shell: bool
433 @param via_shell: if we should run via the shell
434 @type cwd: string
435 @param cwd: the working directory for the program
436 @rtype: tuple
437 @return: (out, err, status)
438
439 """
440 poller = select.poll()
441 child = subprocess.Popen(cmd, shell=via_shell,
442 stderr=subprocess.PIPE,
443 stdout=subprocess.PIPE,
444 stdin=subprocess.PIPE,
445 close_fds=True, env=env,
446 cwd=cwd)
447
448 child.stdin.close()
449 poller.register(child.stdout, select.POLLIN)
450 poller.register(child.stderr, select.POLLIN)
451 out = StringIO()
452 err = StringIO()
453 fdmap = {
454 child.stdout.fileno(): (out, child.stdout),
455 child.stderr.fileno(): (err, child.stderr),
456 }
457 for fd in fdmap:
458 SetNonblockFlag(fd, True)
459
460 while fdmap:
461 pollresult = RetryOnSignal(poller.poll)
462
463 for fd, event in pollresult:
464 if event & select.POLLIN or event & select.POLLPRI:
465 data = fdmap[fd][1].read()
466
467 if not data:
468 poller.unregister(fd)
469 del fdmap[fd]
470 continue
471 fdmap[fd][0].write(data)
472 if (event & select.POLLNVAL or event & select.POLLHUP or
473 event & select.POLLERR):
474 poller.unregister(fd)
475 del fdmap[fd]
476
477 out = out.getvalue()
478 err = err.getvalue()
479
480 status = child.wait()
481 return out, err, status
482
def _RunCmdFile(cmd, env, via_shell, output, cwd):
  """Run a command and save its output to a file.
486
487 @type cmd: string or list
488 @param cmd: Command to run
489 @type env: dict
490 @param env: The environment to use
491 @type via_shell: bool
492 @param via_shell: if we should run via the shell
493 @type output: str
494 @param output: the filename in which to save the output
495 @type cwd: string
496 @param cwd: the working directory for the program
497 @rtype: int
498 @return: the exit status
499
500 """
501 fh = open(output, "a")
502 try:
503 child = subprocess.Popen(cmd, shell=via_shell,
504 stderr=subprocess.STDOUT,
505 stdout=fh,
506 stdin=subprocess.PIPE,
507 close_fds=True, env=env,
508 cwd=cwd)
509
510 child.stdin.close()
511 status = child.wait()
512 finally:
513 fh.close()
514 return status
515
def SetCloseOnExecFlag(fd, enable):
  """Sets or unsets the close-on-exec flag on a file descriptor.
519
520 @type fd: int
521 @param fd: File descriptor
522 @type enable: bool
523 @param enable: Whether to set or unset it.
524
525 """
526 flags = fcntl.fcntl(fd, fcntl.F_GETFD)
527
528 if enable:
529 flags |= fcntl.FD_CLOEXEC
530 else:
531 flags &= ~fcntl.FD_CLOEXEC
532
533 fcntl.fcntl(fd, fcntl.F_SETFD, flags)
534
def SetNonblockFlag(fd, enable):
  """Sets or unsets the O_NONBLOCK flag on a file descriptor.
538
539 @type fd: int
540 @param fd: File descriptor
541 @type enable: bool
542 @param enable: Whether to set or unset it
543
544 """
545 flags = fcntl.fcntl(fd, fcntl.F_GETFL)
546
547 if enable:
548 flags |= os.O_NONBLOCK
549 else:
550 flags &= ~os.O_NONBLOCK
551
552 fcntl.fcntl(fd, fcntl.F_SETFL, flags)
553
def RetryOnSignal(fn, *args, **kwargs):
  """Calls a function again if it failed due to EINTR.
557
558 """
559 while True:
560 try:
561 return fn(*args, **kwargs)
562 except EnvironmentError, err:
563 if err.errno != errno.EINTR:
564 raise
565 except (socket.error, select.error), err:
566
567
568 if not (err.args and err.args[0] == errno.EINTR):
569 raise
570
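# Usage sketch (illustrative only): wrapping an interruptible system call so
# that EINTR is transparently retried; fd is assumed to be an open descriptor.
def _ExampleRetryOnSignalUsage(fd):
  return RetryOnSignal(os.read, fd, 4096)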
571
def RunParts(dir_name, env=None, reset_env=False):
  """Run scripts or programs in a directory.
574
575 @type dir_name: string
576 @param dir_name: absolute path to a directory
577 @type env: dict
578 @param env: The environment to use
579 @type reset_env: boolean
580 @param reset_env: whether to reset or keep the default os environment
581 @rtype: list of tuples
582 @return: list of (name, (one of RUNDIR_STATUS), RunResult)
583
584 """
585 rr = []
586
587 try:
588 dir_contents = ListVisibleFiles(dir_name)
589 except OSError, err:
590 logging.warning("RunParts: skipping %s (cannot list: %s)", dir_name, err)
591 return rr
592
593 for relname in sorted(dir_contents):
594 fname = PathJoin(dir_name, relname)
595 if not (os.path.isfile(fname) and os.access(fname, os.X_OK) and
596 constants.EXT_PLUGIN_MASK.match(relname) is not None):
597 rr.append((relname, constants.RUNPARTS_SKIP, None))
598 else:
599 try:
600 result = RunCmd([fname], env=env, reset_env=reset_env)
601 except Exception, err:
602 rr.append((relname, constants.RUNPARTS_ERR, str(err)))
603 else:
604 rr.append((relname, constants.RUNPARTS_RUN, result))
605
606 return rr
607
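# Usage sketch (illustrative only): running the executables in a hooks-style
# directory and logging failures; the directory path is an arbitrary example.
def _ExampleRunPartsUsage():
  for (name, status, runresult) in RunParts("/etc/example.d"):
    if status == constants.RUNPARTS_ERR:
      logging.error("Script %s could not be run: %s", name, runresult)
    elif status == constants.RUNPARTS_RUN and runresult.failed:
      logging.error("Script %s failed: %s", name, runresult.output)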
def RemoveFile(filename):
  """Remove a file ignoring some errors.
611
612 Remove a file, ignoring non-existing ones or directories. Other
613 errors are passed.
614
615 @type filename: str
616 @param filename: the file to be removed
617
618 """
619 try:
620 os.unlink(filename)
621 except OSError, err:
622 if err.errno not in (errno.ENOENT, errno.EISDIR):
623 raise
624
def RemoveDir(dirname):
  """Remove an empty directory.
628
629 Remove a directory, ignoring non-existing ones.
630 Other errors are passed. This includes the case,
631 where the directory is not empty, so it can't be removed.
632
633 @type dirname: str
634 @param dirname: the empty directory to be removed
635
636 """
637 try:
638 os.rmdir(dirname)
639 except OSError, err:
640 if err.errno != errno.ENOENT:
641 raise
642
643
def RenameFile(old, new, mkdir=False, mkdir_mode=0750):
645 """Renames a file.
646
647 @type old: string
648 @param old: Original path
649 @type new: string
650 @param new: New path
651 @type mkdir: bool
652 @param mkdir: Whether to create target directory if it doesn't exist
653 @type mkdir_mode: int
654 @param mkdir_mode: Mode for newly created directories
655
656 """
657 try:
658 return os.rename(old, new)
659 except OSError, err:
660
661
662
663 if mkdir and err.errno == errno.ENOENT:
664
665 Makedirs(os.path.dirname(new), mode=mkdir_mode)
666
667 return os.rename(old, new)
668
669 raise
670
def Makedirs(path, mode=0750):
  """Super-mkdir; create a leaf directory and all intermediate ones.
674
675 This is a wrapper around C{os.makedirs} adding error handling not implemented
676 before Python 2.5.
677
678 """
679 try:
680 os.makedirs(path, mode)
681 except OSError, err:
682
683
684 if err.errno != errno.EEXIST or not os.path.exists(path):
685 raise
686
def ResetTempfileModule():
  """Resets the random name generator of the tempfile module.
690
691 This function should be called after C{os.fork} in the child process to
692 ensure it creates a newly seeded random generator. Otherwise it would
693 generate the same random parts as the parent process. If several processes
694 race for the creation of a temporary file, this could lead to one not getting
695 a temporary name.
696
697 """
698
699 if hasattr(tempfile, "_once_lock") and hasattr(tempfile, "_name_sequence"):
700 tempfile._once_lock.acquire()
701 try:
702
703 tempfile._name_sequence = None
704 finally:
705 tempfile._once_lock.release()
706 else:
707 logging.critical("The tempfile module misses at least one of the"
708 " '_once_lock' and '_name_sequence' attributes")
709
def _FingerprintFile(filename):
  """Compute the fingerprint of a file.
713
714 If the file does not exist, a None will be returned
715 instead.
716
717 @type filename: str
718 @param filename: the filename to checksum
719 @rtype: str
720 @return: the hex digest of the sha checksum of the contents
721 of the file
722
723 """
724 if not (os.path.exists(filename) and os.path.isfile(filename)):
725 return None
726
727 f = open(filename)
728
729 fp = compat.sha1_hash()
730 while True:
731 data = f.read(4096)
732 if not data:
733 break
734
735 fp.update(data)
736
737 return fp.hexdigest()
738
def FingerprintFiles(files):
  """Compute fingerprints for a list of files.
742
743 @type files: list
744 @param files: the list of filename to fingerprint
745 @rtype: dict
746 @return: a dictionary filename: fingerprint, holding only
747 existing files
748
749 """
750 ret = {}
751
752 for filename in files:
753 cksum = _FingerprintFile(filename)
754 if cksum:
755 ret[filename] = cksum
756
757 return ret
758
def ForceDictType(target, key_types, allowed_values=None):
  """Force the values of a dict to have certain types.
762
763 @type target: dict
764 @param target: the dict to update
765 @type key_types: dict
766 @param key_types: dict mapping target dict keys to types
767 in constants.ENFORCEABLE_TYPES
768 @type allowed_values: list
769 @keyword allowed_values: list of specially allowed values
770
771 """
772 if allowed_values is None:
773 allowed_values = []
774
775 if not isinstance(target, dict):
776 msg = "Expected dictionary, got '%s'" % target
777 raise errors.TypeEnforcementError(msg)
778
779 for key in target:
780 if key not in key_types:
781 msg = "Unknown key '%s'" % key
782 raise errors.TypeEnforcementError(msg)
783
784 if target[key] in allowed_values:
785 continue
786
787 ktype = key_types[key]
788 if ktype not in constants.ENFORCEABLE_TYPES:
789 msg = "'%s' has non-enforceable type %s" % (key, ktype)
790 raise errors.ProgrammerError(msg)
791
792 if ktype in (constants.VTYPE_STRING, constants.VTYPE_MAYBE_STRING):
793 if target[key] is None and ktype == constants.VTYPE_MAYBE_STRING:
794 pass
795 elif not isinstance(target[key], basestring):
796 if isinstance(target[key], bool) and not target[key]:
797 target[key] = ''
798 else:
799 msg = "'%s' (value %s) is not a valid string" % (key, target[key])
800 raise errors.TypeEnforcementError(msg)
801 elif ktype == constants.VTYPE_BOOL:
802 if isinstance(target[key], basestring) and target[key]:
803 if target[key].lower() == constants.VALUE_FALSE:
804 target[key] = False
805 elif target[key].lower() == constants.VALUE_TRUE:
806 target[key] = True
807 else:
808 msg = "'%s' (value %s) is not a valid boolean" % (key, target[key])
809 raise errors.TypeEnforcementError(msg)
810 elif target[key]:
811 target[key] = True
812 else:
813 target[key] = False
814 elif ktype == constants.VTYPE_SIZE:
815 try:
816 target[key] = ParseUnit(target[key])
817 except errors.UnitParseError, err:
818 msg = "'%s' (value %s) is not a valid size. error: %s" % \
819 (key, target[key], err)
820 raise errors.TypeEnforcementError(msg)
821 elif ktype == constants.VTYPE_INT:
822 try:
823 target[key] = int(target[key])
824 except (ValueError, TypeError):
825 msg = "'%s' (value %s) is not a valid integer" % (key, target[key])
826 raise errors.TypeEnforcementError(msg)
827
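# Usage sketch (illustrative only): coercing user-supplied parameters to their
# declared types; the keys and the type mapping are arbitrary examples.
def _ExampleForceDictTypeUsage(params):
  key_types = {
    "memory": constants.VTYPE_SIZE,
    "auto_balance": constants.VTYPE_BOOL,
    }
  ForceDictType(params, key_types)
  return params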
def _GetProcStatusPath(pid):
  """Returns the path for a PID's proc status file.
831
832 @type pid: int
833 @param pid: Process ID
834 @rtype: string
835
836 """
837 return "/proc/%d/status" % pid
838
def IsProcessAlive(pid):
  """Check if a given pid exists on the system.
842
843 @note: zombie status is not handled, so zombie processes
844 will be returned as alive
845 @type pid: int
846 @param pid: the process ID to check
847 @rtype: boolean
848 @return: True if the process exists
849
850 """
851 def _TryStat(name):
852 try:
853 os.stat(name)
854 return True
855 except EnvironmentError, err:
856 if err.errno in (errno.ENOENT, errno.ENOTDIR):
857 return False
858 elif err.errno == errno.EINVAL:
859 raise RetryAgain(err)
860 raise
861
862 assert isinstance(pid, int), "pid must be an integer"
863 if pid <= 0:
864 return False
865
866
867
868 try:
869 return Retry(_TryStat, (0.01, 1.5, 0.1), 0.5,
870 args=[_GetProcStatusPath(pid)])
871 except RetryTimeout, err:
872 err.RaiseInner()
873
def _ParseSigsetT(sigset):
  """Parse a rendered sigset_t value.
877
878 This is the opposite of the Linux kernel's fs/proc/array.c:render_sigset_t
879 function.
880
881 @type sigset: string
882 @param sigset: Rendered signal set from /proc/$pid/status
883 @rtype: set
884 @return: Set of all enabled signal numbers
885
886 """
887 result = set()
888
889 signum = 0
890 for ch in reversed(sigset):
891 chv = int(ch, 16)
892
893
894
895 if chv & 1:
896 result.add(signum + 1)
897 if chv & 2:
898 result.add(signum + 2)
899 if chv & 4:
900 result.add(signum + 3)
901 if chv & 8:
902 result.add(signum + 4)
903
904 signum += 4
905
906 return result
907
def _GetProcStatusField(pstatus, field):
  """Retrieves a field from the contents of a proc status file.
911
912 @type pstatus: string
913 @param pstatus: Contents of /proc/$pid/status
914 @type field: string
915 @param field: Name of field whose value should be returned
916 @rtype: string
917
918 """
919 for line in pstatus.splitlines():
920 parts = line.split(":", 1)
921
922 if len(parts) < 2 or parts[0] != field:
923 continue
924
925 return parts[1].strip()
926
927 return None
928
def IsProcessHandlingSignal(pid, signum, status_path=None):
  """Checks whether a process is handling a signal.
932
933 @type pid: int
934 @param pid: Process ID
935 @type signum: int
936 @param signum: Signal number
937 @rtype: bool
938
939 """
940 if status_path is None:
941 status_path = _GetProcStatusPath(pid)
942
943 try:
944 proc_status = ReadFile(status_path)
945 except EnvironmentError, err:
946
947 if err.errno in (errno.ENOENT, errno.ENOTDIR, errno.EINVAL, errno.ESRCH):
948 return False
949 raise
950
951 sigcgt = _GetProcStatusField(proc_status, "SigCgt")
952 if sigcgt is None:
953 raise RuntimeError("%s is missing 'SigCgt' field" % status_path)
954
955
956 return signum in _ParseSigsetT(sigcgt)
957
def ReadPidFile(pidfile):
  """Read a pid from a file.
961
962 @type pidfile: string
963 @param pidfile: path to the file containing the pid
964 @rtype: int
965 @return: The process id, if the file exists and contains a valid PID,
966 otherwise 0
967
968 """
969 try:
970 raw_data = ReadOneLineFile(pidfile)
971 except EnvironmentError, err:
972 if err.errno != errno.ENOENT:
973 logging.exception("Can't read pid file")
974 return 0
975
976 try:
977 pid = int(raw_data)
978 except (TypeError, ValueError), err:
979 logging.info("Can't parse pid file contents", exc_info=True)
980 return 0
981
982 return pid
983
def ReadLockedPidFile(path):
  """Reads a locked PID file.
987
988 This can be used together with L{StartDaemon}.
989
990 @type path: string
991 @param path: Path to PID file
992 @return: PID as integer or, if file was unlocked or couldn't be opened, None
993
994 """
995 try:
996 fd = os.open(path, os.O_RDONLY)
997 except EnvironmentError, err:
998 if err.errno == errno.ENOENT:
999
1000 return None
1001 raise
1002
1003 try:
1004 try:
1005
1006 LockFile(fd)
1007 except errors.LockError:
1008
1009 return int(os.read(fd, 100))
1010 finally:
1011 os.close(fd)
1012
1013 return None
1014
def MatchNameComponent(key, name_list, case_sensitive=True):
  """Try to match a name against a list.
1018
1019 This function will try to match a name like test1 against a list
1020 like C{['test1.example.com', 'test2.example.com', ...]}. Against
1021 this list, I{'test1'} as well as I{'test1.example'} will match, but
1022 not I{'test1.ex'}. A multiple match will be considered as no match
1023 at all (e.g. I{'test1'} against C{['test1.example.com',
1024 'test1.example.org']}), except when the key fully matches an entry
1025 (e.g. I{'test1'} against C{['test1', 'test1.example.com']}).
1026
1027 @type key: str
1028 @param key: the name to be searched
1029 @type name_list: list
1030 @param name_list: the list of strings against which to search the key
1031 @type case_sensitive: boolean
1032 @param case_sensitive: whether to provide a case-sensitive match
1033
1034 @rtype: None or str
1035 @return: None if there is no match I{or} if there are multiple matches,
1036 otherwise the element from the list which matches
1037
1038 """
1039 if key in name_list:
1040 return key
1041
1042 re_flags = 0
1043 if not case_sensitive:
1044 re_flags |= re.IGNORECASE
1045 key = key.upper()
1046 mo = re.compile("^%s(\..*)?$" % re.escape(key), re_flags)
1047 names_filtered = []
1048 string_matches = []
1049 for name in name_list:
1050 if mo.match(name) is not None:
1051 names_filtered.append(name)
1052 if not case_sensitive and key == name.upper():
1053 string_matches.append(name)
1054
1055 if len(string_matches) == 1:
1056 return string_matches[0]
1057 if len(names_filtered) == 1:
1058 return names_filtered[0]
1059 return None
1060
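# Usage sketch (illustrative only): expanding a shortened name against a list
# of known names; the names are arbitrary examples.
def _ExampleMatchNameComponentUsage():
  names = ["inst1.example.com", "inst2.example.com"]
  # Returns "inst1.example.com"; an ambiguous or unknown key returns None
  return MatchNameComponent("inst1", names, case_sensitive=False)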
def ValidateServiceName(name):
  """Validate the given service name.
1064
1065 @type name: number or string
1066 @param name: Service name or port specification
1067
1068 """
1069 try:
1070 numport = int(name)
1071 except (ValueError, TypeError):
1072
1073 valid = _VALID_SERVICE_NAME_RE.match(name)
1074 else:
1075
1076
1077 valid = (numport >= 0 and numport < (1 << 16))
1078
1079 if not valid:
1080 raise errors.OpPrereqError("Invalid service name '%s'" % name,
1081 errors.ECODE_INVAL)
1082
1083 return name
1084
def ListVolumeGroups():
  """List volume groups and their size.
1088
1089 @rtype: dict
1090 @return:
1091 Dictionary with keys volume name and values
1092 the size of the volume
1093
1094 """
1095 command = "vgs --noheadings --units m --nosuffix -o name,size"
1096 result = RunCmd(command)
1097 retval = {}
1098 if result.failed:
1099 return retval
1100
1101 for line in result.stdout.splitlines():
1102 try:
1103 name, size = line.split()
1104 size = int(float(size))
1105 except (IndexError, ValueError), err:
1106 logging.error("Invalid output from vgs (%s): %s", err, line)
1107 continue
1108
1109 retval[name] = size
1110
1111 return retval
1112
def BridgeExists(bridge):
  """Check whether the given bridge exists in the system.
1116
1117 @type bridge: str
1118 @param bridge: the bridge name to check
1119 @rtype: boolean
1120 @return: True if it does
1121
1122 """
1123 return os.path.isdir("/sys/class/net/%s/bridge" % bridge)
1124
def NiceSort(name_list):
  """Sort a list of strings based on digit and non-digit groupings.
1128
1129 Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
1130 will sort the list in the logical order C{['a1', 'a2', 'a10',
1131 'a11']}.
1132
1133 The sort algorithm breaks each name in groups of either only-digits
1134 or no-digits. Only the first eight such groups are considered, and
1135 after that we just use what's left of the string.
1136
1137 @type name_list: list
1138 @param name_list: the names to be sorted
1139 @rtype: list
1140 @return: a copy of the name list sorted with our algorithm
1141
1142 """
1143 _SORTER_BASE = "(\D+|\d+)"
1144 _SORTER_FULL = "^%s%s?%s?%s?%s?%s?%s?%s?.*$" % (_SORTER_BASE, _SORTER_BASE,
1145 _SORTER_BASE, _SORTER_BASE,
1146 _SORTER_BASE, _SORTER_BASE,
1147 _SORTER_BASE, _SORTER_BASE)
1148 _SORTER_RE = re.compile(_SORTER_FULL)
1149 _SORTER_NODIGIT = re.compile("^\D*$")
1150 def _TryInt(val):
1151 """Attempts to convert a variable to integer."""
1152 if val is None or _SORTER_NODIGIT.match(val):
1153 return val
1154 rval = int(val)
1155 return rval
1156
1157 to_sort = [([_TryInt(grp) for grp in _SORTER_RE.match(name).groups()], name)
1158 for name in name_list]
1159 to_sort.sort()
1160 return [tup[1] for tup in to_sort]
1161
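# Usage sketch (illustrative only): sorting device-like names in their natural
# numeric order instead of plain lexicographic order.
def _ExampleNiceSortUsage():
  # Returns ["sda1", "sda2", "sda10"] rather than ["sda1", "sda10", "sda2"]
  return NiceSort(["sda10", "sda1", "sda2"])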
def TryConvert(fn, val):
  """Try to convert a value ignoring errors.
1165
1166 This function tries to apply function I{fn} to I{val}. If no
1167 C{ValueError} or C{TypeError} exceptions are raised, it will return
1168 the result, else it will return the original value. Any other
1169 exceptions are propagated to the caller.
1170
1171 @type fn: callable
1172 @param fn: function to apply to the value
1173 @param val: the value to be converted
1174 @return: The converted value if the conversion was successful,
1175 otherwise the original value.
1176
1177 """
1178 try:
1179 nv = fn(val)
1180 except (ValueError, TypeError):
1181 nv = val
1182 return nv
1183
def IsValidShellParam(word):
  """Verifies if the given word is safe from the shell's p.o.v.
1187
1188 This means that we can pass this to a command via the shell and be
1189 sure that it doesn't alter the command line and is passed as such to
1190 the actual command.
1191
1192 Note that we are overly restrictive here, in order to be on the safe
1193 side.
1194
1195 @type word: str
1196 @param word: the word to check
1197 @rtype: boolean
1198 @return: True if the word is 'safe'
1199
1200 """
1201 return bool(re.match("^[-a-zA-Z0-9._+/:%@]+$", word))
1202
def BuildShellCmd(template, *args):
  """Build a safe shell command line from the given arguments.
1206
1207 This function will check all arguments in the args list so that they
1208 are valid shell parameters (i.e. they don't contain shell
1209 metacharacters). If everything is ok, it will return the result of
1210 template % args.
1211
1212 @type template: str
1213 @param template: the string holding the template for the
1214 string formatting
1215 @rtype: str
1216 @return: the expanded command line
1217
1218 """
1219 for word in args:
1220 if not IsValidShellParam(word):
1221 raise errors.ProgrammerError("Shell argument '%s' contains"
1222 " invalid characters" % word)
1223 return template % args
1224
1260
def ParseUnit(input_string):
  """Tries to extract number and scale from the given string.
1264
1265 Input must be in the format C{NUMBER+ [DOT NUMBER+] SPACE*
1266 [UNIT]}. If no unit is specified, it defaults to MiB. Return value
1267 is always an int in MiB.
1268
1269 """
1270 m = re.match('^([.\d]+)\s*([a-zA-Z]+)?$', str(input_string))
1271 if not m:
1272 raise errors.UnitParseError("Invalid format")
1273
1274 value = float(m.groups()[0])
1275
1276 unit = m.groups()[1]
1277 if unit:
1278 lcunit = unit.lower()
1279 else:
1280 lcunit = 'm'
1281
1282 if lcunit in ('m', 'mb', 'mib'):
1283
1284 pass
1285
1286 elif lcunit in ('g', 'gb', 'gib'):
1287 value *= 1024
1288
1289 elif lcunit in ('t', 'tb', 'tib'):
1290 value *= 1024 * 1024
1291
1292 else:
1293 raise errors.UnitParseError("Unknown unit: %s" % unit)
1294
1295
1296 if int(value) < value:
1297 value += 1
1298
1299
1300 value = int(value)
1301 if value % 4:
1302 value += 4 - value % 4
1303
1304 return value
1305
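# Usage sketch (illustrative only): the results below follow from the rules
# above (default unit MiB, rounding up to a multiple of 4).
def _ExampleParseUnitUsage():
  assert ParseUnit("512") == 512    # plain number, taken as MiB
  assert ParseUnit("1G") == 1024    # gibibytes converted to MiB
  assert ParseUnit("0.5g") == 512   # fractional values are accepted
  assert ParseUnit("6") == 8        # rounded up to a multiple of 4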
def ParseCpuMask(cpu_mask):
  """Parse a CPU mask definition and return the list of CPU IDs.
1309
1310 CPU mask format: comma-separated list of CPU IDs
1311 or dash-separated ID ranges
1312 Example: "0-2,5" -> "0,1,2,5"
1313
1314 @type cpu_mask: str
1315 @param cpu_mask: CPU mask definition
1316 @rtype: list of int
1317 @return: list of CPU IDs
1318
1319 """
1320 if not cpu_mask:
1321 return []
1322 cpu_list = []
1323 for range_def in cpu_mask.split(","):
1324 boundaries = range_def.split("-")
1325 n_elements = len(boundaries)
1326 if n_elements > 2:
1327 raise errors.ParseError("Invalid CPU ID range definition"
1328 " (only one hyphen allowed): %s" % range_def)
1329 try:
1330 lower = int(boundaries[0])
1331 except (ValueError, TypeError), err:
1332 raise errors.ParseError("Invalid CPU ID value for lower boundary of"
1333 " CPU ID range: %s" % str(err))
1334 try:
1335 higher = int(boundaries[-1])
1336 except (ValueError, TypeError), err:
1337 raise errors.ParseError("Invalid CPU ID value for higher boundary of"
1338 " CPU ID range: %s" % str(err))
1339 if lower > higher:
1340 raise errors.ParseError("Invalid CPU ID range definition"
1341 " (%d > %d): %s" % (lower, higher, range_def))
1342 cpu_list.extend(range(lower, higher + 1))
1343 return cpu_list
1344
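# Usage sketch (illustrative only): expanding a CPU mask string as described in
# the docstring above.
def _ExampleParseCpuMaskUsage():
  # Returns [0, 1, 2, 5]
  return ParseCpuMask("0-2,5")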
def AddAuthorizedKey(file_obj, key):
  """Adds an SSH public key to an authorized_keys file.
1348
1349 @type file_obj: str or file handle
1350 @param file_obj: path to authorized_keys file
1351 @type key: str
1352 @param key: string containing key
1353
1354 """
1355 key_fields = key.split()
1356
1357 if isinstance(file_obj, basestring):
1358 f = open(file_obj, 'a+')
1359 else:
1360 f = file_obj
1361
1362 try:
1363 nl = True
1364 for line in f:
1365
1366 if line.split() == key_fields:
1367 break
1368 nl = line.endswith('\n')
1369 else:
1370 if not nl:
1371 f.write("\n")
1372 f.write(key.rstrip('\r\n'))
1373 f.write("\n")
1374 f.flush()
1375 finally:
1376 f.close()
1377
def RemoveAuthorizedKey(file_name, key):
  """Removes an SSH public key from an authorized_keys file.
1381
1382 @type file_name: str
1383 @param file_name: path to authorized_keys file
1384 @type key: str
1385 @param key: string containing key
1386
1387 """
1388 key_fields = key.split()
1389
1390 fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1391 try:
1392 out = os.fdopen(fd, 'w')
1393 try:
1394 f = open(file_name, 'r')
1395 try:
1396 for line in f:
1397
1398 if line.split() != key_fields:
1399 out.write(line)
1400
1401 out.flush()
1402 os.rename(tmpname, file_name)
1403 finally:
1404 f.close()
1405 finally:
1406 out.close()
1407 except:
1408 RemoveFile(tmpname)
1409 raise
1410
1411
def SetEtcHostsEntry(file_name, ip, hostname, aliases):
1413 """Sets the name of an IP address and hostname in /etc/hosts.
1414
1415 @type file_name: str
1416 @param file_name: path to the file to modify (usually C{/etc/hosts})
1417 @type ip: str
1418 @param ip: the IP address
1419 @type hostname: str
1420 @param hostname: the hostname to be added
1421 @type aliases: list
1422 @param aliases: the list of aliases to add for the hostname
1423
1424 """
1425
1426
1427 aliases = UniqueSequence([hostname] + aliases)[1:]
1428
1429 fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1430 try:
1431 out = os.fdopen(fd, 'w')
1432 try:
1433 f = open(file_name, 'r')
1434 try:
1435 for line in f:
1436 fields = line.split()
1437 if fields and not fields[0].startswith('#') and ip == fields[0]:
1438 continue
1439 out.write(line)
1440
1441 out.write("%s\t%s" % (ip, hostname))
1442 if aliases:
1443 out.write(" %s" % ' '.join(aliases))
1444 out.write('\n')
1445
1446 out.flush()
1447 os.fsync(out)
1448 os.chmod(tmpname, 0644)
1449 os.rename(tmpname, file_name)
1450 finally:
1451 f.close()
1452 finally:
1453 out.close()
1454 except:
1455 RemoveFile(tmpname)
1456 raise
1457
def AddHostToEtcHosts(hostname):
  """Wrapper around SetEtcHostsEntry.
1461
1462 @type hostname: str
1463 @param hostname: a hostname that will be resolved and added to
1464 L{constants.ETC_HOSTS}
1465
1466 """
1467 hi = netutils.HostInfo(name=hostname)
1468 SetEtcHostsEntry(constants.ETC_HOSTS, hi.ip, hi.name, [hi.ShortName()])
1469
1470
def RemoveEtcHostsEntry(file_name, hostname):
1472 """Removes a hostname from /etc/hosts.
1473
1474 IP addresses without names are removed from the file.
1475
1476 @type file_name: str
1477 @param file_name: path to the file to modify (usually C{/etc/hosts})
1478 @type hostname: str
1479 @param hostname: the hostname to be removed
1480
1481 """
1482
1483 fd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name))
1484 try:
1485 out = os.fdopen(fd, 'w')
1486 try:
1487 f = open(file_name, 'r')
1488 try:
1489 for line in f:
1490 fields = line.split()
1491 if len(fields) > 1 and not fields[0].startswith('#'):
1492 names = fields[1:]
1493 if hostname in names:
1494 while hostname in names:
1495 names.remove(hostname)
1496 if names:
1497 out.write("%s %s\n" % (fields[0], ' '.join(names)))
1498 continue
1499
1500 out.write(line)
1501
1502 out.flush()
1503 os.fsync(out)
1504 os.chmod(tmpname, 0644)
1505 os.rename(tmpname, file_name)
1506 finally:
1507 f.close()
1508 finally:
1509 out.close()
1510 except:
1511 RemoveFile(tmpname)
1512 raise
1513
1527
def TimestampForFilename():
  """Returns the current time formatted for filenames.
1531
  The format doesn't contain colons as some shells and applications treat
  them as separators.
1534
1535 """
1536 return time.strftime("%Y-%m-%d_%H_%M_%S")
1537
def CreateBackup(file_name):
  """Creates a backup of a file.
1541
1542 @type file_name: str
1543 @param file_name: file to be backed up
1544 @rtype: str
1545 @return: the path to the newly created backup
1546 @raise errors.ProgrammerError: for invalid file names
1547
1548 """
1549 if not os.path.isfile(file_name):
1550 raise errors.ProgrammerError("Can't make a backup of a non-file '%s'" %
1551 file_name)
1552
1553 prefix = ("%s.backup-%s." %
1554 (os.path.basename(file_name), TimestampForFilename()))
1555 dir_name = os.path.dirname(file_name)
1556
1557 fsrc = open(file_name, 'rb')
1558 try:
1559 (fd, backup_name) = tempfile.mkstemp(prefix=prefix, dir=dir_name)
1560 fdst = os.fdopen(fd, 'wb')
1561 try:
1562 logging.debug("Backing up %s at %s", file_name, backup_name)
1563 shutil.copyfileobj(fsrc, fdst)
1564 finally:
1565 fdst.close()
1566 finally:
1567 fsrc.close()
1568
1569 return backup_name
1570
def ShellQuote(value):
  """Quotes shell argument according to POSIX.
1574
1575 @type value: str
1576 @param value: the argument to be quoted
1577 @rtype: str
1578 @return: the quoted value
1579
1580 """
1581 if _re_shell_unquoted.match(value):
1582 return value
1583 else:
1584 return "'%s'" % value.replace("'", "'\\''")
1585
def ShellQuoteArgs(args):
  """Quotes a list of shell arguments.
1589
1590 @type args: list
1591 @param args: list of arguments to be quoted
1592 @rtype: str
1593 @return: the quoted arguments concatenated with spaces
1594
1595 """
1596 return ' '.join([ShellQuote(i) for i in args])
1597
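# Usage sketch (illustrative only): building a safely quoted command string,
# e.g. for embedding in a larger shell command line.
def _ExampleShellQuoteArgsUsage():
  # Returns: echo 'hello world' '$HOME'
  return ShellQuoteArgs(["echo", "hello world", "$HOME"])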
class ShellWriter(object):
  """Helper class to write scripts with indentation.
1601
1602 """
1603 INDENT_STR = " "
1604
  def __init__(self, fh):
    """Initializes this class.
1607
1608 """
1609 self._fh = fh
1610 self._indent = 0
1611
  def IncIndent(self):
    """Increase indentation level by 1.
1614
1615 """
1616 self._indent += 1
1617
  def DecIndent(self):
    """Decrease indentation level by 1.
1620
1621 """
1622 assert self._indent > 0
1623 self._indent -= 1
1624
  def Write(self, txt, *args):
1626 """Write line to output file.
1627
1628 """
1629 assert self._indent >= 0
1630
1631 self._fh.write(self._indent * self.INDENT_STR)
1632
1633 if args:
1634 self._fh.write(txt % args)
1635 else:
1636 self._fh.write(txt)
1637
1638 self._fh.write("\n")
1639
def ListVisibleFiles(path):
  """Returns a list of visible files in a directory.
1643
1644 @type path: str
1645 @param path: the directory to enumerate
1646 @rtype: list
1647 @return: the list of all files not starting with a dot
  @raise ProgrammerError: if L{path} is not an absolute and normalized path
1649
1650 """
1651 if not IsNormAbsPath(path):
1652 raise errors.ProgrammerError("Path passed to ListVisibleFiles is not"
1653 " absolute/normalized: '%s'" % path)
1654 files = [i for i in os.listdir(path) if not i.startswith(".")]
1655 return files
1656
def GetHomeDir(user, default=None):
  """Try to get the homedir of the given user.
1660
1661 The user can be passed either as a string (denoting the name) or as
1662 an integer (denoting the user id). If the user is not found, the
1663 'default' argument is returned, which defaults to None.
1664
1665 """
1666 try:
1667 if isinstance(user, basestring):
1668 result = pwd.getpwnam(user)
1669 elif isinstance(user, (int, long)):
1670 result = pwd.getpwuid(user)
1671 else:
1672 raise errors.ProgrammerError("Invalid type passed to GetHomeDir (%s)" %
1673 type(user))
1674 except KeyError:
1675 return default
1676 return result.pw_dir
1677
def NewUUID():
  """Returns a random UUID.
1681
1682 @note: This is a Linux-specific method as it uses the /proc
1683 filesystem.
1684 @rtype: str
1685
1686 """
1687 return ReadFile(_RANDOM_UUID_FILE, size=128).rstrip("\n")
1688
def GenerateSecret(numbytes=20):
  """Generates a random secret.
1692
1693 This will generate a pseudo-random secret returning an hex string
1694 (so that it can be used where an ASCII string is needed).
1695
1696 @param numbytes: the number of bytes which will be represented by the returned
1697 string (defaulting to 20, the length of a SHA1 hash)
1698 @rtype: str
1699 @return: an hex representation of the pseudo-random sequence
1700
1701 """
1702 return os.urandom(numbytes).encode('hex')
1703
def EnsureDirs(dirs):
  """Make required directories, if they don't exist.
1707
1708 @param dirs: list of tuples (dir_name, dir_mode)
1709 @type dirs: list of (string, integer)
1710
1711 """
1712 for dir_name, dir_mode in dirs:
1713 try:
1714 os.mkdir(dir_name, dir_mode)
1715 except EnvironmentError, err:
1716 if err.errno != errno.EEXIST:
1717 raise errors.GenericError("Cannot create needed directory"
1718 " '%s': %s" % (dir_name, err))
1719 try:
1720 os.chmod(dir_name, dir_mode)
1721 except EnvironmentError, err:
1722 raise errors.GenericError("Cannot change directory permissions on"
1723 " '%s': %s" % (dir_name, err))
1724 if not os.path.isdir(dir_name):
1725 raise errors.GenericError("%s is not a directory" % dir_name)
1726
def ReadFile(file_name, size=-1):
  """Reads a file.
1730
1731 @type size: int
1732 @param size: Read at most size bytes (if negative, entire file)
1733 @rtype: str
1734 @return: the (possibly partial) content of the file
1735
1736 """
1737 f = open(file_name, "r")
1738 try:
1739 return f.read(size)
1740 finally:
1741 f.close()
1742
1743
def WriteFile(file_name, fn=None, data=None,
              mode=None, uid=-1, gid=-1,
              atime=None, mtime=None, close=True,
              dry_run=False, backup=False,
              prewrite=None, postwrite=None):
1749 """(Over)write a file atomically.
1750
1751 The file_name and either fn (a function taking one argument, the
1752 file descriptor, and which should write the data to it) or data (the
1753 contents of the file) must be passed. The other arguments are
1754 optional and allow setting the file mode, owner and group, and the
1755 mtime/atime of the file.
1756
1757 If the function doesn't raise an exception, it has succeeded and the
1758 target file has the new contents. If the function has raised an
1759 exception, an existing target file should be unmodified and the
1760 temporary file should be removed.
1761
1762 @type file_name: str
1763 @param file_name: the target filename
1764 @type fn: callable
1765 @param fn: content writing function, called with
1766 file descriptor as parameter
1767 @type data: str
1768 @param data: contents of the file
1769 @type mode: int
1770 @param mode: file mode
1771 @type uid: int
1772 @param uid: the owner of the file
1773 @type gid: int
1774 @param gid: the group of the file
1775 @type atime: int
1776 @param atime: a custom access time to be set on the file
1777 @type mtime: int
1778 @param mtime: a custom modification time to be set on the file
1779 @type close: boolean
1780 @param close: whether to close file after writing it
1781 @type prewrite: callable
1782 @param prewrite: function to be called before writing content
1783 @type postwrite: callable
1784 @param postwrite: function to be called after writing content
1785
1786 @rtype: None or int
1787 @return: None if the 'close' parameter evaluates to True,
1788 otherwise the file descriptor
1789
1790 @raise errors.ProgrammerError: if any of the arguments are not valid
1791
1792 """
1793 if not os.path.isabs(file_name):
1794 raise errors.ProgrammerError("Path passed to WriteFile is not"
1795 " absolute: '%s'" % file_name)
1796
1797 if [fn, data].count(None) != 1:
1798 raise errors.ProgrammerError("fn or data required")
1799
1800 if [atime, mtime].count(None) == 1:
1801 raise errors.ProgrammerError("Both atime and mtime must be either"
1802 " set or None")
1803
1804 if backup and not dry_run and os.path.isfile(file_name):
1805 CreateBackup(file_name)
1806
1807 dir_name, base_name = os.path.split(file_name)
1808 fd, new_name = tempfile.mkstemp('.new', base_name, dir_name)
1809 do_remove = True
1810
1811
1812 try:
1813 if uid != -1 or gid != -1:
1814 os.chown(new_name, uid, gid)
1815 if mode:
1816 os.chmod(new_name, mode)
1817 if callable(prewrite):
1818 prewrite(fd)
1819 if data is not None:
1820 os.write(fd, data)
1821 else:
1822 fn(fd)
1823 if callable(postwrite):
1824 postwrite(fd)
1825 os.fsync(fd)
1826 if atime is not None and mtime is not None:
1827 os.utime(new_name, (atime, mtime))
1828 if not dry_run:
1829 os.rename(new_name, file_name)
1830 do_remove = False
1831 finally:
1832 if close:
1833 os.close(fd)
1834 result = None
1835 else:
1836 result = fd
1837 if do_remove:
1838 RemoveFile(new_name)
1839
1840 return result
1841
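# Usage sketch (illustrative only): atomically replacing a configuration file;
# the path, contents and mode are arbitrary examples.
def _ExampleWriteFileUsage():
  WriteFile("/tmp/example.conf", data="key = value\n", mode=0644, backup=True)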
def ReadOneLineFile(file_name, strict=False):
  """Return the first non-empty line from a file.
1845
1846 @type strict: boolean
1847 @param strict: if True, abort if the file has more than one
1848 non-empty line
1849
1850 """
1851 file_lines = ReadFile(file_name).splitlines()
1852 full_lines = filter(bool, file_lines)
1853 if not file_lines or not full_lines:
1854 raise errors.GenericError("No data in one-liner file %s" % file_name)
1855 elif strict and len(full_lines) > 1:
1856 raise errors.GenericError("Too many lines in one-liner file %s" %
1857 file_name)
1858 return full_lines[0]
1859
def FirstFree(seq, base=0):
  """Returns the first non-existing integer from seq.
1863
1864 The seq argument should be a sorted list of positive integers. The
1865 first time the index of an element is smaller than the element
1866 value, the index will be returned.
1867
1868 The base argument is used to start at a different offset,
1869 i.e. C{[3, 4, 6]} with I{offset=3} will return 5.
1870
1871 Example: C{[0, 1, 3]} will return I{2}.
1872
1873 @type seq: sequence
1874 @param seq: the sequence to be analyzed.
1875 @type base: int
1876 @param base: use this value as the base index of the sequence
1877 @rtype: int
1878 @return: the first non-used index in the sequence
1879
1880 """
1881 for idx, elem in enumerate(seq):
1882 assert elem >= base, "Passed element is higher than base offset"
1883 if elem > idx + base:
1884
1885 return idx + base
1886 return None
1887
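# Usage sketch (illustrative only): the return values below follow from the
# description above.
def _ExampleFirstFreeUsage():
  assert FirstFree([0, 1, 3]) == 2
  assert FirstFree([3, 4, 6], base=3) == 5
  assert FirstFree([0, 1, 2]) is None   # no gap in the sequence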
def SingleWaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.
1891
1892 Immediately returns at the first interruption.
1893
1894 @type fdobj: integer or object supporting a fileno() method
1895 @param fdobj: entity to wait for events on
1896 @type event: integer
1897 @param event: ORed condition (see select module)
1898 @type timeout: float or None
1899 @param timeout: Timeout in seconds
1900 @rtype: int or None
  @return: None for timeout, otherwise the conditions that occurred
1902
1903 """
1904 check = (event | select.POLLPRI |
1905 select.POLLNVAL | select.POLLHUP | select.POLLERR)
1906
1907 if timeout is not None:
1908
1909 timeout *= 1000
1910
1911 poller = select.poll()
1912 poller.register(fdobj, event)
1913 try:
1914
1915
1916
1917 io_events = poller.poll(timeout)
1918 except select.error, err:
1919 if err[0] != errno.EINTR:
1920 raise
1921 io_events = []
1922 if io_events and io_events[0][1] & check:
1923 return io_events[0][1]
1924 else:
1925 return None
1926
class FdConditionWaiterHelper(object):
  """Retry helper for WaitForFdCondition.

  This class contains the retried and wait functions that make sure
  WaitForFdCondition can continue waiting until the timeout is actually
  expired.

  """

  def __init__(self, timeout):
    self.timeout = timeout

  def Poll(self, fdobj, event):
    result = SingleWaitForFdCondition(fdobj, event, self.timeout)
    if result is None:
      raise RetryAgain()
    else:
      return result

  def UpdateTimeout(self, timeout):
    self.timeout = timeout
1949
def WaitForFdCondition(fdobj, event, timeout):
  """Waits for a condition to occur on the socket.
1953
1954 Retries until the timeout is expired, even if interrupted.
1955
1956 @type fdobj: integer or object supporting a fileno() method
1957 @param fdobj: entity to wait for events on
1958 @type event: integer
1959 @param event: ORed condition (see select module)
1960 @type timeout: float or None
1961 @param timeout: Timeout in seconds
1962 @rtype: int or None
  @return: None for timeout, otherwise the conditions that occurred
1964
1965 """
1966 if timeout is not None:
1967 retrywaiter = FdConditionWaiterHelper(timeout)
1968 try:
1969 result = Retry(retrywaiter.Poll, RETRY_REMAINING_TIME, timeout,
1970 args=(fdobj, event), wait_fn=retrywaiter.UpdateTimeout)
1971 except RetryTimeout:
1972 result = None
1973 else:
1974 result = None
1975 while result is None:
1976 result = SingleWaitForFdCondition(fdobj, event, timeout)
1977 return result
1978
def UniqueSequence(seq):
  """Returns a list with unique elements.
1982
1983 Element order is preserved.
1984
1985 @type seq: sequence
1986 @param seq: the sequence with the source elements
1987 @rtype: list
1988 @return: list of unique elements from seq
1989
1990 """
1991 seen = set()
1992 return [i for i in seq if i not in seen and not seen.add(i)]
1993
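# Usage sketch (illustrative only): duplicates are dropped while the original
# order of first occurrence is kept.
def _ExampleUniqueSequenceUsage():
  # Returns ["a", "b", "c"]
  return UniqueSequence(["a", "b", "a", "c", "b"])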
def NormalizeAndValidateMac(mac):
  """Normalizes and checks if a MAC address is valid.
1997
1998 Checks whether the supplied MAC address is formally correct, only
1999 accepts colon separated format. Normalize it to all lower.
2000
2001 @type mac: str
2002 @param mac: the MAC to be validated
2003 @rtype: str
2004 @return: returns the normalized and validated MAC.
2005
2006 @raise errors.OpPrereqError: If the MAC isn't valid
2007
2008 """
2009 if not _MAC_CHECK.match(mac):
2010 raise errors.OpPrereqError("Invalid MAC address specified: %s" %
2011 mac, errors.ECODE_INVAL)
2012
2013 return mac.lower()
2014
def TestDelay(duration):
  """Sleep for a fixed amount of time.
2018
2019 @type duration: float
2020 @param duration: the sleep duration
  @rtype: tuple of (boolean, str or None)
  @return: (False, error message) for a negative duration, (True, None) otherwise
2023
2024 """
2025 if duration < 0:
2026 return False, "Invalid sleep duration"
2027 time.sleep(duration)
2028 return True, None
2029
def _CloseFDNoErr(fd, retries=5):
  """Close a file descriptor ignoring errors.
2033
2034 @type fd: int
2035 @param fd: the file descriptor
2036 @type retries: int
2037 @param retries: how many retries to make, in case we get any
2038 other error than EBADF
2039
2040 """
2041 try:
2042 os.close(fd)
2043 except OSError, err:
2044 if err.errno != errno.EBADF:
2045 if retries > 0:
2046 _CloseFDNoErr(fd, retries - 1)
2047
2048
2049
2050
def CloseFDs(noclose_fds=None):
2052 """Close file descriptors.
2053
2054 This closes all file descriptors above 2 (i.e. except
2055 stdin/out/err).
2056
2057 @type noclose_fds: list or None
2058 @param noclose_fds: if given, it denotes a list of file descriptor
2059 that should not be closed
2060
2061 """
2062
2063 if 'SC_OPEN_MAX' in os.sysconf_names:
2064 try:
2065 MAXFD = os.sysconf('SC_OPEN_MAX')
2066 if MAXFD < 0:
2067 MAXFD = 1024
2068 except OSError:
2069 MAXFD = 1024
2070 else:
2071 MAXFD = 1024
2072 maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
2073 if (maxfd == resource.RLIM_INFINITY):
2074 maxfd = MAXFD
2075
2076
2077 for fd in range(3, maxfd):
2078 if noclose_fds and fd in noclose_fds:
2079 continue
2080 _CloseFDNoErr(fd)
2081
def Mlockall(_ctypes=ctypes):
  """Lock current process' virtual address space into RAM.
2085
2086 This is equivalent to the C call mlockall(MCL_CURRENT|MCL_FUTURE),
2087 see mlock(2) for more details. This function requires ctypes module.
2088
2089 @raises errors.NoCtypesError: if ctypes module is not found
2090
2091 """
2092 if _ctypes is None:
2093 raise errors.NoCtypesError()
2094
2095 libc = _ctypes.cdll.LoadLibrary("libc.so.6")
2096 if libc is None:
2097 logging.error("Cannot set memory lock, ctypes cannot load libc")
2098 return
2099
2100
2101
2102
2103
2104
2105
2106 libc.__errno_location.restype = _ctypes.POINTER(_ctypes.c_int)
2107
2108 if libc.mlockall(_MCL_CURRENT | _MCL_FUTURE):
2109
2110 logging.error("Cannot set memory lock: %s",
2111 os.strerror(libc.__errno_location().contents.value))
2112 return
2113
2114 logging.debug("Memory lock set")
2115
2116
def Daemonize(logfile, run_uid, run_gid):
2118 """Daemonize the current process.
2119
2120 This detaches the current process from the controlling terminal and
2121 runs it in the background as a daemon.
2122
2123 @type logfile: str
2124 @param logfile: the logfile to which we should redirect stdout/stderr
2125 @type run_uid: int
2126 @param run_uid: Run the child under this uid
2127 @type run_gid: int
2128 @param run_gid: Run the child under this gid
2129 @rtype: int
2130 @return: the value zero
2131
2132 """
2133
2134
2135 UMASK = 077
2136 WORKDIR = "/"
2137
2138
2139 pid = os.fork()
2140 if (pid == 0):
2141 os.setsid()
2142
2143
2144
2145 os.setgid(run_gid)
2146 os.setuid(run_uid)
2147
2148 pid = os.fork()
2149 if (pid == 0):
2150 os.chdir(WORKDIR)
2151 os.umask(UMASK)
2152 else:
2153
2154 os._exit(0)
2155 else:
2156 os._exit(0)
2157
2158 for fd in range(3):
2159 _CloseFDNoErr(fd)
2160 i = os.open("/dev/null", os.O_RDONLY)
2161 assert i == 0, "Can't close/reopen stdin"
2162 i = os.open(logfile, os.O_WRONLY|os.O_CREAT|os.O_APPEND, 0600)
2163 assert i == 1, "Can't close/reopen stdout"
2164
2165 os.dup2(1, 2)
2166 return 0
2167
def DaemonPidFileName(name):
  """Compute a Ganeti pid file absolute path.
2171
2172 @type name: str
2173 @param name: the daemon name
2174 @rtype: str
2175 @return: the full path to the pidfile corresponding to the given
2176 daemon name
2177
2178 """
2179 return PathJoin(constants.RUN_GANETI_DIR, "%s.pid" % name)
2180
def EnsureDaemon(name):
  """Check for and start daemon if not alive.
2184
2185 """
2186 result = RunCmd([constants.DAEMON_UTIL, "check-and-start", name])
2187 if result.failed:
2188 logging.error("Can't start daemon '%s', failure %s, output: %s",
2189 name, result.fail_reason, result.output)
2190 return False
2191
2192 return True
2193
def StopDaemon(name):
  """Stop a daemon.
2197
2198 """
2199 result = RunCmd([constants.DAEMON_UTIL, "stop", name])
2200 if result.failed:
2201 logging.error("Can't stop daemon '%s', failure %s, output: %s",
2202 name, result.fail_reason, result.output)
2203 return False
2204
2205 return True
2206
def WritePidFile(name):
  """Write the current process pidfile.
2210
2211 The file will be written to L{constants.RUN_GANETI_DIR}I{/name.pid}
2212
2213 @type name: str
2214 @param name: the daemon name to use
2215 @raise errors.GenericError: if the pid file already exists and
2216 points to a live process
2217
2218 """
2219 pid = os.getpid()
2220 pidfilename = DaemonPidFileName(name)
2221 if IsProcessAlive(ReadPidFile(pidfilename)):
2222 raise errors.GenericError("%s contains a live process" % pidfilename)
2223
2224 WriteFile(pidfilename, data="%d\n" % pid)
2225
def RemovePidFile(name):
  """Remove the current process pidfile.
2229
2230 Any errors are ignored.
2231
2232 @type name: str
2233 @param name: the daemon name used to derive the pidfile name
2234
2235 """
2236 pidfilename = DaemonPidFileName(name)
2237
2238 try:
2239 RemoveFile(pidfilename)
2240 except:
2241 pass
2242
def KillProcess(pid, signal_=signal.SIGTERM, timeout=10, waitpid=False):
  """Kill a process given by its pid.
2247
2248 @type pid: int
2249 @param pid: The PID to terminate.
2250 @type signal_: int
2251 @param signal_: The signal to send, by default SIGTERM
2252 @type timeout: int
2253 @param timeout: The timeout after which, if the process is still alive,
2254 a SIGKILL will be sent. If not positive, no such checking
2255 will be done
2256 @type waitpid: boolean
2257 @param waitpid: If true, we should waitpid on this process after
2258 sending signals, since it's our own child and otherwise it
2259 would remain as zombie
2260
2261 """
2262 def _helper(pid, signal_, wait):
2263 """Simple helper to encapsulate the kill/waitpid sequence"""
2264 if IgnoreProcessNotFound(os.kill, pid, signal_) and wait:
2265 try:
2266 os.waitpid(pid, os.WNOHANG)
2267 except OSError:
2268 pass
2269
2270 if pid <= 0:
2271
2272 raise errors.ProgrammerError("Invalid pid given '%s'" % pid)
2273
2274 if not IsProcessAlive(pid):
2275 return
2276
2277 _helper(pid, signal_, waitpid)
2278
2279 if timeout <= 0:
2280 return
2281
2282 def _CheckProcess():
2283 if not IsProcessAlive(pid):
2284 return
2285
2286 try:
2287 (result_pid, _) = os.waitpid(pid, os.WNOHANG)
2288 except OSError:
2289 raise RetryAgain()
2290
2291 if result_pid > 0:
2292 return
2293
2294 raise RetryAgain()
2295
2296 try:
2297
2298 Retry(_CheckProcess, (0.01, 1.5, 0.1), timeout)
2299 except RetryTimeout:
2300 pass
2301
2302 if IsProcessAlive(pid):
2303
2304 _helper(pid, signal.SIGKILL, waitpid)
2305
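# Usage sketch (illustrative only): terminating one of our own child processes
# and escalating to SIGKILL if it has not exited within 30 seconds; the signal
# defaults to SIGTERM as documented above.
def _ExampleKillProcessUsage(child_pid):
  KillProcess(child_pid, timeout=30, waitpid=True)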
2306
def FindFile(name, search_path, test=os.path.exists):
2308 """Look for a filesystem object in a given path.
2309
2310 This is an abstract method to search for filesystem object (files,
2311 dirs) under a given search path.
2312
2313 @type name: str
2314 @param name: the name to look for
2315 @type search_path: str
2316 @param search_path: location to start at
2317 @type test: callable
2318 @param test: a function taking one argument that should return True
    if a given object is valid; the default value is
2320 os.path.exists, causing only existing files to be returned
2321 @rtype: str or None
2322 @return: full path to the object if found, None otherwise
2323
2324 """
2325
2326 if constants.EXT_PLUGIN_MASK.match(name) is None:
2327 logging.critical("Invalid value passed for external script name: '%s'",
2328 name)
2329 return None
2330
2331 for dir_name in search_path:
2332
2333 item_name = os.path.sep.join([dir_name, name])
2334
2335
2336 if test(item_name) and os.path.basename(item_name) == name:
2337 return item_name
2338 return None
2339
def CheckVolumeGroupSize(vglist, vgname, minsize):
  """Checks if the volume group list is valid.
2343
2344 The function will check if a given volume group is in the list of
2345 volume groups and has a minimum size.
2346
2347 @type vglist: dict
2348 @param vglist: dictionary of volume group names and their size
2349 @type vgname: str
2350 @param vgname: the volume group we should check
2351 @type minsize: int
2352 @param minsize: the minimum size we accept
2353 @rtype: None or str
2354 @return: None for success, otherwise the error message
2355
2356 """
2357 vgsize = vglist.get(vgname, None)
2358 if vgsize is None:
2359 return "volume group '%s' missing" % vgname
2360 elif vgsize < minsize:
2361 return ("volume group '%s' too small (%s MiB required, %d MiB found)" %
2362 (vgname, minsize, vgsize))
2363 return None
2364
2367 """Splits time as floating point number into a tuple.
2368
2369 @param value: Time in seconds
2370 @type value: int or float
2371 @return: Tuple containing (seconds, microseconds)
2372
2373 """
2374 (seconds, microseconds) = divmod(int(value * 1000000), 1000000)
2375
2376 assert 0 <= seconds, \
2377 "Seconds must be larger than or equal to 0, but are %s" % seconds
2378 assert 0 <= microseconds <= 999999, \
2379 "Microseconds must be 0-999999, but are %s" % microseconds
2380
2381 return (int(seconds), int(microseconds))
2382
2385 """Merges a tuple into time as a floating point number.
2386
2387 @param timetuple: Time as tuple, (seconds, microseconds)
2388 @type timetuple: tuple
2389 @return: Time as a floating point number expressed in seconds
2390
2391 """
2392 (seconds, microseconds) = timetuple
2393
2394 assert 0 <= seconds, \
2395 "Seconds must be larger than or equal to 0, but are %s" % seconds
2396 assert 0 <= microseconds <= 999999, \
2397 "Microseconds must be 0-999999, but are %s" % microseconds
2398
2399 return float(seconds) + (float(microseconds) * 0.000001)
2400
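# Editorial worked example of the split/merge arithmetic documented above.
# The two function names are elided in this listing, so the round trip is
# shown inline using the same divmod and recombination formulas.
def _example_split_merge_time():
  value = 1234.5
  (seconds, microseconds) = divmod(int(value * 1000000), 1000000)
  assert (seconds, microseconds) == (1234, 500000)
  merged = float(seconds) + (float(microseconds) * 0.000001)
  assert merged == value
  return merged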
2403 """Log handler that doesn't fallback to stderr.
2404
2405 When an error occurs while writing on the logfile, logging.FileHandler tries
2406 to log on stderr. This doesn't work in ganeti since stderr is redirected to
2407 the logfile. This class avoids failures reporting errors to /dev/console.
2408
2409 """
2410 - def __init__(self, filename, mode="a", encoding=None):
2411 """Open the specified file and use it as the stream for logging.
2412
2413 Also open /dev/console to report errors while logging.
2414
2415 """
2416 logging.FileHandler.__init__(self, filename, mode, encoding)
2417 self.console = open(constants.DEV_CONSOLE, "a")
2418
2420 """Handle errors which occur during an emit() call.
2421
2422 Try to handle errors with FileHandler method, if it fails write to
2423 /dev/console.
2424
2425 """
2426 try:
2427 logging.FileHandler.handleError(self, record)
2428 except Exception:
2429 try:
2430 self.console.write("Cannot log message:\n%s\n" % self.format(record))
2431 except Exception:
2432
2433 pass
2434
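# Editorial sketch: attaching LogFileHandler to the root logger so that
# errors raised while writing the log are reported on /dev/console instead
# of the (redirected) stderr.  The log file path is illustrative.
def _example_logfilehandler():
  handler = LogFileHandler("/var/log/example-daemon.log")
  handler.setFormatter(logging.Formatter("%(asctime)s: %(message)s"))
  handler.setLevel(logging.INFO)
  logging.getLogger("").addHandler(handler)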
2439 """Configures the logging module.
2440
2441 @type logfile: str
2442 @param logfile: the filename to which we should log
2443 @type debug: integer
2444 @param debug: if greater than zero, enable debug messages, otherwise
2445 only messages at C{INFO} level and above are logged
2446 @type stderr_logging: boolean
2447 @param stderr_logging: whether we should also log to the standard error
2448 @type program: str
2449 @param program: the name under which we should log messages
2450 @type multithreaded: boolean
2451 @param multithreaded: if True, will add the thread name to the log file
2452 @type syslog: string
2453 @param syslog: one of 'no', 'yes', 'only':
2454 - if no, syslog is not used
2455 - if yes, syslog is used (in addition to file-logging)
2456 - if only, only syslog is used
2457 @type console_logging: boolean
2458 @param console_logging: if True, will use a FileHandler which falls back to
2459 the system console if logging fails
2460 @raise EnvironmentError: if we can't open the log file and
2461 syslog/stderr logging is disabled
2462
2463 """
2464 fmt = "%(asctime)s: " + program + " pid=%(process)d"
2465 sft = program + "[%(process)d]:"
2466 if multithreaded:
2467 fmt += "/%(threadName)s"
2468 sft += " (%(threadName)s)"
2469 if debug:
2470 fmt += " %(module)s:%(lineno)s"
2471
2472 fmt += " %(levelname)s %(message)s"
2473
2474
2475 sft += " %(levelname)s %(message)s"
2476 formatter = logging.Formatter(fmt)
2477 sys_fmt = logging.Formatter(sft)
2478
2479 root_logger = logging.getLogger("")
2480 root_logger.setLevel(logging.NOTSET)
2481
2482
2483 for handler in root_logger.handlers:
2484 handler.close()
2485 root_logger.removeHandler(handler)
2486
2487 if stderr_logging:
2488 stderr_handler = logging.StreamHandler()
2489 stderr_handler.setFormatter(formatter)
2490 if debug:
2491 stderr_handler.setLevel(logging.NOTSET)
2492 else:
2493 stderr_handler.setLevel(logging.CRITICAL)
2494 root_logger.addHandler(stderr_handler)
2495
2496 if syslog in (constants.SYSLOG_YES, constants.SYSLOG_ONLY):
2497 facility = logging.handlers.SysLogHandler.LOG_DAEMON
2498 syslog_handler = logging.handlers.SysLogHandler(constants.SYSLOG_SOCKET,
2499 facility)
2500 syslog_handler.setFormatter(sys_fmt)
2501
2502 syslog_handler.setLevel(logging.INFO)
2503 root_logger.addHandler(syslog_handler)
2504
2505 if syslog != constants.SYSLOG_ONLY:
2506
2507
2508
2509
2510 try:
2511 if console_logging:
2512 logfile_handler = LogFileHandler(logfile)
2513 else:
2514 logfile_handler = logging.FileHandler(logfile)
2515 logfile_handler.setFormatter(formatter)
2516 if debug:
2517 logfile_handler.setLevel(logging.DEBUG)
2518 else:
2519 logfile_handler.setLevel(logging.INFO)
2520 root_logger.addHandler(logfile_handler)
2521 except EnvironmentError:
2522 if stderr_logging or syslog == constants.SYSLOG_YES:
2523 logging.exception("Failed to enable logging to file '%s'", logfile)
2524 else:
2525
2526 raise
2527
2530 """Check whether a path is absolute and also normalized
2531
2532 This avoids things like /dir/../../other/path to be valid.
2533
2534 """
2535 return os.path.normpath(path) == path and os.path.isabs(path)
2536
2539 """Safe-join a list of path components.
2540
2541 Requirements:
2542 - the first argument must be an absolute path
2543 - no component in the path must have backtracking (e.g. /../),
2544 since we check for normalization at the end
2545
2546 @param args: the path components to be joined
2547 @raise ValueError: for invalid paths
2548
2549 """
2550
2551 assert args
2552
2553 root = args[0]
2554 if not IsNormAbsPath(root):
2555 raise ValueError("Invalid parameter to PathJoin: '%s'" % str(args[0]))
2556 result = os.path.join(*args)
2557
2558 if not IsNormAbsPath(result):
2559 raise ValueError("Invalid parameters to PathJoin: '%s'" % str(args))
2560
2561 prefix = os.path.commonprefix([root, result])
2562 if prefix != root:
2563 raise ValueError("Error: path joining resulted in different prefix"
2564 " (%s != %s)" % (prefix, root))
2565 return result
2566
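# Editorial sketch: behaviour of IsNormAbsPath and PathJoin; the paths are
# illustrative.
def _example_path_join():
  assert IsNormAbsPath("/var/lib/example")
  assert not IsNormAbsPath("/var/lib/../lib")      # not normalized
  assert PathJoin("/var/lib/example", "config.data") == \
    "/var/lib/example/config.data"
  try:
    PathJoin("/var/lib/example", "../secrets")     # escapes the root
  except ValueError:
    pass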
2569 """Return the last lines from a file.
2570
2571 @note: this function will only read and parse the last 4KB of
2572 the file; if the lines are very long, it could be that fewer
2573 than the requested number of lines are returned
2574
2575 @param fname: the file name
2576 @type lines: int
2577 @param lines: the (maximum) number of lines to return
2578
2579 """
2580 fd = open(fname, "r")
2581 try:
2582 fd.seek(0, 2)
2583 pos = fd.tell()
2584 pos = max(0, pos-4096)
2585 fd.seek(pos, 0)
2586 raw_data = fd.read()
2587 finally:
2588 fd.close()
2589
2590 rows = raw_data.splitlines()
2591 return rows[-lines:]
2592
2599
2602 """Parses an ASN1 GENERALIZEDTIME timestamp as used by pyOpenSSL.
2603
2604 @type value: string
2605 @param value: ASN1 GENERALIZEDTIME timestamp
2606
2607 """
2608 m = re.match(r"^(\d+)([-+]\d\d)(\d\d)$", value)
2609 if m:
2610
2611 asn1time = m.group(1)
2612 hours = int(m.group(2))
2613 minutes = int(m.group(3))
2614 utcoffset = (60 * hours) + minutes
2615 else:
2616 if not value.endswith("Z"):
2617 raise ValueError("Missing timezone")
2618 asn1time = value[:-1]
2619 utcoffset = 0
2620
2621 parsed = time.strptime(asn1time, "%Y%m%d%H%M%S")
2622
2623 tt = datetime.datetime(*(parsed[:7])) - datetime.timedelta(minutes=utcoffset)
2624
2625 return calendar.timegm(tt.utctimetuple())
2626
2629 """Returns the validity period of the certificate.
2630
2631 @type cert: OpenSSL.crypto.X509
2632 @param cert: X509 certificate object
2633
2634 """
2635
2636
2637 try:
2638 get_notbefore_fn = cert.get_notBefore
2639 except AttributeError:
2640 not_before = None
2641 else:
2642 not_before_asn1 = get_notbefore_fn()
2643
2644 if not_before_asn1 is None:
2645 not_before = None
2646 else:
2647 not_before = _ParseAsn1Generalizedtime(not_before_asn1)
2648
2649 try:
2650 get_notafter_fn = cert.get_notAfter
2651 except AttributeError:
2652 not_after = None
2653 else:
2654 not_after_asn1 = get_notafter_fn()
2655
2656 if not_after_asn1 is None:
2657 not_after = None
2658 else:
2659 not_after = _ParseAsn1Generalizedtime(not_after_asn1)
2660
2661 return (not_before, not_after)
2662
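# Editorial sketch: reading the validity window of a freshly generated
# self-signed certificate (GenerateSelfSignedX509Cert is defined further
# down in this module).  The common name and one-hour validity are
# illustrative.
def _example_cert_validity():
  (_, cert_pem) = GenerateSelfSignedX509Cert("example.ganeti.org", 3600)
  cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM,
                                         cert_pem)
  (not_before, not_after) = GetX509CertValidity(cert)
  # Both values are Unix timestamps spanning roughly one hour.
  assert not_before is not None and not_after is not None
  assert not_after - not_before >= 3600
  return (not_before, not_after)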
2666 """Verifies certificate validity.
2667
2668 @type expired: bool
2669 @param expired: Whether pyOpenSSL considers the certificate as expired
2670 @type not_before: number or None
2671 @param not_before: Unix timestamp before which certificate is not valid
2672 @type not_after: number or None
2673 @param not_after: Unix timestamp after which certificate is invalid
2674 @type now: number
2675 @param now: Current time as Unix timestamp
2676 @type warn_days: number or None
2677 @param warn_days: How many days before expiration a warning should be reported
2678 @type error_days: number or None
2679 @param error_days: How many days before expiration an error should be reported
2680
2681 """
2682 if expired:
2683 msg = "Certificate is expired"
2684
2685 if not_before is not None and not_after is not None:
2686 msg += (" (valid from %s to %s)" %
2687 (FormatTimestampWithTZ(not_before),
2688 FormatTimestampWithTZ(not_after)))
2689 elif not_before is not None:
2690 msg += " (valid from %s)" % FormatTimestampWithTZ(not_before)
2691 elif not_after is not None:
2692 msg += " (valid until %s)" % FormatTimestampWithTZ(not_after)
2693
2694 return (CERT_ERROR, msg)
2695
2696 elif not_before is not None and not_before > now:
2697 return (CERT_WARNING,
2698 "Certificate not yet valid (valid from %s)" %
2699 FormatTimestampWithTZ(not_before))
2700
2701 elif not_after is not None:
2702 remaining_days = int((not_after - now) / (24 * 3600))
2703
2704 msg = "Certificate expires in about %d days" % remaining_days
2705
2706 if error_days is not None and remaining_days <= error_days:
2707 return (CERT_ERROR, msg)
2708
2709 if warn_days is not None and remaining_days <= warn_days:
2710 return (CERT_WARNING, msg)
2711
2712 return (None, None)
2713
2716 """Verifies a certificate for LUVerifyCluster.
2717
2718 @type cert: OpenSSL.crypto.X509
2719 @param cert: X509 certificate object
2720 @type warn_days: number or None
2721 @param warn_days: How many days before expiration a warning should be reported
2722 @type error_days: number or None
2723 @param error_days: How many days before expiration an error should be reported
2724
2725 """
2726
2727 (not_before, not_after) = GetX509CertValidity(cert)
2728
2729 return _VerifyCertificateInner(cert.has_expired(), not_before, not_after,
2730 time.time(), warn_days, error_days)
2731
2734 """Sign a X509 certificate.
2735
2736 An RFC822-like signature header is added in front of the certificate.
2737
2738 @type cert: OpenSSL.crypto.X509
2739 @param cert: X509 certificate object
2740 @type key: string
2741 @param key: Key for HMAC
2742 @type salt: string
2743 @param salt: Salt for HMAC
2744 @rtype: string
2745 @return: Serialized and signed certificate in PEM format
2746
2747 """
2748 if not VALID_X509_SIGNATURE_SALT.match(salt):
2749 raise errors.GenericError("Invalid salt: %r" % salt)
2750
2751
2752 cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2753
2754 return ("%s: %s/%s\n\n%s" %
2755 (constants.X509_CERT_SIGNATURE_HEADER, salt,
2756 Sha1Hmac(key, cert_pem, salt=salt),
2757 cert_pem))
2758
2761 """Helper function to extract signature from X509 certificate.
2762
2763 """
2764
2765 for line in cert_pem.splitlines():
2766 if line.startswith("---"):
2767 break
2768
2769 m = X509_SIGNATURE.match(line.strip())
2770 if m:
2771 return (m.group("salt"), m.group("sign"))
2772
2773 raise errors.GenericError("X509 certificate signature is missing")
2774
2777 """Verifies a signed X509 certificate.
2778
2779 @type cert_pem: string
2780 @param cert_pem: Certificate in PEM format and with signature header
2781 @type key: string
2782 @param key: Key for HMAC
2783 @rtype: tuple; (OpenSSL.crypto.X509, string)
2784 @return: X509 certificate object and salt
2785
2786 """
2787 (salt, signature) = _ExtractX509CertificateSignature(cert_pem)
2788
2789
2790 cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, cert_pem)
2791
2792
2793 sane_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
2794
2795 if not VerifySha1Hmac(key, sane_pem, signature, salt=salt):
2796 raise errors.GenericError("X509 certificate signature is invalid")
2797
2798 return (cert, salt)
2799
2800
2801 -def Sha1Hmac(key, text, salt=None):
2802 """Calculates the HMAC-SHA1 digest of a text.
2803
2804 HMAC is defined in RFC2104.
2805
2806 @type key: string
2807 @param key: Secret key
2808 @type text: string
2809 @param text: the text to authenticate; if C{salt} is given, it is prepended
2810 """
2811 if salt:
2812 salted_text = salt + text
2813 else:
2814 salted_text = text
2815
2816 return hmac.new(key, salted_text, compat.sha1).hexdigest()
2817
2820 """Verifies the HMAC-SHA1 digest of a text.
2821
2822 HMAC is defined in RFC2104.
2823
2824 @type key: string
2825 @param key: Secret key
2826 @type text: string
2827 @type digest: string
2828 @param digest: Expected digest
2829 @rtype: bool
2830 @return: Whether HMAC-SHA1 digest matches
2831
2832 """
2833 return digest.lower() == Sha1Hmac(key, text, salt=salt).lower()
2834
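# Editorial sketch: computing and verifying a salted HMAC-SHA1 digest; the
# key, message and salt values are illustrative.
def _example_sha1_hmac():
  digest = Sha1Hmac("secret-key", "important message", salt="abc123")
  assert VerifySha1Hmac("secret-key", "important message", digest,
                        salt="abc123")
  assert not VerifySha1Hmac("other-key", "important message", digest,
                            salt="abc123")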
2837 """Return a 'safe' version of a source string.
2838
2839 This function mangles the input string and returns a version that
2840 should be safe to display/encode as ASCII. To this end, we first
2841 convert it to ASCII using the 'backslashreplace' encoding which
2842 should get rid of any non-ASCII chars, and then we process it
2843 through a loop copied from the string repr sources in Python; we
2844 don't use string_escape anymore since that escapes single quotes and
2845 backslashes too, which is too much, and that escaping is not
2846 stable, i.e. string_escape(string_escape(x)) != string_escape(x).
2847
2848 @type text: str or unicode
2849 @param text: input data
2850 @rtype: str
2851 @return: a safe version of text
2852
2853 """
2854 if isinstance(text, unicode):
2855
2856 text = text.encode('ascii', 'backslashreplace')
2857 resu = ""
2858 for char in text:
2859 c = ord(char)
2860 if char == '\t':
2861 resu += r'\t'
2862 elif char == '\n':
2863 resu += r'\n'
2864 elif char == '\r':
2865 resu += r'\r'
2866 elif c < 32 or c >= 127:
2867 resu += "\\x%02x" % (c & 0xff)
2868 else:
2869 resu += char
2870 return resu
2871
2874 """Split and unescape a string based on a given separator.
2875
2876 This function splits a string based on a separator where the
2877 separator itself can be escaped in order to be part of one of the
2878 elements. The escaping rules are (assuming comma is the
2879 separator):
2880 - a plain , separates the elements
2881 - a sequence \\\\, (double backslash plus comma) is handled as a
2882 backslash plus a separator comma
2883 - a sequence \, (backslash plus comma) is handled as a
2884 non-separator comma
2885
2886 @type text: string
2887 @param text: the string to split
2888 @type sep: string
2889 @param sep: the separator
2890 @rtype: list
2891 @return: a list of strings
2892
2893 """
2894
2895 slist = text.split(sep)
2896
2897
2898 rlist = []
2899 while slist:
2900 e1 = slist.pop(0)
2901 if e1.endswith("\\"):
2902 num_b = len(e1) - len(e1.rstrip("\\"))
2903 if num_b % 2 == 1:
2904 e2 = slist.pop(0)
2905
2906
2907 rlist.append(e1 + sep + e2)
2908 continue
2909 rlist.append(e1)
2910
2911 rlist = [re.sub(r"\\(.)", r"\1", v) for v in rlist]
2912 return rlist
2913
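# Editorial worked example of the escaping rules described above.  The
# splitting function's name is elided in this listing, so the behaviour is
# shown as data only, using Python string-literal notation and a comma
# separator:
#
#   "a,b"      ->  ["a", "b"]        (a plain comma separates)
#   "a\\,b"    ->  ["a,b"]           (backslash plus comma: literal comma)
#   "a\\\\,b"  ->  ["a\\", "b"]      (double backslash: literal backslash,
#                                     followed by a separating comma)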
2916 """Nicely join a set of identifiers.
2917
2918 @param names: set, list or tuple
2919 @return: a string with the formatted results
2920
2921 """
2922 return ", ".join([str(val) for val in names])
2923
2926 """Converts bytes to mebibytes.
2927
2928 @type value: int
2929 @param value: Value in bytes
2930 @rtype: int
2931 @return: Value in mebibytes
2932
2933 """
2934 return int(round(value / (1024.0 * 1024.0), 0))
2935
2938 """Calculates the size of a directory recursively.
2939
2940 @type path: string
2941 @param path: Path to directory
2942 @rtype: int
2943 @return: Size in mebibytes
2944
2945 """
2946 size = 0
2947
2948 for (curpath, _, files) in os.walk(path):
2949 for filename in files:
2950 st = os.lstat(PathJoin(curpath, filename))
2951 size += st.st_size
2952
2953 return BytesToMebibyte(size)
2954
2957 """Returns the list of mounted filesystems.
2958
2959 This function is Linux-specific.
2960
2961 @param filename: path of mounts file (/proc/mounts by default)
2962 @rtype: list of tuples
2963 @return: list of mount entries (device, mountpoint, fstype, options)
2964
2965 """
2966
2967 data = []
2968 mountlines = ReadFile(filename).splitlines()
2969 for line in mountlines:
2970 device, mountpoint, fstype, options, _ = line.split(None, 4)
2971 data.append((device, mountpoint, fstype, options))
2972
2973 return data
2974
2977 """Returns the total and free space on a filesystem.
2978
2979 @type path: string
2980 @param path: Path on filesystem to be examined
2981 @rtype: tuple
2982 @return: tuple of (total space, free space) in mebibytes
2983
2984 """
2985 st = os.statvfs(path)
2986
2987 fsize = BytesToMebibyte(st.f_bavail * st.f_frsize)
2988 tsize = BytesToMebibyte(st.f_blocks * st.f_frsize)
2989 return (tsize, fsize)
2990
2993 """Runs a function in a separate process.
2994
2995 Note: Only boolean return values are supported.
2996
2997 @type fn: callable
2998 @param fn: Function to be called
2999 @rtype: bool
3000 @return: Function's result
3001
3002 """
3003 pid = os.fork()
3004 if pid == 0:
3005
3006 try:
3007
3008 ResetTempfileModule()
3009
3010
3011 result = int(bool(fn(*args)))
3012 assert result in (0, 1)
3013 except:
3014 logging.exception("Error while calling function in separate process")
3015
3016 result = 33
3017
3018 os._exit(result)
3019
3020
3021
3022
3023 (_, status) = os.waitpid(pid, 0)
3024
3025 if os.WIFSIGNALED(status):
3026 exitcode = None
3027 signum = os.WTERMSIG(status)
3028 else:
3029 exitcode = os.WEXITSTATUS(status)
3030 signum = None
3031
3032 if not (exitcode in (0, 1) and signum is None):
3033 raise errors.GenericError("Child program failed (code=%s, signal=%s)" %
3034 (exitcode, signum))
3035
3036 return bool(exitcode)
3037
3040 """Ignores ESRCH when calling a process-related function.
3041
3042 ESRCH is raised when a process is not found.
3043
3044 @rtype: bool
3045 @return: Whether process was found
3046
3047 """
3048 try:
3049 fn(*args, **kwargs)
3050 except EnvironmentError, err:
3051
3052 if err.errno == errno.ESRCH:
3053 return False
3054 raise
3055
3056 return True
3057
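# Editorial sketch: probing for a process with IgnoreProcessNotFound instead
# of handling ESRCH by hand.  Signal number 0 only checks for existence; the
# pid is expected to come from the caller.
def _example_probe_process(pid):
  return IgnoreProcessNotFound(os.kill, pid, 0)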
3060 """Tries to call a function ignoring failures due to EINTR.
3061
3062 """
3063 try:
3064 return fn(*args, **kwargs)
3065 except EnvironmentError, err:
3066 if err.errno == errno.EINTR:
3067 return None
3068 else:
3069 raise
3070 except (select.error, socket.error), err:
3071
3072
3073 if err.args and err.args[0] == errno.EINTR:
3074 return None
3075 else:
3076 raise
3077
3080 """Locks a file using POSIX locks.
3081
3082 @type fd: int
3083 @param fd: the file descriptor we need to lock
3084
3085 """
3086 try:
3087 fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
3088 except IOError, err:
3089 if err.errno == errno.EAGAIN:
3090 raise errors.LockError("File already locked")
3091 raise
3092
3107
3132
3135 """Reads the watcher pause file.
3136
3137 @type filename: string
3138 @param filename: Path to watcher pause file
3139 @type now: None, float or int
3140 @param now: Current time as Unix timestamp
3141 @type remove_after: int
3142 @param remove_after: Remove the watcher pause file after the specified
3143 number of seconds past the pause end time
3144
3145 """
3146 if now is None:
3147 now = time.time()
3148
3149 try:
3150 value = ReadFile(filename)
3151 except IOError, err:
3152 if err.errno != errno.ENOENT:
3153 raise
3154 value = None
3155
3156 if value is not None:
3157 try:
3158 value = int(value)
3159 except ValueError:
3160 logging.warning(("Watcher pause file (%s) contains invalid value,"
3161 " removing it"), filename)
3162 RemoveFile(filename)
3163 value = None
3164
3165 if value is not None:
3166
3167 if now > (value + remove_after):
3168 RemoveFile(filename)
3169 value = None
3170
3171 elif now > value:
3172 value = None
3173
3174 return value
3175
3178 """Retry loop timed out.
3179
3180 Any arguments which were passed by the retried function to RetryAgain will be
3181 preserved in RetryTimeout, if it is raised. If such an argument was an
3182 exception, the RaiseInner helper method will reraise it.
3183
3184 """
3190
3193 """Retry again.
3194
3195 Any arguments passed to RetryAgain will be preserved, if a timeout occurs, as
3196 arguments to RetryTimeout. If an exception is passed, the RaiseInner() method
3197 of the RetryTimeout exception can be used to reraise it.
3198
3199 """
3200
3203 """Calculator for increasing delays.
3204
3205 """
3206 __slots__ = [
3207 "_factor",
3208 "_limit",
3209 "_next",
3210 "_start",
3211 ]
3212
3213 - def __init__(self, start, factor, limit):
3214 """Initializes this class.
3215
3216 @type start: float
3217 @param start: Initial delay
3218 @type factor: float
3219 @param factor: Factor for delay increase
3220 @type limit: float or None
3221 @param limit: Upper limit for delay or None for no limit
3222
3223 """
3224 assert start > 0.0
3225 assert factor >= 1.0
3226 assert limit is None or limit >= 0.0
3227
3228 self._start = start
3229 self._factor = factor
3230 self._limit = limit
3231
3232 self._next = start
3233
3235 """Returns current delay and calculates the next one.
3236
3237 """
3238 current = self._next
3239
3240
3241 self._next = self._next * self._factor
3242 if self._limit is not None:
3243 self._next = min(self._limit, self._next)
3244 return current
3245
3246
3247
3248 RETRY_REMAINING_TIME = object()
3249
3250
3251 -def Retry(fn, delay, timeout, args=None, wait_fn=time.sleep,
3252 _time_fn=time.time):
3253 """Call a function repeatedly until it succeeds.
3254
3255 The function C{fn} is called repeatedly until it doesn't throw L{RetryAgain}
3256 anymore. Between calls a delay, specified by C{delay}, is inserted. After a
3257 total of C{timeout} seconds, this function throws L{RetryTimeout}.
3258
3259 C{delay} can be one of the following:
3260 - callable returning the delay length as a float
3261 - Tuple of (start, factor, limit)
3262 - L{RETRY_REMAINING_TIME} to sleep until the timeout expires (this is
3263 useful when overriding L{wait_fn} to wait for an external event)
3264 - A static delay as a number (int or float)
3265
3266 @type fn: callable
3267 @param fn: Function to be called
3268 @param delay: Either a callable (returning the delay), a tuple of (start,
3269 factor, limit) (see L{_RetryDelayCalculator}),
3270 L{RETRY_REMAINING_TIME} or a number (int or float)
3271 @type timeout: float
3272 @param timeout: Total timeout
3273 @type wait_fn: callable
3274 @param wait_fn: Waiting function
3275 @return: Return value of function
3276
3277 """
3278 assert callable(fn)
3279 assert callable(wait_fn)
3280 assert callable(_time_fn)
3281
3282 if args is None:
3283 args = []
3284
3285 end_time = _time_fn() + timeout
3286
3287 if callable(delay):
3288
3289 calc_delay = delay
3290
3291 elif isinstance(delay, (tuple, list)):
3292
3293 (start, factor, limit) = delay
3294 calc_delay = _RetryDelayCalculator(start, factor, limit)
3295
3296 elif delay is RETRY_REMAINING_TIME:
3297
3298 calc_delay = None
3299
3300 else:
3301
3302 calc_delay = lambda: delay
3303
3304 assert calc_delay is None or callable(calc_delay)
3305
3306 while True:
3307 retry_args = []
3308 try:
3309
3310 return fn(*args)
3311 except RetryAgain, err:
3312 retry_args = err.args
3313 except RetryTimeout:
3314 raise errors.ProgrammerError("Nested retry loop detected that didn't"
3315 " handle RetryTimeout")
3316
3317 remaining_time = end_time - _time_fn()
3318
3319 if remaining_time < 0.0:
3320
3321 raise RetryTimeout(*retry_args)
3322
3323 assert remaining_time >= 0.0
3324
3325 if calc_delay is None:
3326 wait_fn(remaining_time)
3327 else:
3328 current_delay = calc_delay()
3329 if current_delay > 0.0:
3330 wait_fn(current_delay)
3331
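# Editorial sketch: polling for a condition with Retry and RetryAgain.  The
# flag file path, delays and timeout are illustrative.
def _example_retry_poll(path="/tmp/example-flag"):
  def _check():
    if not os.path.exists(path):
      raise RetryAgain()
    return path

  try:
    # Start with a 0.1s delay, growing by factor 1.5 up to 1.0s, for at
    # most 10 seconds overall.
    return Retry(_check, (0.1, 1.5, 1.0), 10.0)
  except RetryTimeout:
    return None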
3334 """Creates a temporary file and returns its path.
3335
3336 """
3337 (fd, path) = tempfile.mkstemp(*args, **kwargs)
3338 _CloseFDNoErr(fd)
3339 return path
3340
3343 """Generates a self-signed X509 certificate.
3344
3345 @type common_name: string
3346 @param common_name: commonName value
3347 @type validity: int
3348 @param validity: Validity for certificate in seconds
3349
3350 """
3351
3352 key = OpenSSL.crypto.PKey()
3353 key.generate_key(OpenSSL.crypto.TYPE_RSA, constants.RSA_KEY_BITS)
3354
3355
3356 cert = OpenSSL.crypto.X509()
3357 if common_name:
3358 cert.get_subject().CN = common_name
3359 cert.set_serial_number(1)
3360 cert.gmtime_adj_notBefore(0)
3361 cert.gmtime_adj_notAfter(validity)
3362 cert.set_issuer(cert.get_subject())
3363 cert.set_pubkey(key)
3364 cert.sign(key, constants.X509_CERT_SIGN_DIGEST)
3365
3366 key_pem = OpenSSL.crypto.dump_privatekey(OpenSSL.crypto.FILETYPE_PEM, key)
3367 cert_pem = OpenSSL.crypto.dump_certificate(OpenSSL.crypto.FILETYPE_PEM, cert)
3368
3369 return (key_pem, cert_pem)
3370
3374 """Legacy function to generate self-signed X509 certificate.
3375
3376 @type filename: str
3377 @param filename: path to write certificate to
3378 @type common_name: string
3379 @param common_name: commonName value
3380 @type validity: int
3381 @param validity: validity of certificate in number of days
3382
3383 """
3384
3385
3386
3387 (key_pem, cert_pem) = GenerateSelfSignedX509Cert(common_name,
3388 validity * 24 * 60 * 60)
3389
3390 WriteFile(filename, mode=0400, data=key_pem + cert_pem)
3391
3394 """Utility class for file locks.
3395
3396 """
3398 """Constructor for FileLock.
3399
3400 @type fd: file
3401 @param fd: File object
3402 @type filename: str
3403 @param filename: Path of the file opened at I{fd}
3404
3405 """
3406 self.fd = fd
3407 self.filename = filename
3408
3409 @classmethod
3410 - def Open(cls, filename):
3411 """Creates and opens a file to be used as a file-based lock.
3412
3413 @type filename: string
3414 @param filename: path to the file to be locked
3415
3416 """
3417
3418
3419
3420 return cls(os.fdopen(os.open(filename, os.O_RDWR | os.O_CREAT), "w+"),
3421 filename)
3422
3425
3427 """Close the file and release the lock.
3428
3429 """
3430 if hasattr(self, "fd") and self.fd:
3431 self.fd.close()
3432 self.fd = None
3433
3434 - def _flock(self, flag, blocking, timeout, errmsg):
3435 """Wrapper for fcntl.flock.
3436
3437 @type flag: int
3438 @param flag: operation flag
3439 @type blocking: bool
3440 @param blocking: whether the operation should be done in blocking mode.
3441 @type timeout: None or float
3442 @param timeout: for how long the operation should be retried (implies
3443 non-blocking mode).
3444 @type errmsg: string
3445 @param errmsg: error message in case operation fails.
3446
3447 """
3448 assert self.fd, "Lock was closed"
3449 assert timeout is None or timeout >= 0, \
3450 "If specified, timeout must be positive"
3451 assert not (flag & fcntl.LOCK_NB), "LOCK_NB must not be set"
3452
3453
3454 if not (timeout is None and blocking):
3455 flag |= fcntl.LOCK_NB
3456
3457 if timeout is None:
3458 self._Lock(self.fd, flag, timeout)
3459 else:
3460 try:
3461 Retry(self._Lock, (0.1, 1.2, 1.0), timeout,
3462 args=(self.fd, flag, timeout))
3463 except RetryTimeout:
3464 raise errors.LockError(errmsg)
3465
3466 @staticmethod
3467 - def _Lock(fd, flag, timeout):
3468 try:
3469 fcntl.flock(fd, flag)
3470 except IOError, err:
3471 if timeout is not None and err.errno == errno.EAGAIN:
3472 raise RetryAgain()
3473
3474 logging.exception("fcntl.flock failed")
3475 raise
3476
3477 - def Exclusive(self, blocking=False, timeout=None):
3478 """Locks the file in exclusive mode.
3479
3480 @type blocking: boolean
3481 @param blocking: whether to block and wait until we
3482 can lock the file or return immediately
3483 @type timeout: int or None
3484 @param timeout: if not None, the duration to wait for the lock
3485 (in blocking mode)
3486
3487 """
3488 self._flock(fcntl.LOCK_EX, blocking, timeout,
3489 "Failed to lock %s in exclusive mode" % self.filename)
3490
3491 - def Shared(self, blocking=False, timeout=None):
3492 """Locks the file in shared mode.
3493
3494 @type blocking: boolean
3495 @param blocking: whether to block and wait until we
3496 can lock the file or return immediately
3497 @type timeout: int or None
3498 @param timeout: if not None, the duration to wait for the lock
3499 (in blocking mode)
3500
3501 """
3502 self._flock(fcntl.LOCK_SH, blocking, timeout,
3503 "Failed to lock %s in shared mode" % self.filename)
3504
3505 - def Unlock(self, blocking=True, timeout=None):
3506 """Unlocks the file.
3507
3508 According to C{flock(2)}, unlocking can also be a nonblocking
3509 operation::
3510
3511 To make a non-blocking request, include LOCK_NB with any of the above
3512 operations.
3513
3514 @type blocking: boolean
3515 @param blocking: whether to block and wait until we
3516 can lock the file or return immediately
3517 @type timeout: int or None
3518 @param timeout: if not None, the duration to wait for the lock
3519 (in blocking mode)
3520
3521 """
3522 self._flock(fcntl.LOCK_UN, blocking, timeout,
3523 "Failed to unlock %s" % self.filename)
3524
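# Editorial sketch: guarding a critical section with FileLock.  The lock
# file path is illustrative.
def _example_filelock():
  lock = FileLock.Open("/tmp/example-ganeti.lock")
  lock.Exclusive(blocking=True)
  try:
    pass  # the protected work would go here
  finally:
    lock.Unlock()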
3527 """Splits data chunks into lines separated by newline.
3528
3529 Instances provide a file-like interface.
3530
3531 """
3533 """Initializes this class.
3534
3535 @type line_fn: callable
3536 @param line_fn: Function called for each line, first parameter is line
3537 @param args: Extra arguments for L{line_fn}
3538
3539 """
3540 assert callable(line_fn)
3541
3542 if args:
3543
3544 self._line_fn = \
3545 lambda line: line_fn(line, *args)
3546 else:
3547 self._line_fn = line_fn
3548
3549 self._lines = collections.deque()
3550 self._buffer = ""
3551
3553 parts = (self._buffer + data).split("\n")
3554 self._buffer = parts.pop()
3555 self._lines.extend(parts)
3556
3558 while self._lines:
3559 self._line_fn(self._lines.popleft().rstrip("\r\n"))
3560
3562 self.flush()
3563 if self._buffer:
3564 self._line_fn(self._buffer)
3565
3568 """Signal Handled decoration.
3569
3570 This special decorator installs a signal handler and then calls the target
3571 function. The function must accept a 'signal_handlers' keyword argument,
3572 which will contain a dict indexed by signal number, with SignalHandler
3573 objects as values.
3574
3575 The decorator can be safely stacked with itself, to handle multiple signals
3576 with different handlers.
3577
3578 @type signums: list
3579 @param signums: signals to intercept
3580
3581 """
3582 def wrap(fn):
3583 def sig_function(*args, **kwargs):
3584 assert 'signal_handlers' not in kwargs or \
3585 kwargs['signal_handlers'] is None or \
3586 isinstance(kwargs['signal_handlers'], dict), \
3587 "Wrong signal_handlers parameter in original function call"
3588 if 'signal_handlers' in kwargs and kwargs['signal_handlers'] is not None:
3589 signal_handlers = kwargs['signal_handlers']
3590 else:
3591 signal_handlers = {}
3592 kwargs['signal_handlers'] = signal_handlers
3593 sighandler = SignalHandler(signums)
3594 try:
3595 for sig in signums:
3596 signal_handlers[sig] = sighandler
3597 return fn(*args, **kwargs)
3598 finally:
3599 sighandler.Reset()
3600 return sig_function
3601 return wrap
3602
3613 else:
3616
3618 """Initializes this class.
3619
3620 """
3621 (read_fd, write_fd) = os.pipe()
3622
3623
3624
3625
3626 self._read_fh = os.fdopen(read_fd, "r", 0)
3627 self._write_fh = os.fdopen(write_fd, "w", 0)
3628
3629 self._previous = self._SetWakeupFd(self._write_fh.fileno())
3630
3631
3632 self.fileno = self._read_fh.fileno
3633 self.read = self._read_fh.read
3634
3636 """Restores the previous wakeup file descriptor.
3637
3638 """
3639 if hasattr(self, "_previous") and self._previous is not None:
3640 self._SetWakeupFd(self._previous)
3641 self._previous = None
3642
3644 """Notifies the wakeup file descriptor.
3645
3646 """
3647 self._write_fh.write("\0")
3648
3650 """Called before object deletion.
3651
3652 """
3653 self.Reset()
3654
3657 """Generic signal handler class.
3658
3659 It automatically restores the original handler when the object is deleted or
3660 when L{Reset} is called. You can either pass your own handler
3661 function in or query the L{called} attribute to detect whether the
3662 signal was sent.
3663
3664 @type signum: list
3665 @ivar signum: the signals we handle
3666 @type called: boolean
3667 @ivar called: tracks whether any of the signals have been raised
3668
3669 """
3670 - def __init__(self, signum, handler_fn=None, wakeup=None):
3671 """Constructs a new SignalHandler instance.
3672
3673 @type signum: int or list of ints
3674 @param signum: Single signal number or set of signal numbers
3675 @type handler_fn: callable
3676 @param handler_fn: Signal handling function
3677
3678 """
3679 assert handler_fn is None or callable(handler_fn)
3680
3681 self.signum = set(signum)
3682 self.called = False
3683
3684 self._handler_fn = handler_fn
3685 self._wakeup = wakeup
3686
3687 self._previous = {}
3688 try:
3689 for signum in self.signum:
3690
3691 prev_handler = signal.signal(signum, self._HandleSignal)
3692 try:
3693 self._previous[signum] = prev_handler
3694 except:
3695
3696 signal.signal(signum, prev_handler)
3697 raise
3698 except:
3699
3700 self.Reset()
3701
3702
3703 raise
3704
3707
3709 """Restore previous handler.
3710
3711 This will reset all the signals to their previous handlers.
3712
3713 """
3714 for signum, prev_handler in self._previous.items():
3715 signal.signal(signum, prev_handler)
3716
3717 del self._previous[signum]
3718
3720 """Unsets the L{called} flag.
3721
3722 This function can be used in case a signal may arrive several times.
3723
3724 """
3725 self.called = False
3726
3728 """Actual signal handling function.
3729
3730 """
3731
3732
3733 self.called = True
3734
3735 if self._wakeup:
3736
3737 self._wakeup.Notify()
3738
3739 if self._handler_fn:
3740 self._handler_fn(signum, frame)
3741
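# Editorial sketch: waiting for SIGUSR1 with SignalHandler; the polling
# interval and ten-second cap are illustrative.
def _example_wait_for_sigusr1():
  handler = SignalHandler([signal.SIGUSR1])
  try:
    for _ in range(100):
      if handler.called:
        break
      time.sleep(0.1)
    return handler.called
  finally:
    handler.Reset()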
3744 """A simple field set.
3745
3746 Among the features are:
3747 - checking if a string is among a list of static strings or regex objects
3748 - checking if a whole list of strings matches
3749 - returning the matching groups from a regex match
3750
3751 Internally, all fields are held as regular expression objects.
3752
3753 """
3755 self.items = [re.compile("^%s$" % value) for value in items]
3756
3757 - def Extend(self, other_set):
3758 """Extend the field set with the items from another one"""
3759 self.items.extend(other_set.items)
3760
3762 """Checks if a field matches the current set
3763
3764 @type field: str
3765 @param field: the string to match
3766 @return: either None or a regular expression match object
3767
3768 """
3769 for m in itertools.ifilter(None, (val.match(field) for val in self.items)):
3770 return m
3771 return None
3772
3774 """Returns the list of fields not matching the current set
3775
3776 @type items: list
3777 @param items: the list of fields to check
3778 @rtype: list
3779 @return: list of non-matching fields
3780
3781 """
3782 return [val for val in items if not self.Matches(val)]
3783