# (original file header / copyright comment omitted from this listing)
22 """Block device abstraction"""
23
24 import re
25 import time
26 import errno
27 import shlex
28 import stat
29 import pyparsing as pyp
30 import os
31 import logging
32 import math
33
34 from ganeti import utils
35 from ganeti import errors
36 from ganeti import constants
37 from ganeti import objects
38 from ganeti import compat
39 from ganeti import netutils
40 from ganeti import pathutils
41 from ganeti import serializer
42
43
44
45 _DEVICE_READ_SIZE = 128 * 1024
49 """`rbd showmmapped' JSON formatting error Exception class.
50
51 """
52 pass
53
56 """Executes the given function, ignoring BlockDeviceErrors.
57
58 This is used in order to simplify the execution of cleanup or
59 rollback functions.
60
61 @rtype: boolean
62 @return: True when fn didn't raise an exception, False otherwise
63
64 """
65 try:
66 fn(*args, **kwargs)
67 return True
68 except errors.BlockDeviceError, err:
69 logging.warning("Caught BlockDeviceError but ignoring: %s", str(err))
70 return False
71
74 """Log an error to the node daemon and the raise an exception.
75
76 @type msg: string
77 @param msg: the text of the exception
78 @raise errors.BlockDeviceError
79
80 """
81 if args:
82 msg = msg % args
83 logging.error(msg)
84 raise errors.BlockDeviceError(msg)
85
88 """Throws an error if the given result is a failed one.
89
90 @param result: result from RunCmd
91
92 """
93 if result.failed:
94 _ThrowError("Command: %s error: %s - %s", result.cmd, result.fail_reason,
95 result.output)
96
99 """Check if we can read from the given device.
100
101 This tries to read the first 128k of the device.
102
103 """
104 try:
105 utils.ReadFile(path, size=_DEVICE_READ_SIZE)
106 return True
107 except EnvironmentError:
108 logging.warning("Can't read from device %s", path, exc_info=True)
109 return False
110
113 """Builds a list of path prefixes which shouldn't be used for file storage.
114
115 @rtype: frozenset
116
117 """
118 paths = set([
119 "/boot",
120 "/dev",
121 "/etc",
122 "/home",
123 "/proc",
124 "/root",
125 "/sys",
126 ])
127
128 for prefix in ["", "/usr", "/usr/local"]:
129 paths.update(map(lambda s: "%s/%s" % (prefix, s),
130 ["bin", "lib", "lib32", "lib64", "sbin"]))
131
132 return compat.UniqueFrozenset(map(os.path.normpath, paths))
133
137 """Cross-checks a list of paths for prefixes considered bad.
138
139 Some paths, e.g. "/bin", should not be used for file storage.
140
141 @type paths: list
142 @param paths: List of paths to be checked
143 @rtype: list
144 @return: Sorted list of paths for which the user should be warned
145
146 """
147 def _Check(path):
148 return (not os.path.isabs(path) or
149 path in _forbidden or
150 filter(lambda p: utils.IsBelowDir(p, path), _forbidden))
151
152 return utils.NiceSort(filter(_Check, map(os.path.normpath, paths)))
153
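# Illustrative sketch (not part of the original module; example paths are
# made up): with the forbidden prefixes built above, a call like
#   _ComputeWrongFileStoragePaths(["/srv/ganeti/file-storage",
#                                  "/usr/lib64/foo", "var/tmp"])
# would return ["/usr/lib64/foo", "var/tmp"]: the first path is acceptable,
# the second lies below the forbidden "/usr/lib64" prefix, and the third is
# rejected because it is not absolute.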
162
165 """Checks if a path is in a list of allowed paths for file storage.
166
167 @type path: string
168 @param path: Path to check
169 @type allowed: list
170 @param allowed: List of allowed paths
171 @raise errors.FileStoragePathError: If the path is not allowed
172
173 """
174 if not os.path.isabs(path):
175 raise errors.FileStoragePathError("File storage path must be absolute,"
176 " got '%s'" % path)
177
178 for i in allowed:
179 if not os.path.isabs(i):
180 logging.info("Ignoring relative path '%s' for file storage", i)
181 continue
182
183 if utils.IsBelowDir(i, path):
184 break
185 else:
186 raise errors.FileStoragePathError("Path '%s' is not acceptable for file"
187 " storage. A possible fix might be to add"
188 " it to /etc/ganeti/file-storage-paths"
189 " on all nodes." % path)
190
193 """Loads file containing allowed file storage paths.
194
195 @rtype: list
196 @return: List of allowed paths (can be an empty list)
197
198 """
199 try:
200 contents = utils.ReadFile(filename)
201 except EnvironmentError:
202 return []
203 else:
204 return utils.FilterEmptyLinesAndComments(contents)
205
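# Illustrative sketch (assumed usage, not part of the original listing): the
# two helpers above are meant to be chained; the whitelist file named in the
# error message above is reused as the example here.
#   allowed = _LoadAllowedFileStoragePaths("/etc/ganeti/file-storage-paths")
#   # e.g. allowed == ["/srv/ganeti/file-storage"]
#   _CheckFileStoragePath("/srv/ganeti/file-storage/inst1.img", allowed)
#   # returns silently if acceptable, raises errors.FileStoragePathError if not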
222
225 """Block device abstract class.
226
227 A block device can be in the following states:
228 - not existing on the system, and by `Create()` it goes into:
229 - existing but not setup/not active, and by `Assemble()` goes into:
230 - active read-write and by `Open()` it goes into
231 - online (=used, or ready for use)
232
233 A device can also be online but read-only, however we are not using
234 the readonly state (LV has it, if needed in the future) and we are
235 usually looking at this as a stack, so it's easier to
236 conceptualise the transition from not-existing to online and back
237 like a linear one.
238
239 The many different states of the device are due to the fact that we
240 need to cover many device types:
241 - logical volumes are created, lvchange -a y $lv, and used
242 - drbd devices are attached to a local disk/remote peer and made primary
243
244 A block device is identified by three items:
245 - the /dev path of the device (dynamic)
246 - a unique ID of the device (static)
247 - its major/minor pair (dynamic)
248
249 Not all devices implement both the first two as distinct items. LVM
250 logical volumes have their unique ID (the pair volume group, logical
251 volume name) in a 1-to-1 relation to the dev path. For DRBD devices,
252 the /dev path is again dynamic and the unique id is the pair (host1,
253 dev1), (host2, dev2).
254
255 You can get to a device in two ways:
256 - creating the (real) device, which returns you
257 an attached instance (lvcreate)
258 - attaching of a python instance to an existing (real) device
259
260 The second point, the attachment to a device, is different
261 depending on whether the device is assembled or not. At init() time,
262 we search for a device with the same unique_id as us. If found,
263 good. It also means that the device is already assembled. If not,
264 after assembly we'll have our correct major/minor.
265
266 """
267 def __init__(self, unique_id, children, size, params):
268 self._children = children
269 self.dev_path = None
270 self.unique_id = unique_id
271 self.major = None
272 self.minor = None
273 self.attached = False
274 self.size = size
275 self.params = params
276
278 """Assemble the device from its components.
279
280 Implementations of this method by child classes must ensure that:
281 - after the device has been assembled, it knows its major/minor
282 numbers; this allows other devices (usually parents) to probe
283 correctly for their children
284 - calling this method on an existing, in-use device is safe
285 - if the device is already configured (and in an OK state),
286 this method is idempotent
287
288 """
289 pass
290
292 """Find a device which matches our config and attach to it.
293
294 """
295 raise NotImplementedError
296
298 """Notifies that the device will no longer be used for I/O.
299
300 """
301 raise NotImplementedError
302
303 @classmethod
304 def Create(cls, unique_id, children, size, params, excl_stor):
305 """Create the device.
306
307 If the device cannot be created, it will return None
308 instead. Error messages go to the logging system.
309
310 Note that for some devices, the unique_id is used, and for others,
311 the children. The idea is that these two, taken together, are
312 enough for both creation and assembly (later).
313
314 """
315 raise NotImplementedError
316
318 """Remove this device.
319
320 This makes sense only for some of the device types: LV and file
321 storage. Also note that if the device can't attach, the removal
322 can't be completed.
323
324 """
325 raise NotImplementedError
326
328 """Rename this device.
329
330 This may or may not make sense for a given device type.
331
332 """
333 raise NotImplementedError
334
335 def Open(self, force=False):
336 """Make the device ready for use.
337
338 This makes the device ready for I/O. For now, just the DRBD
339 devices need this.
340
341 The force parameter signifies that if the device has any kind of
342 --force option, it should be used, as we know what we are doing.
343
344 """
345 raise NotImplementedError
346
348 """Shut down the device, freeing its children.
349
350 This undoes the `Assemble()` work, except for the child
351 assembling; as such, the children on the device are still
352 assembled after this call.
353
354 """
355 raise NotImplementedError
356
358 """Adjust the synchronization parameters of the mirror.
359
360 In case this is not a mirroring device, this is no-op.
361
362 @param params: dictionary of LD level disk parameters related to the
363 synchronization.
364 @rtype: list
365 @return: a list of error messages, emitted both by the current node and by
366 children. An empty list means no errors.
367
368 """
369 result = []
370 if self._children:
371 for child in self._children:
372 result.extend(child.SetSyncParams(params))
373 return result
374
376 """Pause/Resume the sync of the mirror.
377
378 In case this is not a mirroring device, this is no-op.
379
380 @param pause: Whether to pause or resume
381
382 """
383 result = True
384 if self._children:
385 for child in self._children:
386 result = result and child.PauseResumeSync(pause)
387 return result
388
390 """Returns the sync status of the device.
391
392 If this device is a mirroring device, this function returns the
393 status of the mirror.
394
395 If sync_percent is None, it means the device is not syncing.
396
397 If estimated_time is None, it means we can't estimate
398 the time needed, otherwise it's the time left in seconds.
399
400 If is_degraded is True, it means the device is missing
401 redundancy. This is usually a sign that something went wrong in
402 the device setup, if sync_percent is None.
403
404 The ldisk parameter represents the degradation of the local
405 data. This is only valid for some devices, the rest will always
406 return False (not degraded).
407
408 @rtype: objects.BlockDevStatus
409
410 """
411 return objects.BlockDevStatus(dev_path=self.dev_path,
412 major=self.major,
413 minor=self.minor,
414 sync_percent=None,
415 estimated_time=None,
416 is_degraded=False,
417 ldisk_status=constants.LDS_OKAY)
418
420 """Calculate the mirror status recursively for our children.
421
422 The return value is the same as for `GetSyncStatus()` except the
423 minimum percent and maximum time are calculated across our
424 children.
425
426 @rtype: objects.BlockDevStatus
427
428 """
429 status = self.GetSyncStatus()
430
431 min_percent = status.sync_percent
432 max_time = status.estimated_time
433 is_degraded = status.is_degraded
434 ldisk_status = status.ldisk_status
435
436 if self._children:
437 for child in self._children:
438 child_status = child.GetSyncStatus()
439
440 if min_percent is None:
441 min_percent = child_status.sync_percent
442 elif child_status.sync_percent is not None:
443 min_percent = min(min_percent, child_status.sync_percent)
444
445 if max_time is None:
446 max_time = child_status.estimated_time
447 elif child_status.estimated_time is not None:
448 max_time = max(max_time, child_status.estimated_time)
449
450 is_degraded = is_degraded or child_status.is_degraded
451
452 if ldisk_status is None:
453 ldisk_status = child_status.ldisk_status
454 elif child_status.ldisk_status is not None:
455 ldisk_status = max(ldisk_status, child_status.ldisk_status)
456
457 return objects.BlockDevStatus(dev_path=self.dev_path,
458 major=self.major,
459 minor=self.minor,
460 sync_percent=min_percent,
461 estimated_time=max_time,
462 is_degraded=is_degraded,
463 ldisk_status=ldisk_status)
464
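# Illustrative sketch (hypothetical numbers): if this device reports
# sync_percent=80.0 / estimated_time=120 and one child reports 40.0 / 300,
# CombinedSyncStatus() returns sync_percent=40.0 (the minimum across the
# tree), estimated_time=300 (the maximum), is_degraded=True if any level is
# degraded, and the worst (numerically highest) ldisk_status seen.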
466 """Update metadata with info text.
467
468 Only supported for some device types.
469
470 """
471 for child in self._children:
472 child.SetInfo(text)
473
474 def Grow(self, amount, dryrun, backingstore):
475 """Grow the block device.
476
477 @type amount: integer
478 @param amount: the amount (in mebibytes) to grow with
479 @type dryrun: boolean
480 @param dryrun: whether to execute the operation in simulation mode
481 only, without actually increasing the size
482 @param backingstore: whether to execute the operation on backing storage
483 only, or on "logical" storage only; e.g. DRBD is logical storage,
484 whereas LVM, file, RBD are backing storage
485
486 """
487 raise NotImplementedError
488
490 """Return the actual disk size.
491
492 @note: the device needs to be active when this is called
493
494 """
495 assert self.attached, "BlockDevice not attached in GetActualSize()"
496 result = utils.RunCmd(["blockdev", "--getsize64", self.dev_path])
497 if result.failed:
498 _ThrowError("blockdev failed (%s): %s",
499 result.fail_reason, result.output)
500 try:
501 sz = int(result.output.strip())
502 except (ValueError, TypeError), err:
503 _ThrowError("Failed to parse blockdev output: %s", str(err))
504 return sz
505
506 def __repr__(self):
507 return ("<%s: unique_id: %s, children: %s, %s:%s, %s>" %
508 (self.__class__, self.unique_id, self._children,
509 self.major, self.minor, self.dev_path))
510
513 """Logical Volume block device.
514
515 """
516 _VALID_NAME_RE = re.compile("^[a-zA-Z0-9+_.-]*$")
517 _INVALID_NAMES = compat.UniqueFrozenset([".", "..", "snapshot", "pvmove"])
518 _INVALID_SUBSTRINGS = compat.UniqueFrozenset(["_mlog", "_mimage"])
519
520 def __init__(self, unique_id, children, size, params):
521 """Attaches to a LV device.
522
523 The unique_id is a tuple (vg_name, lv_name)
524
525 """
526 super(LogicalVolume, self).__init__(unique_id, children, size, params)
527 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
528 raise ValueError("Invalid configuration data %s" % str(unique_id))
529 self._vg_name, self._lv_name = unique_id
530 self._ValidateName(self._vg_name)
531 self._ValidateName(self._lv_name)
532 self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
533 self._degraded = True
534 self.major = self.minor = self.pe_size = self.stripe_count = None
535 self.Attach()
536
537 @staticmethod
539 """Return the the standard PV size (used with exclusive storage).
540
541 @param pvs_info: list of objects.LvmPvInfo, cannot be empty
542 @rtype: float
543 @return: size in MiB
544
545 """
546 assert len(pvs_info) > 0
547 smallest = min([pv.size for pv in pvs_info])
548 return smallest / (1 + constants.PART_MARGIN + constants.PART_RESERVED)
549
550 @staticmethod
552 """Compute the number of PVs needed for an LV (with exclusive storage).
553
554 @type size: float
555 @param size: LV size in MiB
556 @param pvs_info: list of objects.LvmPvInfo, cannot be empty
557 @rtype: integer
558 @return: number of PVs needed
559 """
560 assert len(pvs_info) > 0
561 pv_size = float(LogicalVolume._GetStdPvSize(pvs_info))
562 return int(math.ceil(float(size) / pv_size))
563
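# Illustrative sketch (hypothetical sizes): with PVs of 10240 and 10000 MiB,
# the standard PV size is 10000 / (1 + PART_MARGIN + PART_RESERVED), i.e. a
# bit below 10000 MiB, so a 15000 MiB LV would need
# int(math.ceil(15000.0 / std_pv_size)) == 2 exclusive-storage PVs.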
564 @staticmethod
566 """Return a list of empty PVs, by name.
567
568 """
569 empty_pvs = filter(objects.LvmPvInfo.IsEmpty, pvs_info)
570 if max_pvs is not None:
571 empty_pvs = empty_pvs[:max_pvs]
572 return map((lambda pv: pv.name), empty_pvs)
573
574 @classmethod
575 def Create(cls, unique_id, children, size, params, excl_stor):
576 """Create a new logical volume.
577
578 """
579 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
580 raise errors.ProgrammerError("Invalid configuration data %s" %
581 str(unique_id))
582 vg_name, lv_name = unique_id
583 cls._ValidateName(vg_name)
584 cls._ValidateName(lv_name)
585 pvs_info = cls.GetPVInfo([vg_name])
586 if not pvs_info:
587 if excl_stor:
588 msg = "No (empty) PVs found"
589 else:
590 msg = "Can't compute PV info for vg %s" % vg_name
591 _ThrowError(msg)
592 pvs_info.sort(key=(lambda pv: pv.free), reverse=True)
593
594 pvlist = [pv.name for pv in pvs_info]
595 if compat.any(":" in v for v in pvlist):
596 _ThrowError("Some of your PVs have the invalid character ':' in their"
597 " name, this is not supported - please filter them out"
598 " in lvm.conf using either 'filter' or 'preferred_names'")
599
600 current_pvs = len(pvlist)
601 desired_stripes = params[constants.LDP_STRIPES]
602 stripes = min(current_pvs, desired_stripes)
603
604 if excl_stor:
605 (err_msgs, _) = utils.LvmExclusiveCheckNodePvs(pvs_info)
606 if err_msgs:
607 for m in err_msgs:
608 logging.warning(m)
609 req_pvs = cls._ComputeNumPvs(size, pvs_info)
610 pvlist = cls._GetEmptyPvNames(pvs_info, req_pvs)
611 current_pvs = len(pvlist)
612 if current_pvs < req_pvs:
613 _ThrowError("Not enough empty PVs to create a disk of %d MB:"
614 " %d available, %d needed", size, current_pvs, req_pvs)
615 assert current_pvs == len(pvlist)
616 if stripes > current_pvs:
617
618 stripes = current_pvs
619
620 else:
621 if stripes < desired_stripes:
622 logging.warning("Could not use %d stripes for VG %s, as only %d PVs are"
623 " available.", desired_stripes, vg_name, current_pvs)
624 free_size = sum([pv.free for pv in pvs_info])
625
626
627 if free_size < size:
628 _ThrowError("Not enough free space: required %s,"
629 " available %s", size, free_size)
630
631
632
633
634
635 cmd = ["lvcreate", "-L%dm" % size, "-n%s" % lv_name]
636 for stripes_arg in range(stripes, 0, -1):
637 result = utils.RunCmd(cmd + ["-i%d" % stripes_arg] + [vg_name] + pvlist)
638 if not result.failed:
639 break
640 if result.failed:
641 _ThrowError("LV create failed (%s): %s",
642 result.fail_reason, result.output)
643 return LogicalVolume(unique_id, children, size, params)
644
645 @staticmethod
647 """Returns LVM Volumen infos using lvm_cmd
648
649 @param lvm_cmd: Should be one of "pvs", "vgs" or "lvs"
650 @param fields: Fields to return
651 @return: A list of dicts each with the parsed fields
652
653 """
654 if not fields:
655 raise errors.ProgrammerError("No fields specified")
656
657 sep = "|"
658 cmd = [lvm_cmd, "--noheadings", "--nosuffix", "--units=m", "--unbuffered",
659 "--separator=%s" % sep, "-o%s" % ",".join(fields)]
660
661 result = utils.RunCmd(cmd)
662 if result.failed:
663 raise errors.CommandError("Can't get the volume information: %s - %s" %
664 (result.fail_reason, result.output))
665
666 data = []
667 for line in result.stdout.splitlines():
668 splitted_fields = line.strip().split(sep)
669
670 if len(fields) != len(splitted_fields):
671 raise errors.CommandError("Can't parse %s output: line '%s'" %
672 (lvm_cmd, line))
673
674 data.append(splitted_fields)
675
676 return data
677
678 @classmethod
679 def GetPVInfo(cls, vg_names, filter_allocatable=True, include_lvs=False):
680 """Get the free space info for PVs in a volume group.
681
682 @param vg_names: list of volume group names, if empty all will be returned
683 @param filter_allocatable: whether to skip over unallocatable PVs
684 @param include_lvs: whether to include a list of LVs hosted on each PV
685
686 @rtype: list
687 @return: list of objects.LvmPvInfo objects
688
689 """
690
691
692
693 if include_lvs:
694 lvfield = "lv_name"
695 else:
696 lvfield = "pv_name"
697 try:
698 info = cls._GetVolumeInfo("pvs", ["pv_name", "vg_name", "pv_free",
699 "pv_attr", "pv_size", lvfield])
700 except errors.GenericError, err:
701 logging.error("Can't get PV information: %s", err)
702 return None
703
704
705
706
707 if include_lvs:
708 info.sort(key=(lambda i: (i[0], i[5])))
709 data = []
710 lastpvi = None
711 for (pv_name, vg_name, pv_free, pv_attr, pv_size, lv_name) in info:
712
713 if filter_allocatable and pv_attr[0] != "a":
714 continue
715
716 if vg_names and vg_name not in vg_names:
717 continue
718
719 if lastpvi and lastpvi.name == pv_name:
720 if include_lvs and lv_name:
721 if not lastpvi.lv_list or lastpvi.lv_list[-1] != lv_name:
722 lastpvi.lv_list.append(lv_name)
723 else:
724 if include_lvs and lv_name:
725 lvl = [lv_name]
726 else:
727 lvl = []
728 lastpvi = objects.LvmPvInfo(name=pv_name, vg_name=vg_name,
729 size=float(pv_size), free=float(pv_free),
730 attributes=pv_attr, lv_list=lvl)
731 data.append(lastpvi)
732
733 return data
734
735 @classmethod
737 """Return the free disk space in the given VG, in exclusive storage mode.
738
739 @type vg_name: string
740 @param vg_name: VG name
741 @rtype: float
742 @return: free space in MiB
743 """
744 pvs_info = cls.GetPVInfo([vg_name])
745 if not pvs_info:
746 return 0.0
747 pv_size = cls._GetStdPvSize(pvs_info)
748 num_pvs = len(cls._GetEmptyPvNames(pvs_info))
749 return pv_size * num_pvs
750
751 @classmethod
752 def GetVGInfo(cls, vg_names, excl_stor, filter_readonly=True):
753 """Get the free space info for specific VGs.
754
755 @param vg_names: list of volume group names, if empty all will be returned
756 @param excl_stor: whether exclusive_storage is enabled
757 @param filter_readonly: whether to skip over readonly VGs
758
759 @rtype: list
760 @return: list of tuples (free_space, total_size, name) with free_space in
761 MiB
762
763 """
764 try:
765 info = cls._GetVolumeInfo("vgs", ["vg_name", "vg_free", "vg_attr",
766 "vg_size"])
767 except errors.GenericError, err:
768 logging.error("Can't get VG information: %s", err)
769 return None
770
771 data = []
772 for vg_name, vg_free, vg_attr, vg_size in info:
773
774 if filter_readonly and vg_attr[0] == "r":
775 continue
776
777 if vg_names and vg_name not in vg_names:
778 continue
779
780 if excl_stor:
781 es_free = cls._GetExclusiveStorageVgFree(vg_name)
782 assert es_free <= vg_free
783 vg_free = es_free
784 data.append((float(vg_free), float(vg_size), vg_name))
785
786 return data
787
788 @classmethod
790 """Validates that a given name is valid as VG or LV name.
791
792 The list of valid characters and restricted names is taken out of
793 the lvm(8) manpage, with the simplification that we enforce both
794 VG and LV restrictions on the names.
795
796 """
797 if (not cls._VALID_NAME_RE.match(name) or
798 name in cls._INVALID_NAMES or
799 compat.any(substring in name for substring in cls._INVALID_SUBSTRINGS)):
800 _ThrowError("Invalid LVM name '%s'", name)
801
803 """Remove this logical volume.
804
805 """
806 if not self.minor and not self.Attach():
807
808 return
809 result = utils.RunCmd(["lvremove", "-f", "%s/%s" %
810 (self._vg_name, self._lv_name)])
811 if result.failed:
812 _ThrowError("Can't lvremove: %s - %s", result.fail_reason, result.output)
813
815 """Rename this logical volume.
816
817 """
818 if not isinstance(new_id, (tuple, list)) or len(new_id) != 2:
819 raise errors.ProgrammerError("Invalid new logical id '%s'" % new_id)
820 new_vg, new_name = new_id
821 if new_vg != self._vg_name:
822 raise errors.ProgrammerError("Can't move a logical volume across"
823 " volume groups (from %s to to %s)" %
824 (self._vg_name, new_vg))
825 result = utils.RunCmd(["lvrename", new_vg, self._lv_name, new_name])
826 if result.failed:
827 _ThrowError("Failed to rename the logical volume: %s", result.output)
828 self._lv_name = new_name
829 self.dev_path = utils.PathJoin("/dev", self._vg_name, self._lv_name)
830
832 """Attach to an existing LV.
833
834 This method will try to see if an existing and active LV exists
835 which matches our name. If so, its major/minor will be
836 recorded.
837
838 """
839 self.attached = False
840 result = utils.RunCmd(["lvs", "--noheadings", "--separator=,",
841 "--units=k", "--nosuffix",
842 "-olv_attr,lv_kernel_major,lv_kernel_minor,"
843 "vg_extent_size,stripes", self.dev_path])
844 if result.failed:
845 logging.error("Can't find LV %s: %s, %s",
846 self.dev_path, result.fail_reason, result.output)
847 return False
848
849
850
851
852
853 out = result.stdout.splitlines()
854 if not out:
855
856 logging.error("Can't parse LVS output, no lines? Got '%s'", str(out))
857 return False
858 out = out[-1].strip().rstrip(",")
859 out = out.split(",")
860 if len(out) != 5:
861 logging.error("Can't parse LVS output, len(%s) != 5", str(out))
862 return False
863
864 status, major, minor, pe_size, stripes = out
865 if len(status) < 6:
866 logging.error("lvs lv_attr is not at least 6 characters (%s)", status)
867 return False
868
869 try:
870 major = int(major)
871 minor = int(minor)
872 except (TypeError, ValueError), err:
873 logging.error("lvs major/minor cannot be parsed: %s", str(err))
874
875 try:
876 pe_size = int(float(pe_size))
877 except (TypeError, ValueError), err:
878 logging.error("Can't parse vg extent size: %s", err)
879 return False
880
881 try:
882 stripes = int(stripes)
883 except (TypeError, ValueError), err:
884 logging.error("Can't parse the number of stripes: %s", err)
885 return False
886
887 self.major = major
888 self.minor = minor
889 self.pe_size = pe_size
890 self.stripe_count = stripes
891 self._degraded = status[0] == "v"
892
893 self.attached = True
894 return True
895
897 """Assemble the device.
898
899 We always run `lvchange -ay` on the LV to ensure it's active before
900 use, as there were cases when xenvg was not active after boot
901 (also possibly after disk issues).
902
903 """
904 result = utils.RunCmd(["lvchange", "-ay", self.dev_path])
905 if result.failed:
906 _ThrowError("Can't activate lv %s: %s", self.dev_path, result.output)
907
909 """Shutdown the device.
910
911 This is a no-op for the LV device type, as we don't deactivate the
912 volumes on shutdown.
913
914 """
915 pass
916
918 """Returns the sync status of the device.
919
920 If this device is a mirroring device, this function returns the
921 status of the mirror.
922
923 For logical volumes, sync_percent and estimated_time are always
924 None (no recovery in progress, as we don't handle the mirrored LV
925 case). The is_degraded parameter is the inverse of the ldisk
926 parameter.
927
928 For the ldisk parameter, we check if the logical volume has the
929 'virtual' type, which means it's not backed by existing storage
930 anymore (reads from it return I/O errors). This happens after a
931 physical disk failure and subsequent 'vgreduce --removemissing' on
932 the volume group.
933
934 The status was already read in Attach, so we just return it.
935
936 @rtype: objects.BlockDevStatus
937
938 """
939 if self._degraded:
940 ldisk_status = constants.LDS_FAULTY
941 else:
942 ldisk_status = constants.LDS_OKAY
943
944 return objects.BlockDevStatus(dev_path=self.dev_path,
945 major=self.major,
946 minor=self.minor,
947 sync_percent=None,
948 estimated_time=None,
949 is_degraded=self._degraded,
950 ldisk_status=ldisk_status)
951
952 def Open(self, force=False):
953 """Make the device ready for I/O.
954
955 This is a no-op for the LV device type.
956
957 """
958 pass
959
961 """Notifies that the device will no longer be used for I/O.
962
963 This is a no-op for the LV device type.
964
965 """
966 pass
967
969 """Create a snapshot copy of an lvm block device.
970
971 @returns: tuple (vg, lv)
972
973 """
974 snap_name = self._lv_name + ".snap"
975
976
977 snap = LogicalVolume((self._vg_name, snap_name), None, size, self.params)
978 _IgnoreError(snap.Remove)
979
980 vg_info = self.GetVGInfo([self._vg_name], False)
981 if not vg_info:
982 _ThrowError("Can't compute VG info for vg %s", self._vg_name)
983 free_size, _, _ = vg_info[0]
984 if free_size < size:
985 _ThrowError("Not enough free space: required %s,"
986 " available %s", size, free_size)
987
988 _CheckResult(utils.RunCmd(["lvcreate", "-L%dm" % size, "-s",
989 "-n%s" % snap_name, self.dev_path]))
990
991 return (self._vg_name, snap_name)
992
994 """Try to remove old tags from the lv.
995
996 """
997 result = utils.RunCmd(["lvs", "-o", "tags", "--noheadings", "--nosuffix",
998 self.dev_path])
999 _CheckResult(result)
1000
1001 raw_tags = result.stdout.strip()
1002 if raw_tags:
1003 for tag in raw_tags.split(","):
1004 _CheckResult(utils.RunCmd(["lvchange", "--deltag",
1005 tag.strip(), self.dev_path]))
1006
1023
1024 def Grow(self, amount, dryrun, backingstore):
1025 """Grow the logical volume.
1026
1027 """
1028 if not backingstore:
1029 return
1030 if self.pe_size is None or self.stripe_count is None:
1031 if not self.Attach():
1032 _ThrowError("Can't attach to LV during Grow()")
1033 full_stripe_size = self.pe_size * self.stripe_count
1034
1035 amount *= 1024
1036 rest = amount % full_stripe_size
1037 if rest != 0:
1038 amount += full_stripe_size - rest
1039 cmd = ["lvextend", "-L", "+%dk" % amount]
1040 if dryrun:
1041 cmd.append("--test")
1042
1043
1044
1045
1046 for alloc_policy in "contiguous", "cling", "normal":
1047 result = utils.RunCmd(cmd + ["--alloc", alloc_policy, self.dev_path])
1048 if not result.failed:
1049 return
1050 _ThrowError("Can't grow LV %s: %s", self.dev_path, result.output)
1051
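# Illustrative sketch (hypothetical LV layout): with pe_size=4096k and
# stripe_count=2, full_stripe_size is 8192k; a Grow() request of 100 MiB
# (102400k) is rounded up to 106496k (13 full stripes) so that every stripe
# grows by a whole number of extents.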
1054 """A DRBD status representation class.
1055
1056 Note that this doesn't support unconfigured devices (cs:Unconfigured).
1057
1058 """
1059 UNCONF_RE = re.compile(r"\s*[0-9]+:\s*cs:Unconfigured$")
1060 LINE_RE = re.compile(r"\s*[0-9]+:\s*cs:(\S+)\s+(?:st|ro):([^/]+)/(\S+)"
1061 "\s+ds:([^/]+)/(\S+)\s+.*$")
1062 SYNC_RE = re.compile(r"^.*\ssync'ed:\s*([0-9.]+)%.*"
1063
1064
1065 "(?:\s|M)"
1066 "finish: ([0-9]+):([0-9]+):([0-9]+)\s.*$")
1067
1068 CS_UNCONFIGURED = "Unconfigured"
1069 CS_STANDALONE = "StandAlone"
1070 CS_WFCONNECTION = "WFConnection"
1071 CS_WFREPORTPARAMS = "WFReportParams"
1072 CS_CONNECTED = "Connected"
1073 CS_STARTINGSYNCS = "StartingSyncS"
1074 CS_STARTINGSYNCT = "StartingSyncT"
1075 CS_WFBITMAPS = "WFBitMapS"
1076 CS_WFBITMAPT = "WFBitMapT"
1077 CS_WFSYNCUUID = "WFSyncUUID"
1078 CS_SYNCSOURCE = "SyncSource"
1079 CS_SYNCTARGET = "SyncTarget"
1080 CS_PAUSEDSYNCS = "PausedSyncS"
1081 CS_PAUSEDSYNCT = "PausedSyncT"
1082 CSET_SYNC = compat.UniqueFrozenset([
1083 CS_WFREPORTPARAMS,
1084 CS_STARTINGSYNCS,
1085 CS_STARTINGSYNCT,
1086 CS_WFBITMAPS,
1087 CS_WFBITMAPT,
1088 CS_WFSYNCUUID,
1089 CS_SYNCSOURCE,
1090 CS_SYNCTARGET,
1091 CS_PAUSEDSYNCS,
1092 CS_PAUSEDSYNCT,
1093 ])
1094
1095 DS_DISKLESS = "Diskless"
1096 DS_ATTACHING = "Attaching"
1097 DS_FAILED = "Failed"
1098 DS_NEGOTIATING = "Negotiating"
1099 DS_INCONSISTENT = "Inconsistent"
1100 DS_OUTDATED = "Outdated"
1101 DS_DUNKNOWN = "DUnknown"
1102 DS_CONSISTENT = "Consistent"
1103 DS_UPTODATE = "UpToDate"
1104
1105 RO_PRIMARY = "Primary"
1106 RO_SECONDARY = "Secondary"
1107 RO_UNKNOWN = "Unknown"
1108
1109 def __init__(self, procline):
1110 u = self.UNCONF_RE.match(procline)
1111 if u:
1112 self.cstatus = self.CS_UNCONFIGURED
1113 self.lrole = self.rrole = self.ldisk = self.rdisk = None
1114 else:
1115 m = self.LINE_RE.match(procline)
1116 if not m:
1117 raise errors.BlockDeviceError("Can't parse input data '%s'" % procline)
1118 self.cstatus = m.group(1)
1119 self.lrole = m.group(2)
1120 self.rrole = m.group(3)
1121 self.ldisk = m.group(4)
1122 self.rdisk = m.group(5)
1123
1124
1125
1126 self.is_standalone = self.cstatus == self.CS_STANDALONE
1127 self.is_wfconn = self.cstatus == self.CS_WFCONNECTION
1128 self.is_connected = self.cstatus == self.CS_CONNECTED
1129 self.is_primary = self.lrole == self.RO_PRIMARY
1130 self.is_secondary = self.lrole == self.RO_SECONDARY
1131 self.peer_primary = self.rrole == self.RO_PRIMARY
1132 self.peer_secondary = self.rrole == self.RO_SECONDARY
1133 self.both_primary = self.is_primary and self.peer_primary
1134 self.both_secondary = self.is_secondary and self.peer_secondary
1135
1136 self.is_diskless = self.ldisk == self.DS_DISKLESS
1137 self.is_disk_uptodate = self.ldisk == self.DS_UPTODATE
1138
1139 self.is_in_resync = self.cstatus in self.CSET_SYNC
1140 self.is_in_use = self.cstatus != self.CS_UNCONFIGURED
1141
1142 m = self.SYNC_RE.match(procline)
1143 if m:
1144 self.sync_percent = float(m.group(1))
1145 hours = int(m.group(2))
1146 minutes = int(m.group(3))
1147 seconds = int(m.group(4))
1148 self.est_time = hours * 3600 + minutes * 60 + seconds
1149 else:
1150
1151
1152
1153
1154 if self.is_in_resync:
1155 self.sync_percent = 0
1156 else:
1157 self.sync_percent = None
1158 self.est_time = None
1159
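# Illustrative sketch (made-up /proc/drbd line): a resyncing minor such as
#   " 0: cs:SyncTarget ro:Secondary/Primary ds:Inconsistent/UpToDate C r---
#       [==>.......] sync'ed: 25.4% (3080/4096)M finish: 0:04:10 speed: ..."
# parses to cstatus="SyncTarget", lrole="Secondary", rrole="Primary",
# ldisk="Inconsistent", rdisk="UpToDate", is_in_resync=True,
# sync_percent=25.4 and est_time=250 (0h 4m 10s).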
1162 """Base DRBD class.
1163
1164 This class contains a few bits of common functionality between the
1165 0.7 and 8.x versions of DRBD.
1166
1167 """
1168 _VERSION_RE = re.compile(r"^version: (\d+)\.(\d+)\.(\d+)(?:\.\d+)?"
1169 r" \(api:(\d+)/proto:(\d+)(?:-(\d+))?\)")
1170 _VALID_LINE_RE = re.compile("^ *([0-9]+): cs:([^ ]+).*$")
1171 _UNUSED_LINE_RE = re.compile("^ *([0-9]+): cs:Unconfigured$")
1172
1173 _DRBD_MAJOR = 147
1174 _ST_UNCONFIGURED = "Unconfigured"
1175 _ST_WFCONNECTION = "WFConnection"
1176 _ST_CONNECTED = "Connected"
1177
1178 _STATUS_FILE = constants.DRBD_STATUS_FILE
1179 _USERMODE_HELPER_FILE = "/sys/module/drbd/parameters/usermode_helper"
1180
1181 @staticmethod
1183 """Return data from /proc/drbd.
1184
1185 """
1186 try:
1187 data = utils.ReadFile(filename).splitlines()
1188 except EnvironmentError, err:
1189 if err.errno == errno.ENOENT:
1190 _ThrowError("The file %s cannot be opened, check if the module"
1191 " is loaded (%s)", filename, str(err))
1192 else:
1193 _ThrowError("Can't read the DRBD proc file %s: %s", filename, str(err))
1194 if not data:
1195 _ThrowError("Can't read any data from %s", filename)
1196 return data
1197
1198 @classmethod
1200 """Transform the output of _GetProdData into a nicer form.
1201
1202 @return: a dictionary of minor: joined lines from /proc/drbd
1203 for that minor
1204
1205 """
1206 results = {}
1207 old_minor = old_line = None
1208 for line in data:
1209 if not line:
1210 continue
1211 lresult = cls._VALID_LINE_RE.match(line)
1212 if lresult is not None:
1213 if old_minor is not None:
1214 results[old_minor] = old_line
1215 old_minor = int(lresult.group(1))
1216 old_line = line
1217 else:
1218 if old_minor is not None:
1219 old_line += " " + line.strip()
1220
1221 if old_minor is not None:
1222 results[old_minor] = old_line
1223 return results
1224
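# Illustrative sketch (shortened, made-up data): _MassageProcData turns
#   [" 0: cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate C r---",
#    "    ns:123 nr:0 dw:123 dr:80 ..."]
# into {0: " 0: cs:Connected ... r--- ns:123 nr:0 dw:123 dr:80 ..."}, i.e.
# each continuation line is appended to its minor's status line.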
1225 @classmethod
1227 """Return the DRBD version.
1228
1229 This will return a dict with keys:
1230 - k_major
1231 - k_minor
1232 - k_point
1233 - api
1234 - proto
1235 - proto2 (only on drbd > 8.2.X)
1236
1237 """
1238 first_line = proc_data[0].strip()
1239 version = cls._VERSION_RE.match(first_line)
1240 if not version:
1241 raise errors.BlockDeviceError("Can't parse DRBD version from '%s'" %
1242 first_line)
1243
1244 values = version.groups()
1245 retval = {
1246 "k_major": int(values[0]),
1247 "k_minor": int(values[1]),
1248 "k_point": int(values[2]),
1249 "api": int(values[3]),
1250 "proto": int(values[4]),
1251 }
1252 if values[5] is not None:
1253 retval["proto2"] = values[5]
1254
1255 return retval
1256
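# Illustrative sketch (example header only): a first /proc/drbd line of
#   "version: 8.3.11 (api:88/proto:86-96)"
# yields {"k_major": 8, "k_minor": 3, "k_point": 11, "api": 88, "proto": 86,
#         "proto2": "96"}.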
1257 @staticmethod
1259 """Returns DRBD usermode_helper currently set.
1260
1261 """
1262 try:
1263 helper = utils.ReadFile(filename).splitlines()[0]
1264 except EnvironmentError, err:
1265 if err.errno == errno.ENOENT:
1266 _ThrowError("The file %s cannot be opened, check if the module"
1267 " is loaded (%s)", filename, str(err))
1268 else:
1269 _ThrowError("Can't read DRBD helper file %s: %s", filename, str(err))
1270 if not helper:
1271 _ThrowError("Can't read any data from %s", filename)
1272 return helper
1273
1274 @staticmethod
1276 """Return the path to a drbd device for a given minor.
1277
1278 """
1279 return "/dev/drbd%d" % minor
1280
1281 @classmethod
1283 """Compute the list of used DRBD devices.
1284
1285 """
1286 data = cls._GetProcData()
1287
1288 used_devs = {}
1289 for line in data:
1290 match = cls._VALID_LINE_RE.match(line)
1291 if not match:
1292 continue
1293 minor = int(match.group(1))
1294 state = match.group(2)
1295 if state == cls._ST_UNCONFIGURED:
1296 continue
1297 used_devs[minor] = state, line
1298
1299 return used_devs
1300
1302 """Set our parameters based on the given minor.
1303
1304 This sets our minor variable and our dev_path.
1305
1306 """
1307 if minor is None:
1308 self.minor = self.dev_path = None
1309 self.attached = False
1310 else:
1311 self.minor = minor
1312 self.dev_path = self._DevPath(minor)
1313 self.attached = True
1314
1315 @staticmethod
1342
1344 """Rename a device.
1345
1346 This is not supported for drbd devices.
1347
1348 """
1349 raise errors.ProgrammerError("Can't rename a drbd device")
1350
1351
1352 class DRBD8(BaseDRBD):
1353 """DRBD v8.x block device.
1354
1355 This implements the local host part of the DRBD device, i.e. it
1356 doesn't do anything to the supposed peer. If you need a fully
1357 connected DRBD pair, you need to use this class on both hosts.
1358
1359 The unique_id for the drbd device is a (local_ip, local_port,
1360 remote_ip, remote_port, local_minor, secret) tuple, and it must have
1361 two children: the data device and the meta_device. The meta device
1362 is checked for valid size and is zeroed on create.
1363
1364 """
1365 _MAX_MINORS = 255
1366 _PARSE_SHOW = None
1367
1368
1369 _NET_RECONFIG_TIMEOUT = 60
1370
1371
1372 _DISABLE_DISK_OPTION = "--no-disk-barrier"
1373 _DISABLE_DRAIN_OPTION = "--no-disk-drain"
1374 _DISABLE_FLUSH_OPTION = "--no-disk-flushes"
1375 _DISABLE_META_FLUSH_OPTION = "--no-md-flushes"
1376
1377 def __init__(self, unique_id, children, size, params):
1378 if children and children.count(None) > 0:
1379 children = []
1380 if len(children) not in (0, 2):
1381 raise ValueError("Invalid configuration data %s" % str(children))
1382 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 6:
1383 raise ValueError("Invalid configuration data %s" % str(unique_id))
1384 (self._lhost, self._lport,
1385 self._rhost, self._rport,
1386 self._aminor, self._secret) = unique_id
1387 if children:
1388 if not _CanReadDevice(children[1].dev_path):
1389 logging.info("drbd%s: Ignoring unreadable meta device", self._aminor)
1390 children = []
1391 super(DRBD8, self).__init__(unique_id, children, size, params)
1392 self.major = self._DRBD_MAJOR
1393 version = self._GetVersion(self._GetProcData())
1394 if version["k_major"] != 8:
1395 _ThrowError("Mismatch in DRBD kernel version and requested ganeti"
1396 " usage: kernel is %s.%s, ganeti wants 8.x",
1397 version["k_major"], version["k_minor"])
1398
1399 if (self._lhost is not None and self._lhost == self._rhost and
1400 self._lport == self._rport):
1401 raise ValueError("Invalid configuration data, same local/remote %s" %
1402 (unique_id,))
1403 self.Attach()
1404
1405 @classmethod
1427
1428 @classmethod
1430 """Find an unused DRBD device.
1431
1432 This is specific to 8.x as the minors are allocated dynamically,
1433 so non-existing numbers up to a max minor count are actually free.
1434
1435 """
1436 data = cls._GetProcData()
1437
1438 highest = None
1439 for line in data:
1440 match = cls._UNUSED_LINE_RE.match(line)
1441 if match:
1442 return int(match.group(1))
1443 match = cls._VALID_LINE_RE.match(line)
1444 if match:
1445 minor = int(match.group(1))
1446 highest = max(highest, minor)
1447 if highest is None:
1448 return 0
1449 if highest >= cls._MAX_MINORS:
1450 logging.error("Error: no free drbd minors!")
1451 raise errors.BlockDeviceError("Can't find a free DRBD minor")
1452 return highest + 1
1453
1454 @classmethod
1456 """Return a parser for `drbd show` output.
1457
1458 This will either create or return an already-created parser for the
1459 output of the command `drbd show`.
1460
1461 """
1462 if cls._PARSE_SHOW is not None:
1463 return cls._PARSE_SHOW
1464
1465
1466 lbrace = pyp.Literal("{").suppress()
1467 rbrace = pyp.Literal("}").suppress()
1468 lbracket = pyp.Literal("[").suppress()
1469 rbracket = pyp.Literal("]").suppress()
1470 semi = pyp.Literal(";").suppress()
1471 colon = pyp.Literal(":").suppress()
1472
1473 number = pyp.Word(pyp.nums).setParseAction(lambda s, l, t: int(t[0]))
1474
1475 comment = pyp.Literal("#") + pyp.Optional(pyp.restOfLine)
1476 defa = pyp.Literal("_is_default").suppress()
1477 dbl_quote = pyp.Literal('"').suppress()
1478
1479 keyword = pyp.Word(pyp.alphanums + "-")
1480
1481
1482 value = pyp.Word(pyp.alphanums + "_-/.:")
1483 quoted = dbl_quote + pyp.CharsNotIn('"') + dbl_quote
1484 ipv4_addr = (pyp.Optional(pyp.Literal("ipv4")).suppress() +
1485 pyp.Word(pyp.nums + ".") + colon + number)
1486 ipv6_addr = (pyp.Optional(pyp.Literal("ipv6")).suppress() +
1487 pyp.Optional(lbracket) + pyp.Word(pyp.hexnums + ":") +
1488 pyp.Optional(rbracket) + colon + number)
1489
1490 meta_value = ((value ^ quoted) + lbracket + number + rbracket)
1491
1492 device_value = pyp.Literal("minor").suppress() + number
1493
1494
1495 stmt = (~rbrace + keyword + ~lbrace +
1496 pyp.Optional(ipv4_addr ^ ipv6_addr ^ value ^ quoted ^ meta_value ^
1497 device_value) +
1498 pyp.Optional(defa) + semi +
1499 pyp.Optional(pyp.restOfLine).suppress())
1500
1501
1502 section_name = pyp.Word(pyp.alphas + "_")
1503 section = section_name + lbrace + pyp.ZeroOrMore(pyp.Group(stmt)) + rbrace
1504
1505 bnf = pyp.ZeroOrMore(pyp.Group(section ^ stmt))
1506 bnf.ignore(comment)
1507
1508 cls._PARSE_SHOW = bnf
1509
1510 return bnf
1511
1512 @classmethod
1514 """Return the `drbdsetup show` data for a minor.
1515
1516 """
1517 result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "show"])
1518 if result.failed:
1519 logging.error("Can't display the drbd config: %s - %s",
1520 result.fail_reason, result.output)
1521 return None
1522 return result.stdout
1523
1524 @classmethod
1526 """Parse details about a given DRBD minor.
1527
1528 This returns, if available, the local backing device (as a path)
1529 and the local and remote (ip, port) information from a string
1530 containing the output of the `drbdsetup show` command as returned
1531 by _GetShowData.
1532
1533 """
1534 data = {}
1535 if not out:
1536 return data
1537
1538 bnf = cls._GetShowParser()
1539
1540
1541 try:
1542 results = bnf.parseString(out)
1543 except pyp.ParseException, err:
1544 _ThrowError("Can't parse drbdsetup show output: %s", str(err))
1545
1546
1547 for section in results:
1548 sname = section[0]
1549 if sname == "_this_host":
1550 for lst in section[1:]:
1551 if lst[0] == "disk":
1552 data["local_dev"] = lst[1]
1553 elif lst[0] == "meta-disk":
1554 data["meta_dev"] = lst[1]
1555 data["meta_index"] = lst[2]
1556 elif lst[0] == "address":
1557 data["local_addr"] = tuple(lst[1:])
1558 elif sname == "_remote_host":
1559 for lst in section[1:]:
1560 if lst[0] == "address":
1561 data["remote_addr"] = tuple(lst[1:])
1562 return data
1563
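# Illustrative sketch (abridged, made-up values): for an attached minor the
# parsed dictionary typically looks like
#   {"local_dev": "/dev/xenvg/inst1_data",
#    "meta_dev": "/dev/xenvg/inst1_meta", "meta_index": 0,
#    "local_addr": ("192.0.2.1", 11000),
#    "remote_addr": ("192.0.2.2", 11000)}
# which is exactly the shape checked by _MatchesLocal() and _MatchesNet()
# below.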
1565 """Test if our local config matches with an existing device.
1566
1567 The parameter should be as returned from `_GetDevInfo()`. This
1568 method tests if our local backing device is the same as the one in
1569 the info parameter, in effect testing if we look like the given
1570 device.
1571
1572 """
1573 if self._children:
1574 backend, meta = self._children
1575 else:
1576 backend = meta = None
1577
1578 if backend is not None:
1579 retval = ("local_dev" in info and info["local_dev"] == backend.dev_path)
1580 else:
1581 retval = ("local_dev" not in info)
1582
1583 if meta is not None:
1584 retval = retval and ("meta_dev" in info and
1585 info["meta_dev"] == meta.dev_path)
1586 retval = retval and ("meta_index" in info and
1587 info["meta_index"] == 0)
1588 else:
1589 retval = retval and ("meta_dev" not in info and
1590 "meta_index" not in info)
1591 return retval
1592
1594 """Test if our network config matches with an existing device.
1595
1596 The parameter should be as returned from `_GetDevInfo()`. This
1597 method tests if our network configuration is the same as the one
1598 in the info parameter, in effect testing if we look like the given
1599 device.
1600
1601 """
1602 if (((self._lhost is None and not ("local_addr" in info)) and
1603 (self._rhost is None and not ("remote_addr" in info)))):
1604 return True
1605
1606 if self._lhost is None:
1607 return False
1608
1609 if not ("local_addr" in info and
1610 "remote_addr" in info):
1611 return False
1612
1613 retval = (info["local_addr"] == (self._lhost, self._lport))
1614 retval = (retval and
1615 info["remote_addr"] == (self._rhost, self._rport))
1616 return retval
1617
1619 """Configure the local part of a DRBD device.
1620
1621 """
1622 args = ["drbdsetup", self._DevPath(minor), "disk",
1623 backend, meta, "0",
1624 "-e", "detach",
1625 "--create-device"]
1626 if size:
1627 args.extend(["-d", "%sm" % size])
1628
1629 version = self._GetVersion(self._GetProcData())
1630 vmaj = version["k_major"]
1631 vmin = version["k_minor"]
1632 vrel = version["k_point"]
1633
1634 barrier_args = \
1635 self._ComputeDiskBarrierArgs(vmaj, vmin, vrel,
1636 self.params[constants.LDP_BARRIERS],
1637 self.params[constants.LDP_NO_META_FLUSH])
1638 args.extend(barrier_args)
1639
1640 if self.params[constants.LDP_DISK_CUSTOM]:
1641 args.extend(shlex.split(self.params[constants.LDP_DISK_CUSTOM]))
1642
1643 result = utils.RunCmd(args)
1644 if result.failed:
1645 _ThrowError("drbd%d: can't attach local disk: %s", minor, result.output)
1646
1647 @classmethod
1650 """Compute the DRBD command line parameters for disk barriers
1651
1652 Returns a list of the disk barrier parameters as requested via the
1653 disabled_barriers and disable_meta_flush arguments, and according to the
1654 supported ones in the DRBD version vmaj.vmin.vrel
1655
1656 If the desired option is unsupported, raises errors.BlockDeviceError.
1657
1658 """
1659 disabled_barriers_set = frozenset(disabled_barriers)
1660 if not disabled_barriers_set in constants.DRBD_VALID_BARRIER_OPT:
1661 raise errors.BlockDeviceError("%s is not a valid option set for DRBD"
1662 " barriers" % disabled_barriers)
1663
1664 args = []
1665
1666
1667
1668 if not (vmaj == 8 and vmin in (0, 2, 3)):
1669 raise errors.BlockDeviceError("Unsupported DRBD version: %d.%d.%d" %
1670 (vmaj, vmin, vrel))
1671
1672 def _AppendOrRaise(option, min_version):
1673 """Helper for DRBD options"""
1674 if min_version is not None and vrel >= min_version:
1675 args.append(option)
1676 else:
1677 raise errors.BlockDeviceError("Could not use the option %s as the"
1678 " DRBD version %d.%d.%d does not support"
1679 " it." % (option, vmaj, vmin, vrel))
1680
1681
1682
1683
1684 meta_flush_supported = disk_flush_supported = {
1685 0: 12,
1686 2: 7,
1687 3: 0,
1688 }
1689
1690 disk_drain_supported = {
1691 2: 7,
1692 3: 0,
1693 }
1694
1695 disk_barriers_supported = {
1696 3: 0,
1697 }
1698
1699
1700 if disable_meta_flush:
1701 _AppendOrRaise(cls._DISABLE_META_FLUSH_OPTION,
1702 meta_flush_supported.get(vmin, None))
1703
1704
1705 if constants.DRBD_B_DISK_FLUSH in disabled_barriers_set:
1706 _AppendOrRaise(cls._DISABLE_FLUSH_OPTION,
1707 disk_flush_supported.get(vmin, None))
1708
1709
1710 if constants.DRBD_B_DISK_DRAIN in disabled_barriers_set:
1711 _AppendOrRaise(cls._DISABLE_DRAIN_OPTION,
1712 disk_drain_supported.get(vmin, None))
1713
1714
1715 if constants.DRBD_B_DISK_BARRIERS in disabled_barriers_set:
1716 _AppendOrRaise(cls._DISABLE_DISK_OPTION,
1717 disk_barriers_supported.get(vmin, None))
1718
1719 return args
1720
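# Illustrative sketch (hypothetical call, assuming constants.DRBD_B_NONE
# means "disable no disk barriers"): on an 8.3.x kernel
#   DRBD8._ComputeDiskBarrierArgs(8, 3, 11, constants.DRBD_B_NONE, True)
# returns ["--no-md-flushes"], while the same call for 8.0.7 raises
# errors.BlockDeviceError because disabling meta flushes needs 8.0.12+.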
1721 def _AssembleNet(self, minor, net_info, protocol,
1722 dual_pri=False, hmac=None, secret=None):
1723 """Configure the network part of the device.
1724
1725 """
1726 lhost, lport, rhost, rport = net_info
1727 if None in net_info:
1728
1729
1730 self._ShutdownNet(minor)
1731 return
1732
1733
1734
1735
1736
1737
1738
1739 sync_errors = self._SetMinorSyncParams(minor, self.params)
1740 if sync_errors:
1741 _ThrowError("drbd%d: can't set the synchronization parameters: %s" %
1742 (minor, utils.CommaJoin(sync_errors)))
1743
1744 if netutils.IP6Address.IsValid(lhost):
1745 if not netutils.IP6Address.IsValid(rhost):
1746 _ThrowError("drbd%d: can't connect ip %s to ip %s" %
1747 (minor, lhost, rhost))
1748 family = "ipv6"
1749 elif netutils.IP4Address.IsValid(lhost):
1750 if not netutils.IP4Address.IsValid(rhost):
1751 _ThrowError("drbd%d: can't connect ip %s to ip %s" %
1752 (minor, lhost, rhost))
1753 family = "ipv4"
1754 else:
1755 _ThrowError("drbd%d: Invalid ip %s" % (minor, lhost))
1756
1757 args = ["drbdsetup", self._DevPath(minor), "net",
1758 "%s:%s:%s" % (family, lhost, lport),
1759 "%s:%s:%s" % (family, rhost, rport), protocol,
1760 "-A", "discard-zero-changes",
1761 "-B", "consensus",
1762 "--create-device",
1763 ]
1764 if dual_pri:
1765 args.append("-m")
1766 if hmac and secret:
1767 args.extend(["-a", hmac, "-x", secret])
1768
1769 if self.params[constants.LDP_NET_CUSTOM]:
1770 args.extend(shlex.split(self.params[constants.LDP_NET_CUSTOM]))
1771
1772 result = utils.RunCmd(args)
1773 if result.failed:
1774 _ThrowError("drbd%d: can't setup network: %s - %s",
1775 minor, result.fail_reason, result.output)
1776
1777 def _CheckNetworkConfig():
1778 info = self._GetDevInfo(self._GetShowData(minor))
1779 if not "local_addr" in info or not "remote_addr" in info:
1780 raise utils.RetryAgain()
1781
1782 if (info["local_addr"] != (lhost, lport) or
1783 info["remote_addr"] != (rhost, rport)):
1784 raise utils.RetryAgain()
1785
1786 try:
1787 utils.Retry(_CheckNetworkConfig, 1.0, 10.0)
1788 except utils.RetryTimeout:
1789 _ThrowError("drbd%d: timeout while configuring network", minor)
1790
1792 """Add a disk to the DRBD device.
1793
1794 """
1795 if self.minor is None:
1796 _ThrowError("drbd%d: can't attach to drbd8 during AddChildren",
1797 self._aminor)
1798 if len(devices) != 2:
1799 _ThrowError("drbd%d: need two devices for AddChildren", self.minor)
1800 info = self._GetDevInfo(self._GetShowData(self.minor))
1801 if "local_dev" in info:
1802 _ThrowError("drbd%d: already attached to a local disk", self.minor)
1803 backend, meta = devices
1804 if backend.dev_path is None or meta.dev_path is None:
1805 _ThrowError("drbd%d: children not ready during AddChildren", self.minor)
1806 backend.Open()
1807 meta.Open()
1808 self._CheckMetaSize(meta.dev_path)
1809 self._InitMeta(self._FindUnusedMinor(), meta.dev_path)
1810
1811 self._AssembleLocal(self.minor, backend.dev_path, meta.dev_path, self.size)
1812 self._children = devices
1813
1815 """Detach the drbd device from local storage.
1816
1817 """
1818 if self.minor is None:
1819 _ThrowError("drbd%d: can't attach to drbd8 during RemoveChildren",
1820 self._aminor)
1821
1822 info = self._GetDevInfo(self._GetShowData(self.minor))
1823 if "local_dev" not in info:
1824 return
1825 if len(self._children) != 2:
1826 _ThrowError("drbd%d: we don't have two children: %s", self.minor,
1827 self._children)
1828 if self._children.count(None) == 2:
1829 logging.warning("drbd%d: requested detach while detached", self.minor)
1830 return
1831 if len(devices) != 2:
1832 _ThrowError("drbd%d: we need two children in RemoveChildren", self.minor)
1833 for child, dev in zip(self._children, devices):
1834 if dev != child.dev_path:
1835 _ThrowError("drbd%d: mismatch in local storage (%s != %s) in"
1836 " RemoveChildren", self.minor, dev, child.dev_path)
1837
1838 self._ShutdownLocal(self.minor)
1839 self._children = []
1840
1841 @classmethod
1843 """Set the parameters of the DRBD syncer.
1844
1845 This is the low-level implementation.
1846
1847 @type minor: int
1848 @param minor: the drbd minor whose settings we change
1849 @type params: dict
1850 @param params: LD level disk parameters related to the synchronization
1851 @rtype: list
1852 @return: a list of error messages
1853
1854 """
1855
1856 args = ["drbdsetup", cls._DevPath(minor), "syncer"]
1857 if params[constants.LDP_DYNAMIC_RESYNC]:
1858 version = cls._GetVersion(cls._GetProcData())
1859 vmin = version["k_minor"]
1860 vrel = version["k_point"]
1861
1862
1863
1864 if vmin != 3 or vrel < 9:
1865 msg = ("The current DRBD version (8.%d.%d) does not support the "
1866 "dynamic resync speed controller" % (vmin, vrel))
1867 logging.error(msg)
1868 return [msg]
1869
1870 if params[constants.LDP_PLAN_AHEAD] == 0:
1871 msg = ("A value of 0 for c-plan-ahead disables the dynamic sync speed"
1872 " controller at DRBD level. If you want to disable it, please"
1873 " set the dynamic-resync disk parameter to False.")
1874 logging.error(msg)
1875 return [msg]
1876
1877
1878 args.extend(["--c-plan-ahead", params[constants.LDP_PLAN_AHEAD],
1879 "--c-fill-target", params[constants.LDP_FILL_TARGET],
1880 "--c-delay-target", params[constants.LDP_DELAY_TARGET],
1881 "--c-max-rate", params[constants.LDP_MAX_RATE],
1882 "--c-min-rate", params[constants.LDP_MIN_RATE],
1883 ])
1884
1885 else:
1886 args.extend(["-r", "%d" % params[constants.LDP_RESYNC_RATE]])
1887
1888 args.append("--create-device")
1889 result = utils.RunCmd(args)
1890 if result.failed:
1891 msg = ("Can't change syncer rate: %s - %s" %
1892 (result.fail_reason, result.output))
1893 logging.error(msg)
1894 return [msg]
1895
1896 return []
1897
1899 """Set the synchronization parameters of the DRBD syncer.
1900
1901 @type params: dict
1902 @param params: LD level disk parameters related to the synchronization
1903 @rtype: list
1904 @return: a list of error messages, emitted both by the current node and by
1905 children. An empty list means no errors
1906
1907 """
1908 if self.minor is None:
1909 err = "Not attached during SetSyncParams"
1910 logging.info(err)
1911 return [err]
1912
1913 children_result = super(DRBD8, self).SetSyncParams(params)
1914 children_result.extend(self._SetMinorSyncParams(self.minor, params))
1915 return children_result
1916
1918 """Pauses or resumes the sync of a DRBD device.
1919
1920 @param pause: Wether to pause or resume
1921 @return: the success of the operation
1922
1923 """
1924 if self.minor is None:
1925 logging.info("Not attached during PauseSync")
1926 return False
1927
1928 children_result = super(DRBD8, self).PauseResumeSync(pause)
1929
1930 if pause:
1931 cmd = "pause-sync"
1932 else:
1933 cmd = "resume-sync"
1934
1935 result = utils.RunCmd(["drbdsetup", self.dev_path, cmd])
1936 if result.failed:
1937 logging.error("Can't %s: %s - %s", cmd,
1938 result.fail_reason, result.output)
1939 return not result.failed and children_result
1940
1942 """Return device data from /proc.
1943
1944 """
1945 if self.minor is None:
1946 _ThrowError("drbd%d: GetStats() called while not attached", self._aminor)
1947 proc_info = self._MassageProcData(self._GetProcData())
1948 if self.minor not in proc_info:
1949 _ThrowError("drbd%d: can't find myself in /proc", self.minor)
1950 return DRBD8Status(proc_info[self.minor])
1951
1953 """Returns the sync status of the device.
1954
1955
1956 If sync_percent is None, it means all is ok
1957 If estimated_time is None, it means we can't estimate
1958 the time needed, otherwise it's the time left in seconds.
1959
1960
1961 We set the is_degraded parameter to True on two conditions:
1962 network not connected or local disk missing.
1963
1964 We compute the ldisk parameter based on whether we have a local
1965 disk or not.
1966
1967 @rtype: objects.BlockDevStatus
1968
1969 """
1970 if self.minor is None and not self.Attach():
1971 _ThrowError("drbd%d: can't Attach() in GetSyncStatus", self._aminor)
1972
1973 stats = self.GetProcStatus()
1974 is_degraded = not stats.is_connected or not stats.is_disk_uptodate
1975
1976 if stats.is_disk_uptodate:
1977 ldisk_status = constants.LDS_OKAY
1978 elif stats.is_diskless:
1979 ldisk_status = constants.LDS_FAULTY
1980 else:
1981 ldisk_status = constants.LDS_UNKNOWN
1982
1983 return objects.BlockDevStatus(dev_path=self.dev_path,
1984 major=self.major,
1985 minor=self.minor,
1986 sync_percent=stats.sync_percent,
1987 estimated_time=stats.est_time,
1988 is_degraded=is_degraded,
1989 ldisk_status=ldisk_status)
1990
1991 def Open(self, force=False):
1992 """Make the local state primary.
1993
1994 If the 'force' parameter is given, the '-o' option is passed to
1995 drbdsetup. Since this is a potentially dangerous operation, the
1996 force flag should be only given after creation, when it actually
1997 is mandatory.
1998
1999 """
2000 if self.minor is None and not self.Attach():
2001 logging.error("DRBD cannot attach to a device during open")
2002 return False
2003 cmd = ["drbdsetup", self.dev_path, "primary"]
2004 if force:
2005 cmd.append("-o")
2006 result = utils.RunCmd(cmd)
2007 if result.failed:
2008 _ThrowError("drbd%d: can't make drbd device primary: %s", self.minor,
2009 result.output)
2010
2012 """Make the local state secondary.
2013
2014 This will, of course, fail if the device is in use.
2015
2016 """
2017 if self.minor is None and not self.Attach():
2018 _ThrowError("drbd%d: can't Attach() in Close()", self._aminor)
2019 result = utils.RunCmd(["drbdsetup", self.dev_path, "secondary"])
2020 if result.failed:
2021 _ThrowError("drbd%d: can't switch drbd device to secondary: %s",
2022 self.minor, result.output)
2023
2025 """Removes network configuration.
2026
2027 This method shutdowns the network side of the device.
2028
2029 The method will wait up to a hardcoded timeout for the device to
2030 go into standalone after the 'disconnect' command before
2031 re-configuring it, as sometimes it takes a while for the
2032 disconnect to actually propagate and thus we might issue a 'net'
2033 command while the device is still connected. If the device is
2034 still attached to the network when we time out, we raise an
2035 exception.
2036
2037 """
2038 if self.minor is None:
2039 _ThrowError("drbd%d: disk not attached in re-attach net", self._aminor)
2040
2041 if None in (self._lhost, self._lport, self._rhost, self._rport):
2042 _ThrowError("drbd%d: DRBD disk missing network info in"
2043 " DisconnectNet()", self.minor)
2044
2045 class _DisconnectStatus:
2046 def __init__(self, ever_disconnected):
2047 self.ever_disconnected = ever_disconnected
2048
2049 dstatus = _DisconnectStatus(_IgnoreError(self._ShutdownNet, self.minor))
2050
2051 def _WaitForDisconnect():
2052 if self.GetProcStatus().is_standalone:
2053 return
2054
2055       # the first disconnect may have been ignored (e.g. because of a
2056       # well-timed disconnect from the peer), so retry it and remember
2057       # whether it ever succeeded
2058 dstatus.ever_disconnected = \
2059 _IgnoreError(self._ShutdownNet, self.minor) or dstatus.ever_disconnected
2060
2061 raise utils.RetryAgain()
2062
2063
2064 start_time = time.time()
2065
2066 try:
2067
2068 utils.Retry(_WaitForDisconnect, (0.1, 1.5, 2.0),
2069 self._NET_RECONFIG_TIMEOUT)
2070 except utils.RetryTimeout:
2071 if dstatus.ever_disconnected:
2072 msg = ("drbd%d: device did not react to the"
2073 " 'disconnect' command in a timely manner")
2074 else:
2075 msg = "drbd%d: can't shutdown network, even after multiple retries"
2076
2077 _ThrowError(msg, self.minor)
2078
2079 reconfig_time = time.time() - start_time
2080 if reconfig_time > (self._NET_RECONFIG_TIMEOUT * 0.25):
2081 logging.info("drbd%d: DisconnectNet: detach took %.3f seconds",
2082 self.minor, reconfig_time)
2083
2084   def AttachNet(self, multimaster):
2085 """Reconnects the network.
2086
2087     This method connects the network side of the device with a
2088     specified multi-master flag. The device needs to be 'Standalone'
2089     but must have valid network configuration data.
2090
2091     @type multimaster: boolean
2092     @param multimaster: whether to init the network in dual-primary mode
2093
2094 """
2095 if self.minor is None:
2096 _ThrowError("drbd%d: device not attached in AttachNet", self._aminor)
2097
2098 if None in (self._lhost, self._lport, self._rhost, self._rport):
2099 _ThrowError("drbd%d: missing network info in AttachNet()", self.minor)
2100
2101 status = self.GetProcStatus()
2102
2103 if not status.is_standalone:
2104 _ThrowError("drbd%d: device is not standalone in AttachNet", self.minor)
2105
2106 self._AssembleNet(self.minor,
2107 (self._lhost, self._lport, self._rhost, self._rport),
2108 constants.DRBD_NET_PROTOCOL, dual_pri=multimaster,
2109 hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
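
  # Illustrative sketch only -- a hypothetical helper, not part of the
  # original API.  It shows the typical pairing of DisconnectNet() and
  # AttachNet(): drop the current connection, then reconnect it in
  # dual-primary mode (as done around live migration).
  def _ExampleReconnectDualPrimary(self):
    """Hypothetical helper: reconnect this device in dual-primary mode.

    """
    # go to StandAlone first; DisconnectNet() retries and waits for this
    self.DisconnectNet()
    # reconnect with the same network parameters, allowing both peers to
    # become primary at the same time
    self.AttachNet(multimaster=True)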
2110
2111   def Attach(self):
2112 """Check if our minor is configured.
2113
2114 This doesn't do any device configurations - it only checks if the
2115 minor is in a state different from Unconfigured.
2116
2117 Note that this function will not change the state of the system in
2118 any way (except in case of side-effects caused by reading from
2119 /proc).
2120
2121 """
2122 used_devs = self.GetUsedDevs()
2123 if self._aminor in used_devs:
2124 minor = self._aminor
2125 else:
2126 minor = None
2127
2128 self._SetFromMinor(minor)
2129 return minor is not None
2130
2131   def Assemble(self):
2132     """Assemble the DRBD device.
2133
2134     Method:
2135       - if we have a configured device, we try to ensure that it matches
2136         our config
2137       - if not, we create it from zero
2138       - in either case, we then set the device parameters
2139
2140 """
2141 super(DRBD8, self).Assemble()
2142
2143 self.Attach()
2144 if self.minor is None:
2145
2146 self._FastAssemble()
2147 else:
2148
2149
2150 self._SlowAssemble()
2151
2152 sync_errors = self.SetSyncParams(self.params)
2153 if sync_errors:
2154       _ThrowError("drbd%d: can't set the synchronization parameters: %s",
2155                   self.minor, utils.CommaJoin(sync_errors))
2156
2157   def _SlowAssemble(self):
2158 """Assembles the DRBD device from a (partially) configured device.
2159
2160     If the device is only partially attached (the local device matches
2161     but there is no network setup), we perform the network attach. If
2162     that succeeds, we re-test whether the attach can now report success.
2163
2164 """
2165
2166
2167 net_data = (self._lhost, self._lport, self._rhost, self._rport)
2168 for minor in (self._aminor,):
2169 info = self._GetDevInfo(self._GetShowData(minor))
2170 match_l = self._MatchesLocal(info)
2171 match_r = self._MatchesNet(info)
2172
2173 if match_l and match_r:
2174
2175 break
2176
2177 if match_l and not match_r and "local_addr" not in info:
2178
2179 self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
2180 hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
2181 if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
2182 break
2183 else:
2184 _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
2185 " show' disagrees", minor)
2186
2187 if match_r and "local_dev" not in info:
2188
2189 self._AssembleLocal(minor, self._children[0].dev_path,
2190 self._children[1].dev_path, self.size)
2191 if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
2192 break
2193 else:
2194 _ThrowError("drbd%d: disk attach successful, but 'drbdsetup"
2195 " show' disagrees", minor)
2196
2197       # the local disk matches but the device is connected to the wrong
2198       # peer: we have to disconnect from the network first, otherwise
2199       # the following network attach would be refused while the device
2200       # is still connected
2201 if (match_l and "local_dev" in info and
2202 not match_r and "local_addr" in info):
2203
2204
2205
2206
2207 try:
2208 self._ShutdownNet(minor)
2209 except errors.BlockDeviceError, err:
2210 _ThrowError("drbd%d: device has correct local storage, wrong"
2211 " remote peer and is unable to disconnect in order"
2212 " to attach to the correct peer: %s", minor, str(err))
2213
2214
2215
2216 self._AssembleNet(minor, net_data, constants.DRBD_NET_PROTOCOL,
2217 hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
2218 if self._MatchesNet(self._GetDevInfo(self._GetShowData(minor))):
2219 break
2220 else:
2221 _ThrowError("drbd%d: network attach successful, but 'drbdsetup"
2222 " show' disagrees", minor)
2223
2224 else:
2225 minor = None
2226
2227 self._SetFromMinor(minor)
2228 if minor is None:
2229 _ThrowError("drbd%d: cannot activate, unknown or unhandled reason",
2230 self._aminor)
2231
2232   def _FastAssemble(self):
2233 """Assemble the drbd device from zero.
2234
2235 This is run when in Assemble we detect our minor is unused.
2236
2237 """
2238 minor = self._aminor
2239 if self._children and self._children[0] and self._children[1]:
2240 self._AssembleLocal(minor, self._children[0].dev_path,
2241 self._children[1].dev_path, self.size)
2242 if self._lhost and self._lport and self._rhost and self._rport:
2243 self._AssembleNet(minor,
2244 (self._lhost, self._lport, self._rhost, self._rport),
2245 constants.DRBD_NET_PROTOCOL,
2246 hmac=constants.DRBD_HMAC_ALG, secret=self._secret)
2247 self._SetFromMinor(minor)
2248
2249 @classmethod
2250   def _ShutdownLocal(cls, minor):
2251 """Detach from the local device.
2252
2253 I/Os will continue to be served from the remote device. If we
2254 don't have a remote device, this operation will fail.
2255
2256 """
2257 result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "detach"])
2258 if result.failed:
2259 _ThrowError("drbd%d: can't detach local disk: %s", minor, result.output)
2260
2261 @classmethod
2262   def _ShutdownNet(cls, minor):
2263 """Disconnect from the remote peer.
2264
2265 This fails if we don't have a local device.
2266
2267 """
2268 result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "disconnect"])
2269 if result.failed:
2270 _ThrowError("drbd%d: can't shutdown network: %s", minor, result.output)
2271
2272 @classmethod
2273   def _ShutdownAll(cls, minor):
2274 """Deactivate the device.
2275
2276 This will, of course, fail if the device is in use.
2277
2278 """
2279 result = utils.RunCmd(["drbdsetup", cls._DevPath(minor), "down"])
2280 if result.failed:
2281 _ThrowError("drbd%d: can't shutdown drbd device: %s",
2282 minor, result.output)
2283
2284   def Shutdown(self):
2285 """Shutdown the DRBD device.
2286
2287 """
2288 if self.minor is None and not self.Attach():
2289 logging.info("drbd%d: not attached during Shutdown()", self._aminor)
2290 return
2291 minor = self.minor
2292 self.minor = None
2293 self.dev_path = None
2294 self._ShutdownAll(minor)
2295
2296   def Remove(self):
2297 """Stub remove for DRBD devices.
2298
2299 """
2300 self.Shutdown()
2301
2302 @classmethod
2303 - def Create(cls, unique_id, children, size, params, excl_stor):
2304 """Create a new DRBD8 device.
2305
2306 Since DRBD devices are not created per se, just assembled, this
2307 function only initializes the metadata.
2308
2309 """
2310 if len(children) != 2:
2311 raise errors.ProgrammerError("Invalid setup for the drbd device")
2312 if excl_stor:
2313 raise errors.ProgrammerError("DRBD device requested with"
2314 " exclusive_storage")
2315
2316 aminor = unique_id[4]
2317 proc_info = cls._MassageProcData(cls._GetProcData())
2318 if aminor in proc_info:
2319 status = DRBD8Status(proc_info[aminor])
2320 in_use = status.is_in_use
2321 else:
2322 in_use = False
2323 if in_use:
2324 _ThrowError("drbd%d: minor is already in use at Create() time", aminor)
2325 meta = children[1]
2326 meta.Assemble()
2327 if not meta.Attach():
2328 _ThrowError("drbd%d: can't attach to meta device '%s'",
2329 aminor, meta)
2330 cls._CheckMetaSize(meta.dev_path)
2331 cls._InitMeta(aminor, meta.dev_path)
2332 return cls(unique_id, children, size, params)
2333
2334 - def Grow(self, amount, dryrun, backingstore):
2335 """Resize the DRBD device and its backing storage.
2336
2337 """
2338 if self.minor is None:
2339 _ThrowError("drbd%d: Grow called while not attached", self._aminor)
2340 if len(self._children) != 2 or None in self._children:
2341 _ThrowError("drbd%d: cannot grow diskless device", self.minor)
2342 self._children[0].Grow(amount, dryrun, backingstore)
2343 if dryrun or backingstore:
2344       # DRBD itself cannot be resized in dry-run mode, and when only the
2345       # backing storage has to be grown the child call above is enough
2346 return
2347 result = utils.RunCmd(["drbdsetup", self.dev_path, "resize", "-s",
2348 "%dm" % (self.size + amount)])
2349 if result.failed:
2350 _ThrowError("drbd%d: resize failed: %s", self.minor, result.output)
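
# Illustrative sketch -- a hypothetical helper, not part of this module's
# API.  Growing a DRBD8-backed disk is a two step operation, which the
# 'backingstore' parameter of Grow() above distinguishes: first grow only
# the backing storage, then the DRBD device itself.
def _ExampleGrowDrbdDisk(drbd_dev, delta_mb):
  """Hypothetical helper: grow a DRBD8 device by delta_mb mebibytes.

  """
  # grow only the children (the backing storage); DRBD itself is untouched,
  # as the child backends ignore the call unless backingstore is True
  drbd_dev.Grow(delta_mb, False, True)
  # now resize the DRBD device on top of the already grown backing storage
  drbd_dev.Grow(delta_mb, False, False)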
2351
2352
2353 class FileStorage(BlockDev):
2354 """File device.
2355
2356   This class represents a file storage backend device.
2357
2358 The unique_id for the file device is a (file_driver, file_path) tuple.
2359
2360 """
2361 - def __init__(self, unique_id, children, size, params):
2362     """Initializes a file device backend.
2363
2364 """
2365 if children:
2366 raise errors.BlockDeviceError("Invalid setup for file device")
2367 super(FileStorage, self).__init__(unique_id, children, size, params)
2368 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2369 raise ValueError("Invalid configuration data %s" % str(unique_id))
2370 self.driver = unique_id[0]
2371 self.dev_path = unique_id[1]
2372
2373 CheckFileStoragePath(self.dev_path)
2374
2375 self.Attach()
2376
2377   def Assemble(self):
2378 """Assemble the device.
2379
2380 Checks whether the file device exists, raises BlockDeviceError otherwise.
2381
2382 """
2383 if not os.path.exists(self.dev_path):
2384       _ThrowError("File device '%s' does not exist", self.dev_path)
2385
2386   def Shutdown(self):
2387 """Shutdown the device.
2388
2389 This is a no-op for the file type, as we don't deactivate
2390 the file on shutdown.
2391
2392 """
2393 pass
2394
2395 - def Open(self, force=False):
2396 """Make the device ready for I/O.
2397
2398 This is a no-op for the file type.
2399
2400 """
2401 pass
2402
2403   def Close(self):
2404 """Notifies that the device will no longer be used for I/O.
2405
2406 This is a no-op for the file type.
2407
2408 """
2409 pass
2410
2411   def Remove(self):
2412 """Remove the file backing the block device.
2413
2414 @rtype: boolean
2415 @return: True if the removal was successful
2416
2417 """
2418 try:
2419 os.remove(self.dev_path)
2420 except OSError, err:
2421 if err.errno != errno.ENOENT:
2422 _ThrowError("Can't remove file '%s': %s", self.dev_path, err)
2423
2424   def Rename(self, new_id):
2425 """Renames the file.
2426
2427 """
2428
2429 _ThrowError("Rename is not supported for file-based storage")
2430
2431 - def Grow(self, amount, dryrun, backingstore):
2432     """Grow the file.
2433
2434 @param amount: the amount (in mebibytes) to grow with
2435
2436 """
2437 if not backingstore:
2438 return
2439
2440 self.Assemble()
2441 current_size = self.GetActualSize()
2442 new_size = current_size + amount * 1024 * 1024
2443 assert new_size > current_size, "Cannot Grow with a negative amount"
2444
2445 if dryrun:
2446 return
2447 try:
2448 f = open(self.dev_path, "a+")
2449 f.truncate(new_size)
2450 f.close()
2451 except EnvironmentError, err:
2452       _ThrowError("Error in file growth: %s", str(err))
2453
2454   def Attach(self):
2455 """Attach to an existing file.
2456
2457 Check if this file already exists.
2458
2459 @rtype: boolean
2460 @return: True if file exists
2461
2462 """
2463 self.attached = os.path.exists(self.dev_path)
2464 return self.attached
2465
2466   def GetActualSize(self):
2467 """Return the actual disk size.
2468
2469 @note: the device needs to be active when this is called
2470
2471 """
2472 assert self.attached, "BlockDevice not attached in GetActualSize()"
2473 try:
2474 st = os.stat(self.dev_path)
2475 return st.st_size
2476 except OSError, err:
2477 _ThrowError("Can't stat %s: %s", self.dev_path, err)
2478
2479 @classmethod
2480 - def Create(cls, unique_id, children, size, params, excl_stor):
2481 """Create a new file.
2482
2483 @param size: the size of file in MiB
2484
2485 @rtype: L{bdev.FileStorage}
2486 @return: an instance of FileStorage
2487
2488 """
2489 if excl_stor:
2490 raise errors.ProgrammerError("FileStorage device requested with"
2491 " exclusive_storage")
2492 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2493 raise ValueError("Invalid configuration data %s" % str(unique_id))
2494
2495 dev_path = unique_id[1]
2496
2497 CheckFileStoragePath(dev_path)
2498
2499 try:
2500 fd = os.open(dev_path, os.O_RDWR | os.O_CREAT | os.O_EXCL)
2501 f = os.fdopen(fd, "w")
2502 f.truncate(size * 1024 * 1024)
2503 f.close()
2504 except EnvironmentError, err:
2505 if err.errno == errno.EEXIST:
2506         _ThrowError("File already exists: %s", dev_path)
2507       _ThrowError("Error in file creation: %s", str(err))
2508
2509 return FileStorage(unique_id, children, size, params)
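
# Illustrative sketch with hypothetical values, not part of this module:
# creating and growing a file-based disk.  The driver string and the path
# are made up, and the path must be acceptable to CheckFileStoragePath().
def _ExampleFileStorageUsage():
  """Hypothetical helper demonstrating FileStorage.Create() and Grow().

  """
  unique_id = ("loop", "/srv/ganeti/file-storage/example/disk0")
  # create a sparse 1024 MiB file and get a FileStorage instance back
  dev = FileStorage.Create(unique_id, [], 1024, {}, False)
  # grow it by 512 MiB; only the backing file exists, so backingstore=True
  dev.Grow(512, False, True)
  return dev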
2510
2511
2512 class PersistentBlockDevice(BlockDev):
2513   """A block device with a persistent node.
2514
2515 May be either directly attached, or exposed through DM (e.g. dm-multipath).
2516 udev helpers are probably required to give persistent, human-friendly
2517 names.
2518
2519 For the time being, pathnames are required to lie under /dev.
2520
2521 """
2522 - def __init__(self, unique_id, children, size, params):
2523 """Attaches to a static block device.
2524
2525 The unique_id is a path under /dev.
2526
2527 """
2528 super(PersistentBlockDevice, self).__init__(unique_id, children, size,
2529 params)
2530 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2531 raise ValueError("Invalid configuration data %s" % str(unique_id))
2532 self.dev_path = unique_id[1]
2533 if not os.path.realpath(self.dev_path).startswith("/dev/"):
2534 raise ValueError("Full path '%s' lies outside /dev" %
2535 os.path.realpath(self.dev_path))
2536
2537
2538
2539
2540 if unique_id[0] != constants.BLOCKDEV_DRIVER_MANUAL:
2541 raise ValueError("Got persistent block device of invalid type: %s" %
2542 unique_id[0])
2543
2544 self.major = self.minor = None
2545 self.Attach()
2546
2547 @classmethod
2548 - def Create(cls, unique_id, children, size, params, excl_stor):
2549     """Create a new device.
2550
2551     This is a no-op; we only return a PersistentBlockDevice instance.
2552
2553 """
2554 if excl_stor:
2555 raise errors.ProgrammerError("Persistent block device requested with"
2556 " exclusive_storage")
2557 return PersistentBlockDevice(unique_id, children, 0, params)
2558
2559   def Remove(self):
2560     """Remove a device.
2561
2562     This is a no-op.
2563
2564 """
2565 pass
2566
2567   def Rename(self, new_id):
2568 """Rename this device.
2569
2570 """
2571 _ThrowError("Rename is not supported for PersistentBlockDev storage")
2572
2573   def Attach(self):
2574 """Attach to an existing block device.
2575
2576
2577 """
2578 self.attached = False
2579 try:
2580 st = os.stat(self.dev_path)
2581 except OSError, err:
2582 logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
2583 return False
2584
2585 if not stat.S_ISBLK(st.st_mode):
2586 logging.error("%s is not a block device", self.dev_path)
2587 return False
2588
2589 self.major = os.major(st.st_rdev)
2590 self.minor = os.minor(st.st_rdev)
2591 self.attached = True
2592
2593 return True
2594
2595   def Assemble(self):
2596 """Assemble the device.
2597
2598 """
2599 pass
2600
2601   def Shutdown(self):
2602 """Shutdown the device.
2603
2604 """
2605 pass
2606
2607 - def Open(self, force=False):
2608 """Make the device ready for I/O.
2609
2610 """
2611 pass
2612
2613   def Close(self):
2614 """Notifies that the device will no longer be used for I/O.
2615
2616 """
2617 pass
2618
2619 - def Grow(self, amount, dryrun, backingstore):
2620 """Grow the logical volume.
2621
2622 """
2623 _ThrowError("Grow is not supported for PersistentBlockDev storage")
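
# Illustrative sketch with a hypothetical device path, not part of this
# module: adopting an existing block device node.  The unique_id must use
# the 'manual' driver and point below /dev, as enforced by the constructor
# above.
def _ExampleAdoptBlockdev():
  """Hypothetical helper demonstrating PersistentBlockDevice usage.

  """
  unique_id = (constants.BLOCKDEV_DRIVER_MANUAL, "/dev/disk/by-id/example")
  # Create() is a no-op that only instantiates and attaches to the node
  return PersistentBlockDevice.Create(unique_id, [], 0, {}, False)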
2624
2625
2626 class RADOSBlockDevice(BlockDev):
2627 """A RADOS Block Device (rbd).
2628
2629   This class implements the RADOS Block Device backend. You need the
2630   rbd kernel driver, the RADOS Tools and a working RADOS cluster for
2631   this to be functional.
2632
2633 """
2634 - def __init__(self, unique_id, children, size, params):
2635 """Attaches to an rbd device.
2636
2637 """
2638 super(RADOSBlockDevice, self).__init__(unique_id, children, size, params)
2639 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2640 raise ValueError("Invalid configuration data %s" % str(unique_id))
2641
2642 self.driver, self.rbd_name = unique_id
2643
2644 self.major = self.minor = None
2645 self.Attach()
2646
2647 @classmethod
2648 - def Create(cls, unique_id, children, size, params, excl_stor):
2649 """Create a new rbd device.
2650
2651 Provision a new rbd volume inside a RADOS pool.
2652
2653 """
2654 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2655 raise errors.ProgrammerError("Invalid configuration data %s" %
2656 str(unique_id))
2657 if excl_stor:
2658 raise errors.ProgrammerError("RBD device requested with"
2659 " exclusive_storage")
2660 rbd_pool = params[constants.LDP_POOL]
2661 rbd_name = unique_id[1]
2662
2663
2664 cmd = [constants.RBD_CMD, "create", "-p", rbd_pool,
2665 rbd_name, "--size", "%s" % size]
2666 result = utils.RunCmd(cmd)
2667 if result.failed:
2668 _ThrowError("rbd creation failed (%s): %s",
2669 result.fail_reason, result.output)
2670
2671 return RADOSBlockDevice(unique_id, children, size, params)
2672
2673   def Remove(self):
2674 """Remove the rbd device.
2675
2676 """
2677 rbd_pool = self.params[constants.LDP_POOL]
2678 rbd_name = self.unique_id[1]
2679
2680 if not self.minor and not self.Attach():
2681
2682 return
2683
2684
2685 self.Shutdown()
2686
2687
2688 cmd = [constants.RBD_CMD, "rm", "-p", rbd_pool, rbd_name]
2689 result = utils.RunCmd(cmd)
2690 if result.failed:
2691 _ThrowError("Can't remove Volume from cluster with rbd rm: %s - %s",
2692 result.fail_reason, result.output)
2693
2694   def Rename(self, new_id):
2695 """Rename this device.
2696
2697 """
2698 pass
2699
2700   def Attach(self):
2701 """Attach to an existing rbd device.
2702
2703 This method maps the rbd volume that matches our name with
2704 an rbd device and then attaches to this device.
2705
2706 """
2707 self.attached = False
2708
2709
2710 self.dev_path = self._MapVolumeToBlockdev(self.unique_id)
2711
2712 try:
2713 st = os.stat(self.dev_path)
2714 except OSError, err:
2715 logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
2716 return False
2717
2718 if not stat.S_ISBLK(st.st_mode):
2719 logging.error("%s is not a block device", self.dev_path)
2720 return False
2721
2722 self.major = os.major(st.st_rdev)
2723 self.minor = os.minor(st.st_rdev)
2724 self.attached = True
2725
2726 return True
2727
2728   def _MapVolumeToBlockdev(self, unique_id):
2729 """Maps existing rbd volumes to block devices.
2730
2731     This method is idempotent: mapping an already-mapped volume is a no-op.
2732
2733 @rtype: string
2734 @return: the block device path that corresponds to the volume
2735
2736 """
2737 pool = self.params[constants.LDP_POOL]
2738 name = unique_id[1]
2739
2740
2741 rbd_dev = self._VolumeToBlockdev(pool, name)
2742 if rbd_dev:
2743
2744 return rbd_dev
2745
2746
2747 map_cmd = [constants.RBD_CMD, "map", "-p", pool, name]
2748 result = utils.RunCmd(map_cmd)
2749 if result.failed:
2750 _ThrowError("rbd map failed (%s): %s",
2751 result.fail_reason, result.output)
2752
2753
2754 rbd_dev = self._VolumeToBlockdev(pool, name)
2755 if not rbd_dev:
2756 _ThrowError("rbd map succeeded, but could not find the rbd block"
2757 " device in output of showmapped, for volume: %s", name)
2758
2759
2760 return rbd_dev
2761
2762 @classmethod
2763   def _VolumeToBlockdev(cls, pool, volume_name):
2764     """Resolve a volume name to its corresponding rbd block device path.
2765
2766 @type pool: string
2767 @param pool: RADOS pool to use
2768 @type volume_name: string
2769 @param volume_name: the name of the volume whose device we search for
2770 @rtype: string or None
2771 @return: block device path if the volume is mapped, else None
2772
2773 """
2774 try:
2775
2776
2777 showmap_cmd = [
2778 constants.RBD_CMD,
2779 "showmapped",
2780 "-p",
2781 pool,
2782 "--format",
2783 "json"
2784 ]
2785 result = utils.RunCmd(showmap_cmd)
2786 if result.failed:
2787 logging.error("rbd JSON output formatting returned error (%s): %s,"
2788                       " falling back to plain output parsing",
2789 result.fail_reason, result.output)
2790 raise RbdShowmappedJsonError
2791
2792 return cls._ParseRbdShowmappedJson(result.output, volume_name)
2793 except RbdShowmappedJsonError:
2794
2795
2796 showmap_cmd = [constants.RBD_CMD, "showmapped", "-p", pool]
2797 result = utils.RunCmd(showmap_cmd)
2798 if result.failed:
2799 _ThrowError("rbd showmapped failed (%s): %s",
2800 result.fail_reason, result.output)
2801
2802 return cls._ParseRbdShowmappedPlain(result.output, volume_name)
2803
2804 @staticmethod
2805   def _ParseRbdShowmappedJson(output, volume_name):
2806 """Parse the json output of `rbd showmapped'.
2807
2808 This method parses the json output of `rbd showmapped' and returns the rbd
2809 block device path (e.g. /dev/rbd0) that matches the given rbd volume.
2810
2811 @type output: string
2812 @param output: the json output of `rbd showmapped'
2813 @type volume_name: string
2814 @param volume_name: the name of the volume whose device we search for
2815 @rtype: string or None
2816 @return: block device path if the volume is mapped, else None
2817
2818 """
2819 try:
2820 devices = serializer.LoadJson(output)
2821 except ValueError, err:
2822 _ThrowError("Unable to parse JSON data: %s" % err)
2823
2824 rbd_dev = None
2825 for d in devices.values():
2826 try:
2827 name = d["name"]
2828 except KeyError:
2829 _ThrowError("'name' key missing from json object %s", devices)
2830
2831 if name == volume_name:
2832 if rbd_dev is not None:
2833 _ThrowError("rbd volume %s is mapped more than once", volume_name)
2834
2835 rbd_dev = d["device"]
2836
2837 return rbd_dev
2838
2839 @staticmethod
2840   def _ParseRbdShowmappedPlain(output, volume_name):
2841 """Parse the (plain / text) output of `rbd showmapped'.
2842
2843 This method parses the output of `rbd showmapped' and returns
2844 the rbd block device path (e.g. /dev/rbd0) that matches the
2845 given rbd volume.
2846
2847 @type output: string
2848 @param output: the plain text output of `rbd showmapped'
2849 @type volume_name: string
2850 @param volume_name: the name of the volume whose device we search for
2851 @rtype: string or None
2852 @return: block device path if the volume is mapped, else None
2853
2854 """
2855 allfields = 5
2856 volumefield = 2
2857 devicefield = 4
2858
2859 lines = output.splitlines()
2860
2861
2862 splitted_lines = map(lambda l: l.split(), lines)
2863
2864
2865 if not splitted_lines:
2866 return None
2867
2868
2869 field_cnt = len(splitted_lines[0])
2870 if field_cnt != allfields:
2871       # the fields are not whitespace separated: fall back to the older,
2872       # tab separated output format and re-count the fields
2873       splitted_lines = map(lambda l: l.split("\t"), lines)
           field_cnt = len(splitted_lines[0])
2874       if field_cnt != allfields:
2875         _ThrowError("Cannot parse rbd showmapped output: expected %s"
2876                     " fields, found %s", allfields, field_cnt)
2877
2878 matched_lines = \
2879 filter(lambda l: len(l) == allfields and l[volumefield] == volume_name,
2880 splitted_lines)
2881
2882 if len(matched_lines) > 1:
2883 _ThrowError("rbd volume %s mapped more than once", volume_name)
2884
2885 if matched_lines:
2886
2887 rbd_dev = matched_lines[0][devicefield]
2888 return rbd_dev
2889
2890
2891 return None
2892
2893   def Assemble(self):
2894 """Assemble the device.
2895
2896 """
2897 pass
2898
2899   def Shutdown(self):
2900 """Shutdown the device.
2901
2902 """
2903 if not self.minor and not self.Attach():
2904
2905 return
2906
2907
2908 self._UnmapVolumeFromBlockdev(self.unique_id)
2909
2910 self.minor = None
2911 self.dev_path = None
2912
2913   def _UnmapVolumeFromBlockdev(self, unique_id):
2914     """Unmaps the rbd device from the volume it is mapped to.
2915
2916     This method is idempotent: if the volume is not currently mapped,
2917     nothing is done.
2918
2919 """
2920 pool = self.params[constants.LDP_POOL]
2921 name = unique_id[1]
2922
2923
2924 rbd_dev = self._VolumeToBlockdev(pool, name)
2925
2926 if rbd_dev:
2927
2928 unmap_cmd = [constants.RBD_CMD, "unmap", "%s" % rbd_dev]
2929 result = utils.RunCmd(unmap_cmd)
2930 if result.failed:
2931 _ThrowError("rbd unmap failed (%s): %s",
2932 result.fail_reason, result.output)
2933
2934 - def Open(self, force=False):
2935 """Make the device ready for I/O.
2936
2937 """
2938 pass
2939
2940   def Close(self):
2941 """Notifies that the device will no longer be used for I/O.
2942
2943 """
2944 pass
2945
2946 - def Grow(self, amount, dryrun, backingstore):
2947 """Grow the Volume.
2948
2949 @type amount: integer
2950 @param amount: the amount (in mebibytes) to grow with
2951 @type dryrun: boolean
2952 @param dryrun: whether to execute the operation in simulation mode
2953 only, without actually increasing the size
2954
2955 """
2956 if not backingstore:
2957 return
2958 if not self.Attach():
2959 _ThrowError("Can't attach to rbd device during Grow()")
2960
2961 if dryrun:
2962
2963
2964
2965 return
2966
2967 rbd_pool = self.params[constants.LDP_POOL]
2968 rbd_name = self.unique_id[1]
2969 new_size = self.size + amount
2970
2971
2972 cmd = [constants.RBD_CMD, "resize", "-p", rbd_pool,
2973 rbd_name, "--size", "%s" % new_size]
2974 result = utils.RunCmd(cmd)
2975 if result.failed:
2976 _ThrowError("rbd resize failed (%s): %s",
2977 result.fail_reason, result.output)
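
# Illustrative sketch with hypothetical sample output, not part of this
# module: the shape of plain-text `rbd showmapped' output that the parser
# above expects -- five whitespace-separated columns, with the volume name
# in the third column and the device node in the fifth.
def _ExampleParseShowmapped():
  """Hypothetical helper exercising _ParseRbdShowmappedPlain().

  """
  sample = "\n".join([
    "id pool image snap device",
    "0  rbd  vol1  -    /dev/rbd0",
    ])
  dev = RADOSBlockDevice._ParseRbdShowmappedPlain(sample, "vol1")
  assert dev == "/dev/rbd0"
  # a volume that is not mapped yields None
  assert RADOSBlockDevice._ParseRbdShowmappedPlain(sample, "vol2") is None
  return dev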
2978
2979
2980 class ExtStorageDevice(BlockDev):
2981 """A block device provided by an ExtStorage Provider.
2982
2983   This class implements the External Storage Interface, which means it
2984   handles externally provided block devices.
2985
2986 """
2987 - def __init__(self, unique_id, children, size, params):
2988 """Attaches to an extstorage block device.
2989
2990 """
2991 super(ExtStorageDevice, self).__init__(unique_id, children, size, params)
2992 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
2993 raise ValueError("Invalid configuration data %s" % str(unique_id))
2994
2995 self.driver, self.vol_name = unique_id
2996 self.ext_params = params
2997
2998 self.major = self.minor = None
2999 self.Attach()
3000
3001 @classmethod
3002 - def Create(cls, unique_id, children, size, params, excl_stor):
3003 """Create a new extstorage device.
3004
3005 Provision a new volume using an extstorage provider, which will
3006 then be mapped to a block device.
3007
3008 """
3009 if not isinstance(unique_id, (tuple, list)) or len(unique_id) != 2:
3010 raise errors.ProgrammerError("Invalid configuration data %s" %
3011 str(unique_id))
3012 if excl_stor:
3013 raise errors.ProgrammerError("extstorage device requested with"
3014 " exclusive_storage")
3015
3016
3017
3018 _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id,
3019 params, str(size))
3020
3021 return ExtStorageDevice(unique_id, children, size, params)
3022
3038
3039   def Rename(self, new_id):
3040 """Rename this device.
3041
3042 """
3043 pass
3044
3045   def Attach(self):
3046 """Attach to an existing extstorage device.
3047
3048 This method maps the extstorage volume that matches our name with
3049 a corresponding block device and then attaches to this device.
3050
3051 """
3052 self.attached = False
3053
3054
3055
3056 self.dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH,
3057 self.unique_id, self.ext_params)
3058
3059 try:
3060 st = os.stat(self.dev_path)
3061 except OSError, err:
3062 logging.error("Error stat()'ing %s: %s", self.dev_path, str(err))
3063 return False
3064
3065 if not stat.S_ISBLK(st.st_mode):
3066 logging.error("%s is not a block device", self.dev_path)
3067 return False
3068
3069 self.major = os.major(st.st_rdev)
3070 self.minor = os.minor(st.st_rdev)
3071 self.attached = True
3072
3073 return True
3074
3075   def Assemble(self):
3076 """Assemble the device.
3077
3078 """
3079 pass
3080
3081   def Shutdown(self):
3082 """Shutdown the device.
3083
3084 """
3085 if not self.minor and not self.Attach():
3086
3087 return
3088
3089
3090
3091 _ExtStorageAction(constants.ES_ACTION_DETACH, self.unique_id,
3092 self.ext_params)
3093
3094 self.minor = None
3095 self.dev_path = None
3096
3097 - def Open(self, force=False):
3098 """Make the device ready for I/O.
3099
3100 """
3101 pass
3102
3103   def Close(self):
3104 """Notifies that the device will no longer be used for I/O.
3105
3106 """
3107 pass
3108
3109 - def Grow(self, amount, dryrun, backingstore):
3110 """Grow the Volume.
3111
3112 @type amount: integer
3113 @param amount: the amount (in mebibytes) to grow with
3114 @type dryrun: boolean
3115 @param dryrun: whether to execute the operation in simulation mode
3116 only, without actually increasing the size
3117
3118 """
3119 if not backingstore:
3120 return
3121 if not self.Attach():
3122 _ThrowError("Can't attach to extstorage device during Grow()")
3123
3124 if dryrun:
3125
3126 return
3127
3128 new_size = self.size + amount
3129
3130
3131
3132 _ExtStorageAction(constants.ES_ACTION_GROW, self.unique_id,
3133 self.ext_params, str(self.size), grow=str(new_size))
3134
3150
3151
3152 -def _ExtStorageAction(action, unique_id, ext_params,
3153 size=None, grow=None, metadata=None):
3154 """Take an External Storage action.
3155
3156 Take an External Storage action concerning or affecting
3157 a specific Volume inside the External Storage.
3158
3159 @type action: string
3160 @param action: which action to perform. One of:
3161 create / remove / grow / attach / detach
3162 @type unique_id: tuple (driver, vol_name)
3163 @param unique_id: a tuple containing the type of ExtStorage (driver)
3164 and the Volume name
3165 @type ext_params: dict
3166 @param ext_params: ExtStorage parameters
3167 @type size: integer
3168 @param size: the size of the Volume in mebibytes
3169 @type grow: integer
3170 @param grow: the new size in mebibytes (after grow)
3171 @type metadata: string
3172 @param metadata: metadata info of the Volume, for use by the provider
3173 @rtype: None or a block device path (during attach)
3174
3175 """
3176 driver, vol_name = unique_id
3177
3178
3179 status, inst_es = ExtStorageFromDisk(driver)
3180 if not status:
3181 _ThrowError("%s" % inst_es)
3182
3183
3184 create_env = _ExtStorageEnvironment(unique_id, ext_params, size,
3185 grow, metadata)
3186
3187
3188
3189
3190 logfile = None
3191   if action != constants.ES_ACTION_ATTACH:
3192 logfile = _VolumeLogName(action, driver, vol_name)
3193
3194
3195 if action not in constants.ES_SCRIPTS:
3196 _ThrowError("Action '%s' doesn't result in a valid ExtStorage script" %
3197 action)
3198
3199
3200 script_name = action + "_script"
3201 script = getattr(inst_es, script_name)
3202
3203
3204 result = utils.RunCmd([script], env=create_env,
3205 cwd=inst_es.path, output=logfile,)
3206 if result.failed:
3207 logging.error("External storage's %s command '%s' returned"
3208 " error: %s, logfile: %s, output: %s",
3209 action, result.cmd, result.fail_reason,
3210 logfile, result.output)
3211
3212
3213
3214     if action != constants.ES_ACTION_ATTACH:
3215 lines = [utils.SafeEncode(val)
3216 for val in utils.TailFile(logfile, lines=20)]
3217 else:
3218 lines = result.output[-20:]
3219
3220 _ThrowError("External storage's %s script failed (%s), last"
3221 " lines of output:\n%s",
3222 action, result.fail_reason, "\n".join(lines))
3223
3224 if action == constants.ES_ACTION_ATTACH:
3225 return result.stdout
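
# Illustrative sketch with hypothetical provider and volume names, not part
# of this module: the usual life cycle of an ExtStorage volume in terms of
# the dispatcher above -- create it, attach it to obtain a local block
# device path, and detach it again when done.
def _ExampleExtStorageLifecycle():
  """Hypothetical helper demonstrating _ExtStorageAction().

  """
  unique_id = ("sample-provider", "vol-0001")
  ext_params = {"redundancy": "2"}
  # provision a 1024 MiB volume inside the external storage
  _ExtStorageAction(constants.ES_ACTION_CREATE, unique_id, ext_params,
                    str(1024))
  # attach returns the block device path printed by the provider's script
  dev_path = _ExtStorageAction(constants.ES_ACTION_ATTACH, unique_id,
                               ext_params)
  # ... use dev_path, then release the mapping again ...
  _ExtStorageAction(constants.ES_ACTION_DETACH, unique_id, ext_params)
  return dev_path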
3226
3227
3228 def ExtStorageFromDisk(name, base_dir=None):
3229 """Create an ExtStorage instance from disk.
3230
3231 This function will return an ExtStorage instance
3232 if the given name is a valid ExtStorage name.
3233
3234 @type base_dir: string
3235 @keyword base_dir: Base directory containing ExtStorage installations.
3236 Defaults to a search in all the ES_SEARCH_PATH dirs.
3237 @rtype: tuple
3238 @return: True and the ExtStorage instance if we find a valid one, or
3239       False and the diagnosis message on error
3240
3241 """
3242 if base_dir is None:
3243 es_base_dir = pathutils.ES_SEARCH_PATH
3244 else:
3245 es_base_dir = [base_dir]
3246
3247 es_dir = utils.FindFile(name, es_base_dir, os.path.isdir)
3248
3249 if es_dir is None:
3250 return False, ("Directory for External Storage Provider %s not"
3251 " found in search path" % name)
3252
3253
3254
3255
3256 es_files = dict.fromkeys(constants.ES_SCRIPTS, True)
3257
3258 es_files[constants.ES_PARAMETERS_FILE] = True
3259
3260 for (filename, _) in es_files.items():
3261 es_files[filename] = utils.PathJoin(es_dir, filename)
3262
3263 try:
3264 st = os.stat(es_files[filename])
3265 except EnvironmentError, err:
3266 return False, ("File '%s' under path '%s' is missing (%s)" %
3267 (filename, es_dir, utils.ErrnoOrStr(err)))
3268
3269 if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
3270 return False, ("File '%s' under path '%s' is not a regular file" %
3271 (filename, es_dir))
3272
3273 if filename in constants.ES_SCRIPTS:
3274 if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
3275 return False, ("File '%s' under path '%s' is not executable" %
3276 (filename, es_dir))
3277
3278 parameters = []
3279 if constants.ES_PARAMETERS_FILE in es_files:
3280 parameters_file = es_files[constants.ES_PARAMETERS_FILE]
3281 try:
3282 parameters = utils.ReadFile(parameters_file).splitlines()
3283 except EnvironmentError, err:
3284 return False, ("Error while reading the EXT parameters file at %s: %s" %
3285 (parameters_file, utils.ErrnoOrStr(err)))
3286 parameters = [v.split(None, 1) for v in parameters]
3287
3288 es_obj = \
3289 objects.ExtStorage(name=name, path=es_dir,
3290 create_script=es_files[constants.ES_SCRIPT_CREATE],
3291 remove_script=es_files[constants.ES_SCRIPT_REMOVE],
3292 grow_script=es_files[constants.ES_SCRIPT_GROW],
3293 attach_script=es_files[constants.ES_SCRIPT_ATTACH],
3294 detach_script=es_files[constants.ES_SCRIPT_DETACH],
3295 setinfo_script=es_files[constants.ES_SCRIPT_SETINFO],
3296 verify_script=es_files[constants.ES_SCRIPT_VERIFY],
3297 supported_parameters=parameters)
3298 return True, es_obj
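
# Illustrative sketch with a hypothetical provider name, not part of this
# module: loading a provider definition and reacting to the (status,
# payload) pair that ExtStorageFromDisk() returns.
def _ExampleLoadProvider(name="sample-provider"):
  """Hypothetical helper demonstrating ExtStorageFromDisk().

  """
  status, es_obj = ExtStorageFromDisk(name)
  if not status:
    # on failure the second element is the diagnosis message
    _ThrowError("Can't load ExtStorage provider %s: %s", name, es_obj)
  logging.info("Provider %s found at %s (parameters: %s)",
               name, es_obj.path, es_obj.supported_parameters)
  return es_obj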
3299
3300
3301 def _ExtStorageEnvironment(unique_id, ext_params,
3302                            size=None, grow=None, metadata=None):
3303 """Calculate the environment for an External Storage script.
3304
3305 @type unique_id: tuple (driver, vol_name)
3306 @param unique_id: ExtStorage pool and name of the Volume
3307 @type ext_params: dict
3308 @param ext_params: the EXT parameters
3309 @type size: string
3310 @param size: size of the Volume (in mebibytes)
3311 @type grow: string
3312 @param grow: new size of Volume after grow (in mebibytes)
3313 @type metadata: string
3314 @param metadata: metadata info of the Volume
3315 @rtype: dict
3316 @return: dict of environment variables
3317
3318 """
3319 vol_name = unique_id[1]
3320
3321 result = {}
3322 result["VOL_NAME"] = vol_name
3323
3324
3325 for pname, pvalue in ext_params.items():
3326 result["EXTP_%s" % pname.upper()] = str(pvalue)
3327
3328 if size is not None:
3329 result["VOL_SIZE"] = size
3330
3331 if grow is not None:
3332 result["VOL_NEW_SIZE"] = grow
3333
3334 if metadata is not None:
3335 result["VOL_METADATA"] = metadata
3336
3337 return result
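
# Illustrative sketch with hypothetical values, not part of this module:
# the environment produced by the function above for a grow operation.
def _ExampleGrowEnvironment():
  """Hypothetical helper showing the output of _ExtStorageEnvironment().

  """
  env = _ExtStorageEnvironment(("sample-provider", "vol-0001"),
                               {"redundancy": "2"},
                               size="1024", grow="2048", metadata=None)
  # the provider's grow script would see exactly these variables
  assert env == {
    "VOL_NAME": "vol-0001",
    "EXTP_REDUNDANCY": "2",
    "VOL_SIZE": "1024",
    "VOL_NEW_SIZE": "2048",
    }
  return env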
3338
3339
3340 def _VolumeLogName(kind, es_name, volume):
3341 """Compute the ExtStorage log filename for a given Volume and operation.
3342
3343 @type kind: string
3344 @param kind: the operation type (e.g. create, remove etc.)
3345 @type es_name: string
3346 @param es_name: the ExtStorage name
3347 @type volume: string
3348 @param volume: the name of the Volume inside the External Storage
3349
3350 """
3351
3352 if not os.path.isdir(pathutils.LOG_ES_DIR):
3353 _ThrowError("Cannot find log directory: %s", pathutils.LOG_ES_DIR)
3354
3355
3356 base = ("%s-%s-%s-%s.log" %
3357 (kind, es_name, volume, utils.TimestampForFilename()))
3358 return utils.PathJoin(pathutils.LOG_ES_DIR, base)
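
# Illustrative sketch with hypothetical names, not part of this module: the
# shape of the log file names produced by the function above.
def _ExampleVolumeLogName():
  """Hypothetical helper showing an ExtStorage log file name.

  """
  # something like <LOG_ES_DIR>/create-sample-provider-vol-0001-<timestamp>.log
  # (this raises if the log directory is missing, as the check above does)
  return _VolumeLogName("create", "sample-provider", "vol-0001")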
3359
3360
3361 DEV_MAP = {
3362 constants.LD_LV: LogicalVolume,
3363 constants.LD_DRBD8: DRBD8,
3364 constants.LD_BLOCKDEV: PersistentBlockDevice,
3365 constants.LD_RBD: RADOSBlockDevice,
3366 constants.LD_EXT: ExtStorageDevice,
3367 }
3368
3369 if constants.ENABLE_FILE_STORAGE or constants.ENABLE_SHARED_FILE_STORAGE:
3370 DEV_MAP[constants.LD_FILE] = FileStorage
3371
3372
3373 def _VerifyDiskType(dev_type):
3374   if dev_type not in DEV_MAP:
3375     raise errors.ProgrammerError("Invalid block device type '%s'" % dev_type)
3376
3377
3378 def _VerifyDiskParams(disk):
3379 """Verifies if all disk parameters are set.
3380
3381 """
3382 missing = set(constants.DISK_LD_DEFAULTS[disk.dev_type]) - set(disk.params)
3383 if missing:
3384 raise errors.ProgrammerError("Block device is missing disk parameters: %s" %
3385 missing)
3386
3387
3388 def FindDevice(disk, children):
3389 """Search for an existing, assembled device.
3390
3391   This will succeed only if the device exists and is assembled, but it
3392   does not perform any action to activate the device.
3393
3394 @type disk: L{objects.Disk}
3395 @param disk: the disk object to find
3396 @type children: list of L{bdev.BlockDev}
3397 @param children: the list of block devices that are children of the device
3398 represented by the disk parameter
3399
3400 """
3401 _VerifyDiskType(disk.dev_type)
3402 device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
3403 disk.params)
3404 if not device.attached:
3405 return None
3406 return device
3407
3408
3409 def Assemble(disk, children):
3410 """Try to attach or assemble an existing device.
3411
3412   This will attach to or assemble the device, as needed, to bring it
3413   fully up. It must be safe to run on already-assembled devices.
3414
3415 @type disk: L{objects.Disk}
3416 @param disk: the disk object to assemble
3417 @type children: list of L{bdev.BlockDev}
3418 @param children: the list of block devices that are children of the device
3419 represented by the disk parameter
3420
3421 """
3422 _VerifyDiskType(disk.dev_type)
3423 _VerifyDiskParams(disk)
3424 device = DEV_MAP[disk.dev_type](disk.physical_id, children, disk.size,
3425 disk.params)
3426 device.Assemble()
3427 return device
3428
3429
3430 -def Create(disk, children, excl_stor):
3431 """Create a device.
3432
3433 @type disk: L{objects.Disk}
3434 @param disk: the disk object to create
3435 @type children: list of L{bdev.BlockDev}
3436 @param children: the list of block devices that are children of the device
3437 represented by the disk parameter
3438 @type excl_stor: boolean
3439 @param excl_stor: Whether exclusive_storage is active
3440
3441 """
3442 _VerifyDiskType(disk.dev_type)
3443 _VerifyDiskParams(disk)
3444 device = DEV_MAP[disk.dev_type].Create(disk.physical_id, children, disk.size,
3445 disk.params, excl_stor)
3446 return device
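
# Illustrative sketch -- a hypothetical helper, not part of this module's
# API: how the entry points above are typically combined when a disk has to
# be brought up on a node.
def _ExampleEnsureDiskIsAssembled(disk, children):
  """Hypothetical helper combining FindDevice() and Assemble().

  """
  # cheap check first: attach to the device if it is already assembled
  device = FindDevice(disk, children)
  if device is not None:
    return device
  # otherwise do the full assembly, which is safe on assembled devices too
  return Assemble(disk, children)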
3447