31 """Functions used by the node daemon
32
33 @var _ALLOWED_UPLOAD_FILES: denotes which files are accepted in
34 the L{UploadFile} function
35 @var _ALLOWED_CLEAN_DIRS: denotes which directories are accepted
36 in the L{_CleanDirectory} function
37
38 """
39
40
41
42
43
44
45
46
47
48
import base64
import errno
import logging
import os
import os.path
import pycurl
import random
import re
import shutil
import signal
import socket
import stat
import tempfile
import time
import zlib

from ganeti import errors
from ganeti import http
from ganeti import utils
from ganeti import ssh
from ganeti import hypervisor
from ganeti.hypervisor import hv_base
from ganeti import constants
from ganeti.storage import bdev
from ganeti.storage import drbd
from ganeti.storage import filestorage
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import netutils
from ganeti import runtime
from ganeti import compat
from ganeti import pathutils
from ganeti import vcluster
from ganeti import ht
from ganeti.storage.base import BlockDev
from ganeti.storage.drbd import DRBD8
from ganeti import hooksmaster
from ganeti.rpc import transport
from ganeti.rpc.errors import NoMasterError, TimeoutError


_BOOT_ID_PATH = "/proc/sys/kernel/random/boot_id"
_ALLOWED_CLEAN_DIRS = compat.UniqueFrozenset([
  pathutils.DATA_DIR,
  pathutils.JOB_QUEUE_ARCHIVE_DIR,
  pathutils.QUEUE_DIR,
  pathutils.CRYPTO_KEYS_DIR,
  ])
_MAX_SSL_CERT_VALIDITY = 7 * 24 * 60 * 60
_X509_KEY_FILE = "key"
_X509_CERT_FILE = "cert"
_IES_STATUS_FILE = "status"
_IES_PID_FILE = "pid"
_IES_CA_FILE = "ca"


_LVSLINE_REGEX = re.compile(r"^ *([^|]+)\|([^|]+)\|([0-9.]+)\|([^|]{6,})\|?$")


_MASTER_START = "start"
_MASTER_STOP = "stop"


_RCMD_MAX_MODE = (stat.S_IRWXU |
                  stat.S_IRGRP | stat.S_IXGRP |
                  stat.S_IROTH | stat.S_IXOTH)


_RCMD_INVALID_DELAY = 10


_RCMD_LOCK_TIMEOUT = _RCMD_INVALID_DELAY * 0.8
127 """Class denoting RPC failure.
128
129 Its argument is the error message.
130
131 """
132
135 """Path of the file containing the reason of the instance status change.
136
137 @type instance_name: string
138 @param instance_name: The name of the instance
139 @rtype: string
140 @return: The path of the file
141
142 """
143 return utils.PathJoin(pathutils.INSTANCE_REASON_DIR, instance_name)
144
147 """Serialize a reason trail related to an instance change of state to file.
148
149 The exact location of the file depends on the name of the instance and on
150 the configuration of the Ganeti cluster defined at deploy time.
151
152 @type instance_name: string
153 @param instance_name: The name of the instance
154
155 @type trail: list of reasons
156 @param trail: reason trail
157
158 @rtype: None
159
160 """
161 json = serializer.DumpJson(trail)
162 filename = _GetInstReasonFilename(instance_name)
163 utils.WriteFile(filename, data=json)
164
165


def _Fail(msg, *args, **kwargs):
  """Log an error and then raise an RPCFail exception.

  This exception is then handled specially in the ganeti daemon and
  turned into a 'failed' return type. As such, this function is a
  useful shortcut for logging the error and returning it to the master
  daemon.

  @type msg: string
  @param msg: the text of the exception
  @raise RPCFail

  """
  if args:
    msg = msg % args
  if "log" not in kwargs or kwargs["log"]:
    if "exc" in kwargs and kwargs["exc"]:
      logging.exception(msg)
    else:
      logging.error(msg)
  raise RPCFail(msg)
190 """Simple wrapper to return a SimpleStore.
191
192 @rtype: L{ssconf.SimpleStore}
193 @return: a SimpleStore instance
194
195 """
196 return ssconf.SimpleStore()
197
200 """Simple wrapper to return an SshRunner.
201
202 @type cluster_name: str
203 @param cluster_name: the cluster name, which is needed
204 by the SshRunner constructor
205 @rtype: L{ssh.SshRunner}
206 @return: an SshRunner instance
207
208 """
209 return ssh.SshRunner(cluster_name)
210
213 """Unpacks data compressed by the RPC client.
214
215 @type data: list or tuple
216 @param data: Data sent by RPC client
217 @rtype: str
218 @return: Decompressed data
219
220 """
221 assert isinstance(data, (list, tuple))
222 assert len(data) == 2
223 (encoding, content) = data
224 if encoding == constants.RPC_ENCODING_NONE:
225 return content
226 elif encoding == constants.RPC_ENCODING_ZLIB_BASE64:
227 return zlib.decompress(base64.b64decode(content))
228 else:
229 raise AssertionError("Unknown data encoding")
230
233 """Removes all regular files in a directory.
234
235 @type path: str
236 @param path: the directory to clean
237 @type exclude: list
238 @param exclude: list of files to be excluded, defaults
239 to the empty list
240
241 """
242 if path not in _ALLOWED_CLEAN_DIRS:
243 _Fail("Path passed to _CleanDirectory not in allowed clean targets: '%s'",
244 path)
245
246 if not os.path.isdir(path):
247 return
248 if exclude is None:
249 exclude = []
250 else:
251
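    # Normalize excluded paths before comparing them below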
    exclude = [os.path.normpath(i) for i in exclude]

  for rel_name in utils.ListVisibleFiles(path):
    full_name = utils.PathJoin(path, rel_name)
    if full_name in exclude:
      continue
    if os.path.isfile(full_name) and not os.path.islink(full_name):
      utils.RemoveFile(full_name)
263 """Build the list of allowed upload files.
264
265 This is abstracted so that it's built only once at module import time.
266
267 """
268 allowed_files = set([
269 pathutils.CLUSTER_CONF_FILE,
270 pathutils.ETC_HOSTS,
271 pathutils.SSH_KNOWN_HOSTS_FILE,
272 pathutils.VNC_PASSWORD_FILE,
273 pathutils.RAPI_CERT_FILE,
274 pathutils.SPICE_CERT_FILE,
275 pathutils.SPICE_CACERT_FILE,
276 pathutils.RAPI_USERS_FILE,
277 pathutils.CONFD_HMAC_KEY,
278 pathutils.CLUSTER_DOMAIN_SECRET_FILE,
279 ])
280
281 for hv_name in constants.HYPER_TYPES:
282 hv_class = hypervisor.GetHypervisorClass(hv_name)
283 allowed_files.update(hv_class.GetAncillaryFiles()[0])
284
285 assert pathutils.FILE_STORAGE_PATHS_FILE not in allowed_files, \
286 "Allowed file storage paths should never be uploaded via RPC"
287
288 return frozenset(allowed_files)
289
290
291 _ALLOWED_UPLOAD_FILES = _BuildUploadFileList()


def GetMasterNodeName():
  """Returns the master node name.

  @rtype: string
  @return: name of the master node
  @raise RPCFail: in case of errors

  """
  try:
    return _GetConfig().GetMasterNode()
  except errors.ConfigurationError, err:
    _Fail("Cluster configuration incomplete: %s", err, exc=True)
320 """Decorator that runs hooks before and after the decorated function.
321
322 @type hook_opcode: string
323 @param hook_opcode: opcode of the hook
324 @type hooks_path: string
325 @param hooks_path: path of the hooks
326 @type env_builder_fn: function
327 @param env_builder_fn: function that returns a dictionary containing the
328 environment variables for the hooks. Will get all the parameters of the
329 decorated function.
330 @raise RPCFail: in case of pre-hook failure
331
332 """
333 def decorator(fn):
334 def wrapper(*args, **kwargs):
335 _, myself = ssconf.GetMasterAndMyself()
336 nodes = ([myself], [myself])
337
338 env_fn = compat.partial(env_builder_fn, *args, **kwargs)
339
340 cfg = _GetConfig()
341 hr = HooksRunner()
342 hm = hooksmaster.HooksMaster(hook_opcode, hooks_path, nodes,
343 hr.RunLocalHooks, None, env_fn, None,
344 logging.warning, cfg.GetClusterName(),
345 cfg.GetMasterNode())
346 hm.RunPhase(constants.HOOKS_PHASE_PRE)
347 result = fn(*args, **kwargs)
348 hm.RunPhase(constants.HOOKS_PHASE_POST)
349
350 return result
351 return wrapper
352 return decorator
353
356 """Builds environment variables for master IP hooks.
357
358 @type master_params: L{objects.MasterNetworkParameters}
359 @param master_params: network parameters of the master
360 @type use_external_mip_script: boolean
361 @param use_external_mip_script: whether to use an external master IP
362 address setup script (unused, but necessary per the implementation of the
363 _RunLocalHooks decorator)
364
365 """
366
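  # use_external_mip_script is deliberately unused here; see the docstring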
  ver = netutils.IPAddress.GetVersionFromAddressFamily(master_params.ip_family)
  env = {
    "MASTER_NETDEV": master_params.netdev,
    "MASTER_IP": master_params.ip,
    "MASTER_NETMASK": str(master_params.netmask),
    "CLUSTER_IP_VERSION": str(ver),
  }

  return env
379 """Execute the master IP address setup script.
380
381 @type master_params: L{objects.MasterNetworkParameters}
382 @param master_params: network parameters of the master
383 @type action: string
384 @param action: action to pass to the script. Must be one of
385 L{backend._MASTER_START} or L{backend._MASTER_STOP}
386 @type use_external_mip_script: boolean
387 @param use_external_mip_script: whether to use an external master IP
388 address setup script
389 @raise backend.RPCFail: if there are errors during the execution of the
390 script
391
392 """
393 env = _BuildMasterIpEnv(master_params)
394
395 if use_external_mip_script:
396 setup_script = pathutils.EXTERNAL_MASTER_SETUP_SCRIPT
397 else:
398 setup_script = pathutils.DEFAULT_MASTER_SETUP_SCRIPT
399
400 result = utils.RunCmd([setup_script, action], env=env, reset_env=True)
401
402 if result.failed:
403 _Fail("Failed to %s the master IP. Script return value: %s, output: '%s'" %
404 (action, result.exit_code, result.output), log=True)
405


@RunLocalHooks(constants.FAKE_OP_MASTER_TURNUP, "master-ip-turnup",
               _BuildMasterIpEnv)
def ActivateMasterIp(master_params, use_external_mip_script):
  """Activate the IP address of the master daemon.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script
  @raise RPCFail: in case of errors during the IP startup

  """
  _RunMasterSetupScript(master_params, _MASTER_START,
                        use_external_mip_script)
425 """Activate local node as master node.
426
427 The function will start the master daemons (ganeti-masterd and ganeti-rapi).
428
429 @type no_voting: boolean
430 @param no_voting: whether to start ganeti-masterd without a node vote
431 but still non-interactively
432 @rtype: None
433
434 """
435
436 if no_voting:
437 daemon_args = "--no-voting --yes-do-it"
438 else:
439 daemon_args = ""
440
441 env = {
442 "EXTRA_LUXID_ARGS": daemon_args,
443 "EXTRA_WCONFD_ARGS": daemon_args,
444 }
445
446 result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-master"], env=env)
447 if result.failed:
448 msg = "Can't start Ganeti master: %s" % result.output
449 logging.error(msg)
450 _Fail(msg)
451
452


@RunLocalHooks(constants.FAKE_OP_MASTER_TURNDOWN, "master-ip-turndown",
               _BuildMasterIpEnv)
def DeactivateMasterIp(master_params, use_external_mip_script):
  """Deactivate the master IP on this node.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script
  @raise RPCFail: in case of errors during the IP turndown

  """
  _RunMasterSetupScript(master_params, _MASTER_STOP,
                        use_external_mip_script)
471 """Stop the master daemons on this node.
472
473 Stop the master daemons (ganeti-masterd and ganeti-rapi) on this node.
474
475 @rtype: None
476
477 """
478
479
480
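  # Errors while stopping the master daemons are logged but do not fail
  # this RPC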
  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-master"])
  if result.failed:
    logging.error("Could not stop Ganeti master, command %s had exitcode %s"
                  " and error %s",
                  result.cmd, result.exit_code, result.output)
489 """Change the netmask of the master IP.
490
491 @param old_netmask: the old value of the netmask
492 @param netmask: the new value of the netmask
493 @param master_ip: the master IP
494 @param master_netdev: the master network device
495
496 """
497 if old_netmask == netmask:
498 return
499
500 if not netutils.IPAddress.Own(master_ip):
501 _Fail("The master IP address is not up, not attempting to change its"
502 " netmask")
503
504 result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "add",
505 "%s/%s" % (master_ip, netmask),
506 "dev", master_netdev, "label",
507 "%s:0" % master_netdev])
508 if result.failed:
509 _Fail("Could not set the new netmask on the master IP address")
510
511 result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "del",
512 "%s/%s" % (master_ip, old_netmask),
513 "dev", master_netdev, "label",
514 "%s:0" % master_netdev])
515 if result.failed:
516 _Fail("Could not bring down the master IP address with the old netmask")
517
520 """Modify a host entry in /etc/hosts.
521
522 @param mode: The mode to operate. Either add or remove entry
523 @param host: The host to operate on
524 @param ip: The ip associated with the entry
525
526 """
527 if mode == constants.ETC_HOSTS_ADD:
528 if not ip:
529 RPCFail("Mode 'add' needs 'ip' parameter, but parameter not"
530 " present")
531 utils.AddHostToEtcHosts(host, ip)
532 elif mode == constants.ETC_HOSTS_REMOVE:
533 if ip:
534 RPCFail("Mode 'remove' does not allow 'ip' parameter, but"
535 " parameter is present")
536 utils.RemoveHostFromEtcHosts(host)
537 else:
538 RPCFail("Mode not supported")
539
584
587 """Performs sanity checks for storage parameters.
588
589 @type params: list
590 @param params: list of storage parameters
591 @type num_params: int
592 @param num_params: expected number of parameters
593
594 """
595 if params is None:
596 raise errors.ProgrammerError("No storage parameters for storage"
597 " reporting is provided.")
598 if not isinstance(params, list):
599 raise errors.ProgrammerError("The storage parameters are not of type"
600 " list: '%s'" % params)
601 if not len(params) == num_params:
602 raise errors.ProgrammerError("Did not receive the expected number of"
603 "storage parameters: expected %s,"
604 " received '%s'" % (num_params, len(params)))
605
608 """Performs sanity check for the 'exclusive storage' flag.
609
610 @see: C{_CheckStorageParams}
611
612 """
613 _CheckStorageParams(params, 1)
614 excl_stor = params[0]
615 if not isinstance(params[0], bool):
616 raise errors.ProgrammerError("Exclusive storage parameter is not"
617 " boolean: '%s'." % excl_stor)
618 return excl_stor
619
622 """Wrapper around C{_GetVgInfo} which checks the storage parameters.
623
624 @type name: string
625 @param name: name of the volume group
626 @type params: list
627 @param params: list of storage parameters, which in this case should be
628 containing only one for exclusive storage
629
630 """
631 excl_stor = _CheckLvmStorageParams(params)
632 return _GetVgInfo(name, excl_stor)
633
637 """Retrieves information about a LVM volume group.
638
639 """
640
641 vginfo = info_fn([name], excl_stor)
642 if vginfo:
643 vg_free = int(round(vginfo[0][0], 0))
644 vg_size = int(round(vginfo[0][1], 0))
645 else:
646 vg_free = None
647 vg_size = None
648
649 return {
650 "type": constants.ST_LVM_VG,
651 "name": name,
652 "storage_free": vg_free,
653 "storage_size": vg_size,
654 }


def _GetLvmPvSpaceInfo(name, params):
  """Wrapper around C{_GetVgSpindlesInfo} with sanity checks.

  @see: C{_GetLvmVgSpaceInfo}

  """
  excl_stor = _CheckLvmStorageParams(params)
  return _GetVgSpindlesInfo(name, excl_stor)
669 """Retrieves information about spindles in an LVM volume group.
670
671 @type name: string
672 @param name: VG name
673 @type excl_stor: bool
674 @param excl_stor: exclusive storage
675 @rtype: dict
676 @return: dictionary whose keys are "name", "vg_free", "vg_size" for VG name,
677 free spindles, total spindles respectively
678
679 """
680 if excl_stor:
681 (vg_free, vg_size) = info_fn(name)
682 else:
683 vg_free = 0
684 vg_size = 0
685 return {
686 "type": constants.ST_LVM_PV,
687 "name": name,
688 "storage_free": vg_free,
689 "storage_size": vg_size,
690 }
691
694 """Retrieves node information from a hypervisor.
695
696 The information returned depends on the hypervisor. Common items:
697
698 - vg_size is the size of the configured volume group in MiB
699 - vg_free is the free size of the volume group in MiB
700 - memory_dom0 is the memory allocated for domain0 in MiB
701 - memory_free is the currently available (free) ram in MiB
702 - memory_total is the total number of ram in MiB
703 - hv_version: the hypervisor version, if available
704
705 @type hvparams: dict of string
706 @param hvparams: the hypervisor's hvparams
707
708 """
709 return get_hv_fn(name).GetNodeInfo(hvparams=hvparams)
710
713 """Retrieves node information for all hypervisors.
714
715 See C{_GetHvInfo} for information on the output.
716
717 @type hv_specs: list of pairs (string, dict of strings)
718 @param hv_specs: list of pairs of a hypervisor's name and its hvparams
719
720 """
721 if hv_specs is None:
722 return None
723
724 result = []
725 for hvname, hvparams in hv_specs:
726 result.append(_GetHvInfo(hvname, hvparams, get_hv_fn))
727 return result
728
731 """Calls C{fn} for all names in C{names} and returns a dictionary.
732
733 @rtype: None or dict
734
735 """
736 if names is None:
737 return None
738 else:
739 return map(fn, names)
740
743 """Gives back a hash with different information about the node.
744
745 @type storage_units: list of tuples (string, string, list)
746 @param storage_units: List of tuples (storage unit, identifier, parameters) to
747 ask for disk space information. In case of lvm-vg, the identifier is
748 the VG name. The parameters can contain additional, storage-type-specific
749 parameters, for example exclusive storage for lvm storage.
750 @type hv_specs: list of pairs (string, dict of strings)
751 @param hv_specs: list of pairs of a hypervisor's name and its hvparams
752 @rtype: tuple; (string, None/dict, None/dict)
753 @return: Tuple containing boot ID, volume group information and hypervisor
754 information
755
756 """
757 bootid = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
758 storage_info = _GetNamedNodeInfo(
759 storage_units,
760 (lambda (storage_type, storage_key, storage_params):
761 _ApplyStorageInfoFunction(storage_type, storage_key, storage_params)))
762 hv_info = _GetHvInfoAll(hv_specs)
763 return (bootid, storage_info, hv_info)
764
767 """Wrapper around filestorage.GetSpaceInfo.
768
769 The purpose of this wrapper is to call filestorage.GetFileStorageSpaceInfo
770 and ignore the *args parameter to not leak it into the filestorage
771 module's code.
772
773 @see: C{filestorage.GetFileStorageSpaceInfo} for description of the
774 parameters.
775
776 """
777 _CheckStorageParams(params, 0)
778 return filestorage.GetFileStorageSpaceInfo(path)
779
780
781
_STORAGE_TYPE_INFO_FN = {
  constants.ST_BLOCK: None,
  constants.ST_DISKLESS: None,
  constants.ST_EXT: None,
  constants.ST_FILE: _GetFileStorageSpaceInfo,
  constants.ST_LVM_PV: _GetLvmPvSpaceInfo,
  constants.ST_LVM_VG: _GetLvmVgSpaceInfo,
  constants.ST_SHARED_FILE: None,
  constants.ST_GLUSTER: None,
  constants.ST_RADOS: None,
  }
796 """Looks up and applies the correct function to calculate free and total
797 storage for the given storage type.
798
799 @type storage_type: string
800 @param storage_type: the storage type for which the storage shall be reported.
801 @type storage_key: string
802 @param storage_key: identifier of a storage unit, e.g. the volume group name
803 of an LVM storage unit
804 @type args: any
805 @param args: various parameters that can be used for storage reporting. These
806 parameters and their semantics vary from storage type to storage type and
807 are just propagated in this function.
808 @return: the results of the application of the storage space function (see
809 _STORAGE_TYPE_INFO_FN) if storage space reporting is implemented for that
810 storage type
811 @raises NotImplementedError: for storage types who don't support space
812 reporting yet
813 """
814 fn = _STORAGE_TYPE_INFO_FN[storage_type]
815 if fn is not None:
816 return fn(storage_key, *args)
817 else:
818 raise NotImplementedError
819
822 """Check that PVs are not shared among LVs
823
824 @type pvi_list: list of L{objects.LvmPvInfo} objects
825 @param pvi_list: information about the PVs
826
827 @rtype: list of tuples (string, list of strings)
828 @return: offending volumes, as tuples: (pv_name, [lv1_name, lv2_name...])
829
830 """
831 res = []
832 for pvi in pvi_list:
833 if len(pvi.lv_list) > 1:
834 res.append((pvi.name, pvi.lv_list))
835 return res
836
840 """Verifies the hypervisor. Appends the results to the 'results' list.
841
842 @type what: C{dict}
843 @param what: a dictionary of things to check
844 @type vm_capable: boolean
845 @param vm_capable: whether or not this node is vm capable
846 @type result: dict
847 @param result: dictionary of verification results; results of the
848 verifications in this function will be added here
849 @type all_hvparams: dict of dict of string
850 @param all_hvparams: dictionary mapping hypervisor names to hvparams
851 @type get_hv_fn: function
852 @param get_hv_fn: function to retrieve the hypervisor, to improve testability
853
854 """
855 if not vm_capable:
856 return
857
858 if constants.NV_HYPERVISOR in what:
859 result[constants.NV_HYPERVISOR] = {}
860 for hv_name in what[constants.NV_HYPERVISOR]:
861 hvparams = all_hvparams[hv_name]
862 try:
863 val = get_hv_fn(hv_name).Verify(hvparams=hvparams)
864 except errors.HypervisorError, err:
865 val = "Error while checking hypervisor: %s" % str(err)
866 result[constants.NV_HYPERVISOR][hv_name] = val
867
871 """Verifies the hvparams. Appends the results to the 'results' list.
872
873 @type what: C{dict}
874 @param what: a dictionary of things to check
875 @type vm_capable: boolean
876 @param vm_capable: whether or not this node is vm capable
877 @type result: dict
878 @param result: dictionary of verification results; results of the
879 verifications in this function will be added here
880 @type get_hv_fn: function
881 @param get_hv_fn: function to retrieve the hypervisor, to improve testability
882
883 """
884 if not vm_capable:
885 return
886
887 if constants.NV_HVPARAMS in what:
888 result[constants.NV_HVPARAMS] = []
889 for source, hv_name, hvparms in what[constants.NV_HVPARAMS]:
890 try:
891 logging.info("Validating hv %s, %s", hv_name, hvparms)
892 get_hv_fn(hv_name).ValidateParameters(hvparms)
893 except errors.HypervisorError, err:
894 result[constants.NV_HVPARAMS].append((source, hv_name, str(err)))
895
898 """Verifies the instance list.
899
900 @type what: C{dict}
901 @param what: a dictionary of things to check
902 @type vm_capable: boolean
903 @param vm_capable: whether or not this node is vm capable
904 @type result: dict
905 @param result: dictionary of verification results; results of the
906 verifications in this function will be added here
907 @type all_hvparams: dict of dict of string
908 @param all_hvparams: dictionary mapping hypervisor names to hvparams
909
910 """
911 if constants.NV_INSTANCELIST in what and vm_capable:
912
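    # GetInstanceList can fail with RPCFail; report the error as the result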
    try:
      val = GetInstanceList(what[constants.NV_INSTANCELIST],
                            all_hvparams=all_hvparams)
    except RPCFail, err:
      val = str(err)
    result[constants.NV_INSTANCELIST] = val
922 """Verifies the node info.
923
924 @type what: C{dict}
925 @param what: a dictionary of things to check
926 @type vm_capable: boolean
927 @param vm_capable: whether or not this node is vm capable
928 @type result: dict
929 @param result: dictionary of verification results; results of the
930 verifications in this function will be added here
931 @type all_hvparams: dict of dict of string
932 @param all_hvparams: dictionary mapping hypervisor names to hvparams
933
934 """
935 if constants.NV_HVINFO in what and vm_capable:
936 hvname = what[constants.NV_HVINFO]
937 hyper = hypervisor.GetHypervisor(hvname)
938 hvparams = all_hvparams[hvname]
939 result[constants.NV_HVINFO] = hyper.GetNodeInfo(hvparams=hvparams)
940
943 """Verify the existance and validity of the client SSL certificate.
944
945 Also, verify that the client certificate is not self-signed. Self-
946 signed client certificates stem from Ganeti versions 2.12.0 - 2.12.4
947 and should be replaced by client certificates signed by the server
948 certificate. Hence we output a warning when we encounter a self-signed
949 one.
950
951 """
952 create_cert_cmd = "gnt-cluster renew-crypto --new-node-certificates"
953 if not os.path.exists(cert_file):
954 return (constants.CV_ERROR,
955 "The client certificate does not exist. Run '%s' to create"
956 " client certificates for all nodes." % create_cert_cmd)
957
958 (errcode, msg) = utils.VerifyCertificate(cert_file)
959 if errcode is not None:
960 return (errcode, msg)
961
962 (errcode, msg) = utils.IsCertificateSelfSigned(cert_file)
963 if errcode is not None:
964 return (errcode, msg)
965
966
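  # all checks passed; return the digest of the client certificate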
  return (None, utils.GetCertificateDigest(cert_filename=cert_file))


def VerifyNode(what, cluster_name, all_hvparams, node_groups, groups_cfg):
  """Verify the status of the local node.

  Based on the input L{what} parameter, various checks are done on the
  local node.

  If the I{filelist} key is present, this list of
  files is checksummed and the file/checksum pairs are returned.

  If the I{nodelist} key is present, we check that we have
  connectivity via ssh with the target nodes (and check the hostname
  report).

  If the I{node-net-test} key is present, we check that we have
  connectivity to the given nodes via both primary IP and, if
  applicable, secondary IPs.

  @type what: C{dict}
  @param what: a dictionary of things to check:
    - filelist: list of files for which to compute checksums
    - nodelist: list of nodes we should check ssh communication with
    - node-net-test: list of nodes we should check node daemon port
      connectivity with
    - hypervisor: list with hypervisors to run the verify for
  @type cluster_name: string
  @param cluster_name: the cluster's name
  @type all_hvparams: dict of dict of strings
  @param all_hvparams: a dictionary mapping hypervisor names to hvparams
  @type node_groups: a dict of strings
  @param node_groups: node _names_ mapped to their group uuids (it's enough to
      have only those nodes that are in `what["nodelist"]`)
  @type groups_cfg: a dict of dict of strings
  @param groups_cfg: a dictionary mapping group uuids to their configuration
  @rtype: dict
  @return: a dictionary with the same keys as the input dict, and
      values representing the result of the checks

  """
  result = {}
  my_name = netutils.Hostname.GetSysName()
  port = netutils.GetDaemonPort(constants.NODED)
  vm_capable = my_name not in what.get(constants.NV_NONVMNODES, [])

  _VerifyHypervisors(what, vm_capable, result, all_hvparams)
  _VerifyHvparams(what, vm_capable, result)

  if constants.NV_FILELIST in what:
    fingerprints = utils.FingerprintFiles(map(vcluster.LocalizeVirtualPath,
                                              what[constants.NV_FILELIST]))
    result[constants.NV_FILELIST] = \
      dict((vcluster.MakeVirtualPath(key), value)
           for (key, value) in fingerprints.items())

  if constants.NV_CLIENT_CERT in what:
    result[constants.NV_CLIENT_CERT] = _VerifyClientCertificate()

  if constants.NV_NODELIST in what:
    (nodes, bynode) = what[constants.NV_NODELIST]

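    # Add the nodes that are to be checked specifically by this node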
    try:
      nodes.extend(bynode[my_name])
    except KeyError:
      pass

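    # Use a random order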
    random.shuffle(nodes)

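    # Try to contact all nodes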
    val = {}
    for node in nodes:
      params = groups_cfg.get(node_groups.get(node))
      ssh_port = params["ndparams"].get(constants.ND_SSH_PORT)
      logging.debug("Ssh port %s (None = default) for node %s",
                    str(ssh_port), node)
      success, message = _GetSshRunner(cluster_name). \
                            VerifyNodeHostname(node, ssh_port)
      if not success:
        val[node] = message

    result[constants.NV_NODELIST] = val

  if constants.NV_NODENETTEST in what:
    result[constants.NV_NODENETTEST] = tmp = {}
    my_pip = my_sip = None
    for name, pip, sip in what[constants.NV_NODENETTEST]:
      if name == my_name:
        my_pip = pip
        my_sip = sip
        break
    if not my_pip:
      tmp[my_name] = ("Can't find my own primary/secondary IP"
                      " in the node list")
    else:
      for name, pip, sip in what[constants.NV_NODENETTEST]:
        fail = []
        if not netutils.TcpPing(pip, port, source=my_pip):
          fail.append("primary")
        if sip != pip:
          if not netutils.TcpPing(sip, port, source=my_sip):
            fail.append("secondary")
        if fail:
          tmp[name] = ("failure using the %s interface(s)" %
                       " and ".join(fail))

  if constants.NV_MASTERIP in what:
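    # check the reachability of the master IP from this node (from
    # localhost if we are the master ourselves)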
    master_name, master_ip = what[constants.NV_MASTERIP]
    if master_name == my_name:
      source = constants.IP4_ADDRESS_LOCALHOST
    else:
      source = None
    result[constants.NV_MASTERIP] = netutils.TcpPing(master_ip, port,
                                                     source=source)

  if constants.NV_USERSCRIPTS in what:
    result[constants.NV_USERSCRIPTS] = \
      [script for script in what[constants.NV_USERSCRIPTS]
       if not utils.IsExecutable(script)]

  if constants.NV_OOB_PATHS in what:
    result[constants.NV_OOB_PATHS] = tmp = []
    for path in what[constants.NV_OOB_PATHS]:
      try:
        st = os.stat(path)
      except OSError, err:
        tmp.append("error stat()'ing out of band helper: %s" % err)
      else:
        if stat.S_ISREG(st.st_mode):
          if stat.S_IMODE(st.st_mode) & stat.S_IXUSR:
            tmp.append(None)
          else:
            tmp.append("out of band helper %s is not executable" % path)
        else:
          tmp.append("out of band helper %s is not a file" % path)

  if constants.NV_LVLIST in what and vm_capable:
    try:
      val = GetVolumeList(utils.ListVolumeGroups().keys())
    except RPCFail, err:
      val = str(err)
    result[constants.NV_LVLIST] = val

  _VerifyInstanceList(what, vm_capable, result, all_hvparams)

  if constants.NV_VGLIST in what and vm_capable:
    result[constants.NV_VGLIST] = utils.ListVolumeGroups()

  if constants.NV_PVLIST in what and vm_capable:
    check_exclusive_pvs = constants.NV_EXCLUSIVEPVS in what
    val = bdev.LogicalVolume.GetPVInfo(what[constants.NV_PVLIST],
                                       filter_allocatable=False,
                                       include_lvs=check_exclusive_pvs)
    if check_exclusive_pvs:
      result[constants.NV_EXCLUSIVEPVS] = _CheckExclusivePvs(val)
      for pvi in val:
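        # Avoid sending useless data on the wire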
        pvi.lv_list = []
    result[constants.NV_PVLIST] = map(objects.LvmPvInfo.ToDict, val)

  if constants.NV_VERSION in what:
    result[constants.NV_VERSION] = (constants.PROTOCOL_VERSION,
                                    constants.RELEASE_VERSION)

  _VerifyNodeInfo(what, vm_capable, result, all_hvparams)

  if constants.NV_DRBDVERSION in what and vm_capable:
    try:
      drbd_version = DRBD8.GetProcInfo().GetVersionString()
    except errors.BlockDeviceError, err:
      logging.warning("Can't get DRBD version", exc_info=True)
      drbd_version = str(err)
    result[constants.NV_DRBDVERSION] = drbd_version

  if constants.NV_DRBDLIST in what and vm_capable:
    try:
      used_minors = drbd.DRBD8.GetUsedDevs()
    except errors.BlockDeviceError, err:
      logging.warning("Can't get used minors list", exc_info=True)
      used_minors = str(err)
    result[constants.NV_DRBDLIST] = used_minors

  if constants.NV_DRBDHELPER in what and vm_capable:
    status = True
    try:
      payload = drbd.DRBD8.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      logging.error("Can't get DRBD usermode helper: %s", str(err))
      status = False
      payload = str(err)
    result[constants.NV_DRBDHELPER] = (status, payload)

  if constants.NV_NODESETUP in what:
    result[constants.NV_NODESETUP] = tmpr = []
    if not os.path.isdir("/sys/block") or not os.path.isdir("/sys/class/net"):
      tmpr.append("The sysfs filesystem doesn't seem to be mounted"
                  " under /sys, missing required directories /sys/block"
                  " and /sys/class/net")
    if (not os.path.isdir("/proc/sys") or
        not os.path.isfile("/proc/sysrq-trigger")):
      tmpr.append("The procfs filesystem doesn't seem to be mounted"
                  " under /proc, missing required directory /proc/sys and"
                  " the file /proc/sysrq-trigger")

  if constants.NV_TIME in what:
    result[constants.NV_TIME] = utils.SplitTime(time.time())

  if constants.NV_OSLIST in what and vm_capable:
    result[constants.NV_OSLIST] = DiagnoseOS()

  if constants.NV_BRIDGES in what and vm_capable:
    result[constants.NV_BRIDGES] = [bridge
                                    for bridge in what[constants.NV_BRIDGES]
                                    if not utils.BridgeExists(bridge)]

  if what.get(constants.NV_ACCEPTED_STORAGE_PATHS) == my_name:
    result[constants.NV_ACCEPTED_STORAGE_PATHS] = \
      filestorage.ComputeWrongFileStoragePaths()

  if what.get(constants.NV_FILE_STORAGE_PATH):
    pathresult = filestorage.CheckFileStoragePath(
      what[constants.NV_FILE_STORAGE_PATH])
    if pathresult:
      result[constants.NV_FILE_STORAGE_PATH] = pathresult

  if what.get(constants.NV_SHARED_FILE_STORAGE_PATH):
    pathresult = filestorage.CheckFileStoragePath(
      what[constants.NV_SHARED_FILE_STORAGE_PATH])
    if pathresult:
      result[constants.NV_SHARED_FILE_STORAGE_PATH] = pathresult

  return result
1206 """Perform actions on the node's cryptographic tokens.
1207
1208 Token types can be 'ssl' or 'ssh'. So far only some actions are implemented
1209 for 'ssl'. Action 'get' returns the digest of the public client ssl
1210 certificate. Action 'create' creates a new client certificate and private key
1211 and also returns the digest of the certificate. The third parameter of a
1212 token request are optional parameters for the actions, so far only the
1213 filename is supported.
1214
1215 @type token_requests: list of tuples of (string, string, dict), where the
1216 first string is in constants.CRYPTO_TYPES, the second in
1217 constants.CRYPTO_ACTIONS. The third parameter is a dictionary of string
1218 to string.
1219 @param token_requests: list of requests of cryptographic tokens and actions
1220 to perform on them. The actions come with a dictionary of options.
1221 @rtype: list of tuples (string, string)
1222 @return: list of tuples of the token type and the public crypto token
1223
1224 """
1225 tokens = []
1226 for (token_type, action, _) in token_requests:
1227 if token_type not in constants.CRYPTO_TYPES:
1228 raise errors.ProgrammerError("Token type '%s' not supported." %
1229 token_type)
1230 if action not in constants.CRYPTO_ACTIONS:
1231 raise errors.ProgrammerError("Action '%s' is not supported." %
1232 action)
1233 if token_type == constants.CRYPTO_TYPE_SSL_DIGEST:
1234 tokens.append((token_type,
1235 utils.GetCertificateDigest()))
1236 return tokens
1237
1240 """Ensures the given daemon is running or stopped.
1241
1242 @type daemon_name: string
1243 @param daemon_name: name of the daemon (e.g., constants.KVMD)
1244
1245 @type run: bool
1246 @param run: whether to start or stop the daemon
1247
1248 @rtype: bool
1249 @return: 'True' if daemon successfully started/stopped,
1250 'False' otherwise
1251
1252 """
1253 allowed_daemons = [constants.KVMD]
1254
1255 if daemon_name not in allowed_daemons:
1256 fn = lambda _: False
1257 elif run:
1258 fn = utils.EnsureDaemon
1259 else:
1260 fn = utils.StopDaemon
1261
1262 return fn(daemon_name)
1263
1266 """Return the size of the given block devices
1267
1268 @type devices: list
1269 @param devices: list of block device nodes to query
1270 @rtype: dict
1271 @return:
1272 dictionary of all block devices under /dev (key). The value is their
1273 size in MiB.
1274
1275 {'/dev/disk/by-uuid/123456-12321231-312312-312': 124}
1276
1277 """
1278 DEV_PREFIX = "/dev/"
1279 blockdevs = {}
1280
1281 for devpath in devices:
1282 if not utils.IsBelowDir(DEV_PREFIX, devpath):
1283 continue
1284
1285 try:
1286 st = os.stat(devpath)
1287 except EnvironmentError, err:
1288 logging.warning("Error stat()'ing device %s: %s", devpath, str(err))
1289 continue
1290
1291 if stat.S_ISBLK(st.st_mode):
1292 result = utils.RunCmd(["blockdev", "--getsize64", devpath])
1293 if result.failed:
1294
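        # Do not fail; simply skip devices whose size cannot be determined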
        logging.warning("Cannot get size for block device %s", devpath)
        continue

      size = int(result.stdout) / (1024 * 1024)
      blockdevs[devpath] = size
  return blockdevs


def GetVolumeList(vg_names):
  """Compute list of logical volumes and their size.

  @type vg_names: list
  @param vg_names: the volume groups whose LVs we should list, or
    empty for all volume groups
  @rtype: dict
  @return:
    dictionary of all partitions (key) with value being a tuple of
    their size (in MiB), inactive and online status::

      {'xenvg/test1': ('20.06', True, True)}

    in case of errors, a string is returned with the error
    details.

  """
  lvs = {}
  sep = "|"
  if not vg_names:
    vg_names = []
  result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
                         "--separator=%s" % sep,
                         "-ovg_name,lv_name,lv_size,lv_attr"] + vg_names)
  if result.failed:
    _Fail("Failed to list logical volumes, lvs output: %s", result.output)

  for line in result.stdout.splitlines():
    line = line.strip()
    match = _LVSLINE_REGEX.match(line)
    if not match:
      logging.error("Invalid line returned from lvs output: '%s'", line)
      continue
    vg_name, name, size, attr = match.groups()
    inactive = attr[4] == "-"
    online = attr[5] == "o"
    virtual = attr[0] == "v"
    if virtual:
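      # we don't want to report such volumes as existing, since they
      # don't really hold data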
      continue
    lvs[vg_name + "/" + name] = (size, inactive, online)

  return lvs


def ListVolumeGroups():
  """List the volume groups and their size.

  @rtype: dict
  @return: dictionary with keys volume name and values the
    size of the volume

  """
  return utils.ListVolumeGroups()
1361 """List all volumes on this node.
1362
1363 @rtype: list
1364 @return:
1365 A list of dictionaries, each having four keys:
1366 - name: the logical volume name,
1367 - size: the size of the logical volume
1368 - dev: the physical device on which the LV lives
1369 - vg: the volume group to which it belongs
1370
1371 In case of errors, we return an empty list and log the
1372 error.
1373
1374 Note that since a logical volume can live on multiple physical
1375 volumes, the resulting list might include a logical volume
1376 multiple times.
1377
1378 """
1379 result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
1380 "--separator=|",
1381 "--options=lv_name,lv_size,devices,vg_name"])
1382 if result.failed:
1383 _Fail("Failed to list logical volumes, lvs output: %s",
1384 result.output)
1385
1386 def parse_dev(dev):
1387 return dev.split("(")[0]
1388
1389 def handle_dev(dev):
1390 return [parse_dev(x) for x in dev.split(",")]
1391
1392 def map_line(line):
1393 line = [v.strip() for v in line]
1394 return [{"name": line[0], "size": line[1],
1395 "dev": dev, "vg": line[3]} for dev in handle_dev(line[2])]
1396
1397 all_devs = []
1398 for line in result.stdout.splitlines():
1399 if line.count("|") >= 3:
1400 all_devs.extend(map_line(line.split("|")))
1401 else:
1402 logging.warning("Strange line in the output from lvs: '%s'", line)
1403 return all_devs
1404
1407 """Check if a list of bridges exist on the current node.
1408
1409 @rtype: boolean
1410 @return: C{True} if all of them exist, C{False} otherwise
1411
1412 """
1413 missing = []
1414 for bridge in bridges_list:
1415 if not utils.BridgeExists(bridge):
1416 missing.append(bridge)
1417
1418 if missing:
1419 _Fail("Missing bridges %s", utils.CommaJoin(missing))
1420
1424 """Provides a list of instances of the given hypervisor.
1425
1426 @type hname: string
1427 @param hname: name of the hypervisor
1428 @type hvparams: dict of strings
1429 @param hvparams: hypervisor parameters for the given hypervisor
1430 @type get_hv_fn: function
1431 @param get_hv_fn: function that returns a hypervisor for the given hypervisor
1432 name; optional parameter to increase testability
1433
1434 @rtype: list
1435 @return: a list of all running instances on the current node
1436 - instance1.example.com
1437 - instance2.example.com
1438
1439 """
1440 try:
1441 return get_hv_fn(hname).ListInstances(hvparams=hvparams)
1442 except errors.HypervisorError, err:
1443 _Fail("Error enumerating instances (hypervisor %s): %s",
1444 hname, err, exc=True)
1445
1449 """Provides a list of instances.
1450
1451 @type hypervisor_list: list
1452 @param hypervisor_list: the list of hypervisors to query information
1453 @type all_hvparams: dict of dict of strings
1454 @param all_hvparams: a dictionary mapping hypervisor types to respective
1455 cluster-wide hypervisor parameters
1456 @type get_hv_fn: function
1457 @param get_hv_fn: function that returns a hypervisor for the given hypervisor
1458 name; optional parameter to increase testability
1459
1460 @rtype: list
1461 @return: a list of all running instances on the current node
1462 - instance1.example.com
1463 - instance2.example.com
1464
1465 """
1466 results = []
1467 for hname in hypervisor_list:
1468 hvparams = all_hvparams[hname]
1469 results.extend(GetInstanceListForHypervisor(hname, hvparams=hvparams,
1470 get_hv_fn=get_hv_fn))
1471 return results
1472
1475 """Gives back the information about an instance as a dictionary.
1476
1477 @type instance: string
1478 @param instance: the instance name
1479 @type hname: string
1480 @param hname: the hypervisor type of the instance
1481 @type hvparams: dict of strings
1482 @param hvparams: the instance's hvparams
1483
1484 @rtype: dict
1485 @return: dictionary with the following keys:
1486 - memory: memory size of instance (int)
1487 - state: state of instance (HvInstanceState)
1488 - time: cpu time of instance (float)
1489 - vcpus: the number of vcpus (int)
1490
1491 """
1492 output = {}
1493
1494 iinfo = hypervisor.GetHypervisor(hname).GetInstanceInfo(instance,
1495 hvparams=hvparams)
1496 if iinfo is not None:
1497 output["memory"] = iinfo[2]
1498 output["vcpus"] = iinfo[3]
1499 output["state"] = iinfo[4]
1500 output["time"] = iinfo[5]
1501
1502 return output
1503
1506 """Computes whether an instance can be migrated.
1507
1508 @type instance: L{objects.Instance}
1509 @param instance: object representing the instance to be checked.
1510
1511 @rtype: tuple
1512 @return: tuple of (result, description) where:
1513 - result: whether the instance can be migrated or not
1514 - description: a description of the issue, if relevant
1515
1516 """
1517 hyper = hypervisor.GetHypervisor(instance.hypervisor)
1518 iname = instance.name
1519 if iname not in hyper.ListInstances(hvparams=instance.hvparams):
1520 _Fail("Instance %s is not running", iname)
1521
1522 for idx in range(len(instance.disks_info)):
1523 link_name = _GetBlockDevSymlinkPath(iname, idx)
1524 if not os.path.islink(link_name):
1525 logging.warning("Instance %s is missing symlink %s for disk %d",
1526 iname, link_name, idx)
1527
1530 """Gather data about all instances.
1531
1532 This is the equivalent of L{GetInstanceInfo}, except that it
1533 computes data for all instances at once, thus being faster if one
1534 needs data about more than one instance.
1535
1536 @type hypervisor_list: list
1537 @param hypervisor_list: list of hypervisors to query for instance data
1538 @type all_hvparams: dict of dict of strings
1539 @param all_hvparams: mapping of hypervisor names to hvparams
1540
1541 @rtype: dict
1542 @return: dictionary of instance: data, with data having the following keys:
1543 - memory: memory size of instance (int)
1544 - state: xen state of instance (string)
1545 - time: cpu time of instance (float)
1546 - vcpus: the number of vcpus
1547
1548 """
1549 output = {}
1550 for hname in hypervisor_list:
1551 hvparams = all_hvparams[hname]
1552 iinfo = hypervisor.GetHypervisor(hname).GetAllInstancesInfo(hvparams)
1553 if iinfo:
1554 for name, _, memory, vcpus, state, times in iinfo:
1555 value = {
1556 "memory": memory,
1557 "vcpus": vcpus,
1558 "state": state,
1559 "time": times,
1560 }
1561 if name in output:
1562
1563
1564
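          # compare only the static parameters (memory, vcpus); state and
          # time naturally differ between the invocations of the different
          # hypervisors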
          for key in "memory", "vcpus":
            if value[key] != output[name][key]:
              _Fail("Instance %s is running twice"
                    " with different parameters", name)
        output[name] = value

  return output


def GetInstanceConsoleInfo(instance_param_dict,
                           get_hv_fn=hypervisor.GetHypervisor):
  """Gather data about the console access of a set of instances of this node.

  This function assumes that the caller already knows which instances are on
  this node, by calling a function such as L{GetAllInstancesInfo} or
  L{GetInstanceList}.

  For every instance, a large amount of configuration data needs to be
  provided to the hypervisor interface in order to receive the console
  information. Whether this could or should be cut down can be discussed.
  The information is provided in a dictionary indexed by instance name,
  allowing any number of instance queries to be done.

  @type instance_param_dict: dict of string to tuple of dictionaries, where the
    dictionaries represent: L{objects.Instance}, L{objects.Node},
    L{objects.NodeGroup}, HvParams, BeParams
  @param instance_param_dict: mapping of instance name to parameters necessary
    for console information retrieval

  @rtype: dict
  @return: dictionary of instance: data, with data having the following keys:
    - instance: instance name
    - kind: console kind
    - message: used with kind == CONS_MESSAGE, indicates console to be
      unavailable, supplies error message
    - host: host to connect to
    - port: port to use
    - user: user for login
    - command: the command, broken into parts as an array
    - display: unknown, potentially unused?

  """
  output = {}
  for inst_name in instance_param_dict:
    instance = instance_param_dict[inst_name]["instance"]
    pnode = instance_param_dict[inst_name]["node"]
    group = instance_param_dict[inst_name]["group"]
    hvparams = instance_param_dict[inst_name]["hvParams"]
    beparams = instance_param_dict[inst_name]["beParams"]

    instance = objects.Instance.FromDict(instance)
    pnode = objects.Node.FromDict(pnode)
    group = objects.NodeGroup.FromDict(group)

    h = get_hv_fn(instance.hypervisor)
    output[inst_name] = h.GetInstanceConsole(instance, pnode, group,
                                             hvparams, beparams).ToDict()

  return output
1628 """Compute the OS log filename for a given instance and operation.
1629
1630 The instance name and os name are passed in as strings since not all
1631 operations have these as part of an instance object.
1632
1633 @type kind: string
1634 @param kind: the operation type (e.g. add, import, etc.)
1635 @type os_name: string
1636 @param os_name: the os name
1637 @type instance: string
1638 @param instance: the name of the instance being imported/added/etc.
1639 @type component: string or None
1640 @param component: the name of the component of the instance being
1641 transferred
1642
1643 """
1644
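  # build a timestamped, per-operation log file name under LOG_OS_DIR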
  if component:
    assert "/" not in component
    c_msg = "-%s" % component
  else:
    c_msg = ""
  base = ("%s-%s-%s%s-%s.log" %
          (kind, os_name, instance, c_msg, utils.TimestampForFilename()))
  return utils.PathJoin(pathutils.LOG_OS_DIR, base)
1656 """Add an OS to an instance.
1657
1658 @type instance: L{objects.Instance}
1659 @param instance: Instance whose OS is to be installed
1660 @type reinstall: boolean
1661 @param reinstall: whether this is an instance reinstall
1662 @type debug: integer
1663 @param debug: debug level, passed to the OS scripts
1664 @rtype: None
1665
1666 """
1667 inst_os = OSFromDisk(instance.os)
1668
1669 create_env = OSEnvironment(instance, inst_os, debug)
1670 if reinstall:
1671 create_env["INSTANCE_REINSTALL"] = "1"
1672
1673 logfile = _InstanceLogName("add", instance.os, instance.name, None)
1674
1675 result = utils.RunCmd([inst_os.create_script], env=create_env,
1676 cwd=inst_os.path, output=logfile, reset_env=True)
1677 if result.failed:
1678 logging.error("os create command '%s' returned error: %s, logfile: %s,"
1679 " output: %s", result.cmd, result.fail_reason, logfile,
1680 result.output)
1681 lines = [utils.SafeEncode(val)
1682 for val in utils.TailFile(logfile, lines=20)]
1683 _Fail("OS create script failed (%s), last lines in the"
1684 " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)
1685
1688 """Run the OS rename script for an instance.
1689
1690 @type instance: L{objects.Instance}
1691 @param instance: Instance whose OS is to be installed
1692 @type old_name: string
1693 @param old_name: previous instance name
1694 @type debug: integer
1695 @param debug: debug level, passed to the OS scripts
1696 @rtype: boolean
1697 @return: the success of the operation
1698
1699 """
1700 inst_os = OSFromDisk(instance.os)
1701
1702 rename_env = OSEnvironment(instance, inst_os, debug)
1703 rename_env["OLD_INSTANCE_NAME"] = old_name
1704
1705 logfile = _InstanceLogName("rename", instance.os,
1706 "%s-%s" % (old_name, instance.name), None)
1707
1708 result = utils.RunCmd([inst_os.rename_script], env=rename_env,
1709 cwd=inst_os.path, output=logfile, reset_env=True)
1710
1711 if result.failed:
1712 logging.error("os create command '%s' returned error: %s output: %s",
1713 result.cmd, result.fail_reason, result.output)
1714 lines = [utils.SafeEncode(val)
1715 for val in utils.TailFile(logfile, lines=20)]
1716 _Fail("OS rename script failed (%s), last lines in the"
1717 " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)
1718


def _GetBlockDevSymlinkPath(instance_name, idx, _dir=None):
  """Returns the name of the symlink pointing to an instance's block device.

  """
  if _dir is None:
    _dir = pathutils.DISK_LINKS_DIR

  return utils.PathJoin(_dir,
                        ("%s%s%s" %
                         (instance_name, constants.DISK_SEPARATOR, idx)))
1733 """Set up symlinks to a instance's block device.
1734
1735 This is an auxiliary function run when an instance is start (on the primary
1736 node) or when an instance is migrated (on the target node).
1737
1738
1739 @param instance_name: the name of the target instance
1740 @param device_path: path of the physical block device, on the node
1741 @param idx: the disk index
1742 @return: absolute path to the disk's symlink
1743
1744 """
1745 link_name = _GetBlockDevSymlinkPath(instance_name, idx)
1746 try:
1747 os.symlink(device_path, link_name)
1748 except OSError, err:
1749 if err.errno == errno.EEXIST:
1750 if (not os.path.islink(link_name) or
1751 os.readlink(link_name) != device_path):
1752 os.remove(link_name)
1753 os.symlink(device_path, link_name)
1754 else:
1755 raise
1756
1757 return link_name
1758
1761 """Remove the block device symlinks belonging to the given instance.
1762
1763 """
1764 for idx, _ in enumerate(disks):
1765 link_name = _GetBlockDevSymlinkPath(instance_name, idx)
1766 if os.path.islink(link_name):
1767 try:
1768 os.remove(link_name)
1769 except OSError:
1770 logging.exception("Can't remove symlink '%s'", link_name)
1771
1774 """Get the URI for the device.
1775
1776 @type instance: L{objects.Instance}
1777 @param instance: the instance which disk belongs to
1778 @type disk: L{objects.Disk}
1779 @param disk: the target disk object
1780 @type device: L{bdev.BlockDev}
1781 @param device: the corresponding BlockDevice
1782 @rtype: string
1783 @return: the device uri if any else None
1784
1785 """
1786 access_mode = disk.params.get(constants.LDP_ACCESS,
1787 constants.DISK_KERNELSPACE)
1788 if access_mode == constants.DISK_USERSPACE:
1789
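    # userspace access: ask the block device for its hypervisor-specific URI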
    return device.GetUserspaceAccessUri(instance.hypervisor)
  else:
    return None


def _GatherAndLinkBlockDevs(instance):
  """Set up an instance's block device(s).

  This is run on the primary node at instance startup. The block
  devices must be already assembled.

  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should assemble
  @rtype: list
  @return: list of (disk_object, link_name, drive_uri)

  """
  block_devices = []
  for idx, disk in enumerate(instance.disks_info):
    device = _RecursiveFindBD(disk)
    if device is None:
      raise errors.BlockDeviceError("Block device '%s' is not set up." %
                                    str(disk))
    device.Open()
    try:
      link_name = _SymlinkBlockDev(instance.name, device.dev_path, idx)
    except OSError, e:
      raise errors.BlockDeviceError("Cannot create block device symlink: %s" %
                                    e.strerror)
    uri = _CalculateDeviceURI(instance, disk, device)

    block_devices.append((disk, link_name, uri))

  return block_devices


def _IsInstanceUserDown(instance_info):
  return instance_info and \
      "state" in instance_info and \
      hv_base.HvInstanceState.IsShutdown(instance_info["state"])


def _GetInstanceInfo(instance):
  """Helper wrapper around L{GetInstanceInfo}."""
  return GetInstanceInfo(instance.name, instance.hypervisor,
                         hvparams=instance.hvparams)


def StartInstance(instance, startup_paused, reason, store_reason=True):
  """Start an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @type startup_paused: bool
  @param startup_paused: whether to pause the instance at startup
  @type reason: list of reasons
  @param reason: the reason trail for this startup
  @type store_reason: boolean
  @param store_reason: whether to store the shutdown reason trail on file
  @rtype: None

  """
  instance_info = _GetInstanceInfo(instance)

  if instance_info and not _IsInstanceUserDown(instance_info):
    logging.info("Instance '%s' already running, not starting", instance.name)
    return

  try:
    block_devices = _GatherAndLinkBlockDevs(instance)
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    hyper.StartInstance(instance, block_devices, startup_paused)
    if store_reason:
      _StoreInstReasonTrail(instance.name, reason)
  except errors.BlockDeviceError, err:
    _Fail("Block device error: %s", err, exc=True)
  except errors.HypervisorError, err:
    _RemoveBlockDevLinks(instance.name, instance.disks_info)
    _Fail("Hypervisor error: %s", err, exc=True)
1872 """Shut an instance down.
1873
1874 @note: this functions uses polling with a hardcoded timeout.
1875
1876 @type instance: L{objects.Instance}
1877 @param instance: the instance object
1878 @type timeout: integer
1879 @param timeout: maximum timeout for soft shutdown
1880 @type reason: list of reasons
1881 @param reason: the reason trail for this shutdown
1882 @type store_reason: boolean
1883 @param store_reason: whether to store the shutdown reason trail on file
1884 @rtype: None
1885
1886 """
1887 hyper = hypervisor.GetHypervisor(instance.hypervisor)
1888
1889 if not _GetInstanceInfo(instance):
1890 logging.info("Instance '%s' not running, doing nothing", instance.name)
1891 return
1892
1893 class _TryShutdown(object):
1894 def __init__(self):
1895 self.tried_once = False
1896
1897 def __call__(self):
1898 if not _GetInstanceInfo(instance):
1899 return
1900
1901 try:
1902 hyper.StopInstance(instance, retry=self.tried_once, timeout=timeout)
1903 if store_reason:
1904 _StoreInstReasonTrail(instance.name, reason)
1905 except errors.HypervisorError, err:
1906
1907
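        # if the instance disappeared in the meantime, consider the
        # shutdown a success and continue with the cleanup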
        if not _GetInstanceInfo(instance):
          return

        _Fail("Failed to stop instance '%s': %s", instance.name, err)

      self.tried_once = True

      raise utils.RetryAgain()

  try:
    utils.Retry(_TryShutdown(), 5, timeout)
  except utils.RetryTimeout:
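    # the soft shutdown did not succeed within the timeout; force it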
    logging.error("Shutdown of '%s' unsuccessful, forcing", instance.name)

    try:
      hyper.StopInstance(instance, force=True)
    except errors.HypervisorError, err:
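      # only fail if the instance still exists; otherwise the error most
      # likely just means the instance is already gone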
      if _GetInstanceInfo(instance):
        _Fail("Failed to force stop instance '%s': %s", instance.name, err)

    time.sleep(1)

    if _GetInstanceInfo(instance):
      _Fail("Could not shutdown instance '%s' even by destroy", instance.name)

  try:
    hyper.CleanupInstance(instance.name)
  except errors.HypervisorError, err:
    logging.warning("Failed to execute post-shutdown cleanup step: %s", err)

  _RemoveBlockDevLinks(instance.name, instance.disks_info)


def InstanceReboot(instance, reboot_type, shutdown_timeout, reason):
  """Reboot an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance object to reboot
  @type reboot_type: str
  @param reboot_type: the type of reboot, one of the following
    constants:
      - L{constants.INSTANCE_REBOOT_SOFT}: only reboot the
        instance OS, do not recreate the VM
      - L{constants.INSTANCE_REBOOT_HARD}: tear down and
        restart the VM (at the hypervisor level)
      - the other reboot type (L{constants.INSTANCE_REBOOT_FULL}) is
        not accepted here, since that mode is handled differently, in
        cmdlib, and translates into full stop and start of the
        instance (instead of a call_instance_reboot RPC)
  @type shutdown_timeout: integer
  @param shutdown_timeout: maximum timeout for soft shutdown
  @type reason: list of reasons
  @param reason: the reason trail for this reboot
  @rtype: None

  """
  if not _GetInstanceInfo(instance):
    _Fail("Cannot reboot instance '%s' that is not running", instance.name)

  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  if reboot_type == constants.INSTANCE_REBOOT_SOFT:
    try:
      hyper.RebootInstance(instance)
    except errors.HypervisorError, err:
      _Fail("Failed to soft reboot instance '%s': %s", instance.name, err)
  elif reboot_type == constants.INSTANCE_REBOOT_HARD:
    try:
      InstanceShutdown(instance, shutdown_timeout, reason, store_reason=False)
      result = StartInstance(instance, False, reason, store_reason=False)
      _StoreInstReasonTrail(instance.name, reason)
      return result
    except errors.HypervisorError, err:
      _Fail("Failed to hard reboot instance '%s': %s", instance.name, err)
  else:
    _Fail("Invalid reboot_type received: '%s'", reboot_type)
2010
2025
2028 """Prepare the node to accept an instance.
2029
2030 @type instance: L{objects.Instance}
2031 @param instance: the instance definition
2032 @type info: string/data (opaque)
2033 @param info: migration information, from the source node
2034 @type target: string
2035 @param target: target host (usually ip), on this node
2036
2037 """
2038
2039 if instance.disk_template in constants.DTS_EXT_MIRROR:
2040
2041
2042 try:
2043 _GatherAndLinkBlockDevs(instance)
2044 except errors.BlockDeviceError, err:
2045 _Fail("Block device error: %s", err, exc=True)
2046
2047 hyper = hypervisor.GetHypervisor(instance.hypervisor)
2048 try:
2049 hyper.AcceptInstance(instance, info, target)
2050 except errors.HypervisorError, err:
2051 if instance.disk_template in constants.DTS_EXT_MIRROR:
2052 _RemoveBlockDevLinks(instance.name, instance.disks_info)
2053 _Fail("Failed to accept instance: %s", err, exc=True)
2054
2057 """Finalize any preparation to accept an instance.
2058
2059 @type instance: L{objects.Instance}
2060 @param instance: the instance definition
2061 @type info: string/data (opaque)
2062 @param info: migration information, from the source node
2063 @type success: boolean
2064 @param success: whether the migration was a success or a failure
2065
2066 """
2067 hyper = hypervisor.GetHypervisor(instance.hypervisor)
2068 try:
2069 hyper.FinalizeMigrationDst(instance, info, success)
2070 except errors.HypervisorError, err:
2071 _Fail("Failed to finalize migration on the target node: %s", err, exc=True)
2072
2075 """Migrates an instance to another node.
2076
2077 @type cluster_name: string
2078 @param cluster_name: name of the cluster
2079 @type instance: L{objects.Instance}
2080 @param instance: the instance definition
2081 @type target: string
2082 @param target: the target node name
2083 @type live: boolean
2084 @param live: whether the migration should be done live or not (the
2085 interpretation of this parameter is left to the hypervisor)
2086 @raise RPCFail: if migration fails for some reason
2087
2088 """
2089 hyper = hypervisor.GetHypervisor(instance.hypervisor)
2090
2091 try:
2092 hyper.MigrateInstance(cluster_name, instance, target, live)
2093 except errors.HypervisorError, err:
2094 _Fail("Failed to migrate instance: %s", err, exc=True)
2095
2098 """Finalize the instance migration on the source node.
2099
2100 @type instance: L{objects.Instance}
2101 @param instance: the instance definition of the migrated instance
2102 @type success: bool
2103 @param success: whether the migration succeeded or not
2104 @type live: bool
2105 @param live: whether the user requested a live migration or not
2106 @raise RPCFail: If the execution fails for some reason
2107
2108 """
2109 hyper = hypervisor.GetHypervisor(instance.hypervisor)
2110
2111 try:
2112 hyper.FinalizeMigrationSource(instance, success, live)
2113 except Exception, err:
2114 _Fail("Failed to finalize the migration on the source node: %s", err,
2115 exc=True)
2116
2119 """Get the migration status
2120
2121 @type instance: L{objects.Instance}
2122 @param instance: the instance that is being migrated
2123 @rtype: L{objects.MigrationStatus}
2124 @return: the status of the current migration (one of
2125 L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
2126 progress info that can be retrieved from the hypervisor
2127 @raise RPCFail: If the migration status cannot be retrieved
2128
2129 """
2130 hyper = hypervisor.GetHypervisor(instance.hypervisor)
2131 try:
2132 return hyper.GetMigrationStatus(instance)
2133 except Exception, err:
2134 _Fail("Failed to get migration status: %s", err, exc=True)
2135
2136
2137 -def HotplugDevice(instance, action, dev_type, device, extra, seq):
2138 """Hotplug a device
2139
2140 Hotplug is currently supported only for KVM Hypervisor.
2141 @type instance: L{objects.Instance}
2142 @param instance: the instance to which we hotplug a device
2143 @type action: string
2144 @param action: the hotplug action to perform
2145 @type dev_type: string
2146 @param dev_type: the device type to hotplug
2147 @type device: either L{objects.NIC} or L{objects.Disk}
2148 @param device: the device object to hotplug
2149 @type extra: tuple
2150 @param extra: extra info used for disk hotplug (disk link, drive uri)
2151 @type seq: int
2152 @param seq: the index of the device from the master's perspective
2153 @raise RPCFail: in case instance does not have KVM hypervisor
2154
2155 """
2156 hyper = hypervisor.GetHypervisor(instance.hypervisor)
2157 try:
2158 hyper.VerifyHotplugSupport(instance, action, dev_type)
2159 except errors.HotplugError, err:
2160 _Fail("Hotplug is not supported: %s", err)
2161
2162 if action == constants.HOTPLUG_ACTION_ADD:
2163 fn = hyper.HotAddDevice
2164 elif action == constants.HOTPLUG_ACTION_REMOVE:
2165 fn = hyper.HotDelDevice
2166 elif action == constants.HOTPLUG_ACTION_MODIFY:
2167 fn = hyper.HotModDevice
2168 else:
2169 assert action in constants.HOTPLUG_ALL_ACTIONS
2170
2171 return fn(instance, dev_type, device, extra, seq)
2172
2183
2210
2211 retries = 5
2212
2213 while True:
2214 try:
2215 trans = utils.Retry(_Connect, 1.0, constants.LUXI_DEF_CTMO)
2216 break
2217 except utils.RetryTimeout:
2218 raise TimeoutError("Connection to metadata daemon timed out")
2219 except (socket.error, NoMasterError), err:
2220 if retries == 0:
2221 logging.error("Failed to connect to the metadata daemon",
2222 exc_info=True)
2223 raise TimeoutError("Failed to connect to metadata daemon: %s" % err)
2224 else:
2225 retries -= 1
2226
2227 data = serializer.DumpJson(metadata,
2228 private_encoder=serializer.EncodeWithPrivateFields)
2229
2230 trans.Send(data)
2231 trans.Close()
2232
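# Note on the retry logic above: each pass of the loop gives _Connect up to
# constants.LUXI_DEF_CTMO seconds (polled at 1.0 second intervals) to reach
# the metadata daemon. A RetryTimeout aborts immediately, while socket errors
# and NoMasterError consume one of the five extra retries, so at most six
# connection rounds are attempted before giving up.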
2233
2234 -def BlockdevCreate(disk, size, owner, on_primary, info, excl_stor):
2235 """Creates a block device for an instance.
2236
2237 @type disk: L{objects.Disk}
2238 @param disk: the object describing the disk we should create
2239 @type size: int
2240 @param size: the size of the physical underlying device, in MiB
2241 @type owner: str
2242 @param owner: the name of the instance for which disk is created,
2243 used for device cache data
2244 @type on_primary: boolean
2245 @param on_primary: indicates if it is the primary node or not
2246 @type info: string
2247 @param info: string that will be sent to the physical device
2248 creation, used for example to set (LVM) tags on LVs
2249 @type excl_stor: boolean
2250 @param excl_stor: Whether exclusive_storage is active
2251
2252 @return: the new unique_id of the device (this can sometime be
2253 computed only after creation), or None. On secondary nodes,
2254 it's not required to return anything.
2255
2256 """
2257
2258
2259 clist = []
2260 if disk.children:
2261 for child in disk.children:
2262 try:
2263 crdev = _RecursiveAssembleBD(child, owner, on_primary)
2264 except errors.BlockDeviceError, err:
2265 _Fail("Can't assemble device %s: %s", child, err)
2266 if on_primary or disk.AssembleOnSecondary():
2267
2268
2269 try:
2270
2271 crdev.Open()
2272 except errors.BlockDeviceError, err:
2273 _Fail("Can't make child '%s' read-write: %s", child, err)
2274 clist.append(crdev)
2275
2276 try:
2277 device = bdev.Create(disk, clist, excl_stor)
2278 except errors.BlockDeviceError, err:
2279 _Fail("Can't create block device: %s", err)
2280
2281 if on_primary or disk.AssembleOnSecondary():
2282 try:
2283 device.Assemble()
2284 except errors.BlockDeviceError, err:
2285 _Fail("Can't assemble device after creation, unusual event: %s", err)
2286 if on_primary or disk.OpenOnSecondary():
2287 try:
2288 device.Open(force=True)
2289 except errors.BlockDeviceError, err:
2290 _Fail("Can't make device r/w after creation, unusual event: %s", err)
2291 DevCacheManager.UpdateCache(device.dev_path, owner,
2292 on_primary, disk.iv_name)
2293
2294 device.SetInfo(info)
2295
2296 return device.unique_id
2297
2298
2299 -def _DumpDevice(source_path, target_path, offset, size, truncate):
2300 """This function images/wipes the device using a local file.
2301
2302 @type source_path: string
2303 @param source_path: path of the image or data source (e.g., "/dev/zero")
2304
2305 @type target_path: string
2306 @param target_path: path of the device to image/wipe
2307
2308 @type offset: int
2309 @param offset: offset in MiB in the output file
2310
2311 @type size: int
2312 @param size: maximum size in MiB to write (data source might be smaller)
2313
2314 @type truncate: bool
2315 @param truncate: whether the file should be truncated
2316
2317 @return: None
2318 @raise RPCFail: in case of failure
2319
2320 """
2321
2322
2323
2324 block_size = 1024 * 1024
2325
2326 cmd = [constants.DD_CMD, "if=%s" % source_path, "seek=%d" % offset,
2327 "bs=%s" % block_size, "oflag=direct", "of=%s" % target_path,
2328 "count=%d" % size]
2329
2330 if not truncate:
2331 cmd.append("conv=notrunc")
2332
2333 result = utils.RunCmd(cmd)
2334
2335 if result.failed:
2336 _Fail("Dump command '%s' exited with error: %s; output: %s", result.cmd,
2337 result.fail_reason, result.output)
2338
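# Worked example for _DumpDevice (values are illustrative): wiping the first
# 1024 MiB of a hypothetical device /dev/xvdb via
#
#   _DumpDevice("/dev/zero", "/dev/xvdb", 0, 1024, True)
#
# builds the equivalent of
#
#   dd if=/dev/zero seek=0 bs=1048576 oflag=direct of=/dev/xvdb count=1024
#
# i.e. both `seek` and `count` are expressed in 1 MiB blocks. With
# truncate=False (the image-dump case) "conv=notrunc" is appended so an
# existing target file is not truncated first.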
2341 """This function images a device using a downloaded image file.
2342
2343 @type source_url: string
2344 @param source_url: URL of image to dump to disk
2345
2346 @type target_path: string
2347 @param target_path: path of the device to image
2348
2349 @type size: int
2350 @param size: maximum size in MiB to write (data source might be smaller)
2351
2352 @rtype: NoneType
2353 @return: None
2354 @raise RPCFail: in case of download or write failures
2355
2356 """
2357 class DDParams(object):
2358 def __init__(self, current_size, total_size):
2359 self.current_size = current_size
2360 self.total_size = total_size
2361 self.image_size_error = False
2362
2363 def dd_write(ddparams, out):
2364 if ddparams.current_size < ddparams.total_size:
2365 ddparams.current_size += len(out)
2366 target_file.write(out)
2367 else:
2368 ddparams.image_size_error = True
2369 return -1
2370
2371 target_file = open(target_path, "r+")
2372 ddparams = DDParams(0, 1024 * 1024 * size)
2373
2374 curl = pycurl.Curl()
2375 curl.setopt(pycurl.VERBOSE, True)
2376 curl.setopt(pycurl.NOSIGNAL, True)
2377 curl.setopt(pycurl.USERAGENT, http.HTTP_GANETI_VERSION)
2378 curl.setopt(pycurl.URL, source_url)
2379 curl.setopt(pycurl.WRITEFUNCTION, lambda out: dd_write(ddparams, out))
2380
2381 try:
2382 curl.perform()
2383 except pycurl.error:
2384 if ddparams.image_size_error:
2385 _Fail("Disk image larger than the disk")
2386 else:
2387 raise
2388
2389 target_file.close()
2390
2393 """Wipes a block device.
2394
2395 @type disk: L{objects.Disk}
2396 @param disk: the disk object we want to wipe
2397 @type offset: int
2398 @param offset: The offset in MiB in the file
2399 @type size: int
2400 @param size: The size in MiB to write
2401
2402 """
2403 try:
2404 rdev = _RecursiveFindBD(disk)
2405 except errors.BlockDeviceError:
2406 rdev = None
2407
2408 if not rdev:
2409 _Fail("Cannot wipe device %s: device not found", disk.iv_name)
2410 if offset < 0:
2411 _Fail("Negative offset")
2412 if size < 0:
2413 _Fail("Negative size")
2414 if offset > rdev.size:
2415 _Fail("Wipe offset is bigger than device size")
2416 if (offset + size) > rdev.size:
2417 _Fail("Wipe offset and size are bigger than device size")
2418
2419 _DumpDevice("/dev/zero", rdev.dev_path, offset, size, True)
2420
2423 """Images a block device either by dumping a local file or
2424 downloading a URL.
2425
2426 @type disk: L{objects.Disk}
2427 @param disk: the disk object we want to image
2428
2429 @type image: string
2430 @param image: file path to the disk image to be dumped
2431
2432 @type size: int
2433 @param size: The size in MiB to write
2434
2435 @rtype: NoneType
2436 @return: None
2437 @raise RPCFail: in case of failure
2438
2439 """
2440 if not (utils.IsUrl(image) or os.path.exists(image)):
2441 _Fail("Image '%s' not found", image)
2442
2443 try:
2444 rdev = _RecursiveFindBD(disk)
2445 except errors.BlockDeviceError:
2446 rdev = None
2447
2448 if not rdev:
2449 _Fail("Cannot image device %s: device not found", disk.iv_name)
2450 if size < 0:
2451 _Fail("Negative size")
2452 if size > rdev.size:
2453 _Fail("Image size is bigger than device size")
2454
2455 if utils.IsUrl(image):
2456 _DownloadAndDumpDevice(image, rdev.dev_path, size)
2457 else:
2458 _DumpDevice(image, rdev.dev_path, 0, size, False)
2459
2462 """Pause or resume the sync of the block device.
2463
2464 @type disks: list of L{objects.Disk}
2465 @param disks: the disks object we want to pause/resume
2466 @type pause: bool
2467 @param pause: Whether to pause or resume
2468
2469 """
2470 success = []
2471 for disk in disks:
2472 try:
2473 rdev = _RecursiveFindBD(disk)
2474 except errors.BlockDeviceError:
2475 rdev = None
2476
2477 if not rdev:
2478 success.append((False, ("Cannot change sync for device %s:"
2479 " device not found" % disk.iv_name)))
2480 continue
2481
2482 result = rdev.PauseResumeSync(pause)
2483
2484 if result:
2485 success.append((result, None))
2486 else:
2487 if pause:
2488 msg = "Pause"
2489 else:
2490 msg = "Resume"
2491 success.append((result, "%s for device %s failed" % (msg, disk.iv_name)))
2492
2493 return success
2494
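# A hedged usage sketch for the pause/resume helper above: callers are
# expected to walk the returned list and act on each (status, message) pair,
# e.g. (sync_results standing for the helper's return value):
#
#   for success, msg in sync_results:
#     if not success:
#       logging.warning("Pause/resume of disk sync failed: %s", msg)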
2497 """Remove a block device.
2498
2499 @note: This is intended to be called recursively.
2500
2501 @type disk: L{objects.Disk}
2502 @param disk: the disk object we should remove
2503 @rtype: boolean
2504 @return: the success of the operation
2505
2506 """
2507 msgs = []
2508 try:
2509 rdev = _RecursiveFindBD(disk)
2510 except errors.BlockDeviceError, err:
2511
2512 logging.info("Can't attach to device %s in remove", disk)
2513 rdev = None
2514 if rdev is not None:
2515 r_path = rdev.dev_path
2516
2517 def _TryRemove():
2518 try:
2519 rdev.Remove()
2520 return []
2521 except errors.BlockDeviceError, err:
2522 return [str(err)]
2523
2524 msgs.extend(utils.SimpleRetry([], _TryRemove,
2525 constants.DISK_REMOVE_RETRY_INTERVAL,
2526 constants.DISK_REMOVE_RETRY_TIMEOUT))
2527
2528 if not msgs:
2529 DevCacheManager.RemoveCache(r_path)
2530
2531 if disk.children:
2532 for child in disk.children:
2533 try:
2534 BlockdevRemove(child)
2535 except RPCFail, err:
2536 msgs.append(str(err))
2537
2538 if msgs:
2539 _Fail("; ".join(msgs))
2540
2543 """Activate a block device for an instance.
2544
2545 This is run on the primary and secondary nodes for an instance.
2546
2547 @note: this function is called recursively.
2548
2549 @type disk: L{objects.Disk}
2550 @param disk: the disk we try to assemble
2551 @type owner: str
2552 @param owner: the name of the instance which owns the disk
2553 @type as_primary: boolean
2554 @param as_primary: if we should make the block device
2555 read/write
2556
2557 @return: the assembled device or None (in case no device
2558 was assembled)
2559 @raise errors.BlockDeviceError: in case there is an error
2560 during the activation of the children or the device
2561 itself
2562
2563 """
2564 children = []
2565 if disk.children:
2566 mcn = disk.ChildrenNeeded()
2567 if mcn == -1:
2568 mcn = 0
2569 else:
2570 mcn = len(disk.children) - mcn
2571 for chld_disk in disk.children:
2572 try:
2573 cdev = _RecursiveAssembleBD(chld_disk, owner, as_primary)
2574 except errors.BlockDeviceError, err:
2575 if children.count(None) >= mcn:
2576 raise
2577 cdev = None
2578 logging.error("Error in child activation (but continuing): %s",
2579 str(err))
2580 children.append(cdev)
2581
2582 if as_primary or disk.AssembleOnSecondary():
2583 r_dev = bdev.Assemble(disk, children)
2584 result = r_dev
2585 if as_primary or disk.OpenOnSecondary():
2586 r_dev.Open()
2587 DevCacheManager.UpdateCache(r_dev.dev_path, owner,
2588 as_primary, disk.iv_name)
2589
2590 else:
2591 result = True
2592 return result
2593
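# Worked example of the mcn ("minimum children needed") arithmetic above: for
# a mirrored disk with two children whose ChildrenNeeded() returns 1, mcn
# becomes 2 - 1 = 1, so a single failed child is tolerated and only logged,
# while a second failure re-raises the BlockDeviceError. A ChildrenNeeded()
# value of -1 maps to mcn = 0, meaning the very first failed child aborts the
# assembly.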
2596 """Activate a block device for an instance.
2597
2598 This is a wrapper over _RecursiveAssembleBD.
2599
2600 @rtype: tuple
2601 @return: a tuple with the C{/dev/...} path, the created symlink and the
2602 device URI for primary nodes, and (C{True}, C{True}) for secondary nodes
2603
2604 """
2605 try:
2606 result = _RecursiveAssembleBD(disk, instance.name, as_primary)
2607 if isinstance(result, BlockDev):
2608
2609 dev_path = result.dev_path
2610 link_name = None
2611 uri = None
2612 if as_primary:
2613 link_name = _SymlinkBlockDev(instance.name, dev_path, idx)
2614 uri = _CalculateDeviceURI(instance, disk, result)
2615 elif result:
2616 return result, result
2617 else:
2618 _Fail("Unexpected result from _RecursiveAssembleBD")
2619 except errors.BlockDeviceError, err:
2620 _Fail("Error while assembling disk: %s", err, exc=True)
2621 except OSError, err:
2622 _Fail("Error while symlinking disk: %s", err, exc=True)
2623
2624 return dev_path, link_name, uri
2625
2628 """Shut down a block device.
2629
2630 First, if the device is assembled (Attach() is successful), then
2631 the device is shutdown. Then the children of the device are
2632 shutdown.
2633
2634 This function is called recursively. Note that, as opposed to assemble,
2635 we don't cache the children: shutting down one device does not require
2636 that the device above it be active.
2637
2638 @type disk: L{objects.Disk}
2639 @param disk: the description of the disk we should
2640 shutdown
2641 @rtype: None
2642
2643 """
2644 msgs = []
2645 r_dev = _RecursiveFindBD(disk)
2646 if r_dev is not None:
2647 r_path = r_dev.dev_path
2648 try:
2649 r_dev.Shutdown()
2650 DevCacheManager.RemoveCache(r_path)
2651 except errors.BlockDeviceError, err:
2652 msgs.append(str(err))
2653
2654 if disk.children:
2655 for child in disk.children:
2656 try:
2657 BlockdevShutdown(child)
2658 except RPCFail, err:
2659 msgs.append(str(err))
2660
2661 if msgs:
2662 _Fail("; ".join(msgs))
2663
2666 """Extend a mirrored block device.
2667
2668 @type parent_cdev: L{objects.Disk}
2669 @param parent_cdev: the disk to which we should add children
2670 @type new_cdevs: list of L{objects.Disk}
2671 @param new_cdevs: the list of children which we should add
2672 @rtype: None
2673
2674 """
2675 parent_bdev = _RecursiveFindBD(parent_cdev)
2676 if parent_bdev is None:
2677 _Fail("Can't find parent device '%s' in add children", parent_cdev)
2678 new_bdevs = [_RecursiveFindBD(disk) for disk in new_cdevs]
2679 if new_bdevs.count(None) > 0:
2680 _Fail("Can't find new device(s) to add: %s:%s", new_bdevs, new_cdevs)
2681 parent_bdev.AddChildren(new_bdevs)
2682
2685 """Shrink a mirrored block device.
2686
2687 @type parent_cdev: L{objects.Disk}
2688 @param parent_cdev: the disk from which we should remove children
2689 @type new_cdevs: list of L{objects.Disk}
2690 @param new_cdevs: the list of children which we should remove
2691 @rtype: None
2692
2693 """
2694 parent_bdev = _RecursiveFindBD(parent_cdev)
2695 if parent_bdev is None:
2696 _Fail("Can't find parent device '%s' in remove children", parent_cdev)
2697 devs = []
2698 for disk in new_cdevs:
2699 rpath = disk.StaticDevPath()
2700 if rpath is None:
2701 bd = _RecursiveFindBD(disk)
2702 if bd is None:
2703 _Fail("Can't find device %s while removing children", disk)
2704 else:
2705 devs.append(bd.dev_path)
2706 else:
2707 if not utils.IsNormAbsPath(rpath):
2708 _Fail("Strange path returned from StaticDevPath: '%s'", rpath)
2709 devs.append(rpath)
2710 parent_bdev.RemoveChildren(devs)
2711
2714 """Get the mirroring status of a list of devices.
2715
2716 @type disks: list of L{objects.Disk}
2717 @param disks: the list of disks which we should query
2718 @rtype: list
2719 @return: List of L{objects.BlockDevStatus}, one for each disk
2720 @raise errors.BlockDeviceError: if any of the disks cannot be
2721 found
2722
2723 """
2724 stats = []
2725 for dsk in disks:
2726 rbd = _RecursiveFindBD(dsk)
2727 if rbd is None:
2728 _Fail("Can't find device %s", dsk)
2729
2730 stats.append(rbd.CombinedSyncStatus())
2731
2732 return stats
2733
2736 """Get the mirroring status of a list of devices.
2737
2738 @type disks: list of L{objects.Disk}
2739 @param disks: the list of disks which we should query
2740 @rtype: list
2741 @return: List of tuples, (bool, status), one for each disk; bool denotes
2742 success/failure, status is L{objects.BlockDevStatus} on success, string
2743 otherwise
2744
2745 """
2746 result = []
2747 for disk in disks:
2748 try:
2749 rbd = _RecursiveFindBD(disk)
2750 if rbd is None:
2751 result.append((False, "Can't find device %s" % disk))
2752 continue
2753
2754 status = rbd.CombinedSyncStatus()
2755 except errors.BlockDeviceError, err:
2756 logging.exception("Error while getting disk status")
2757 result.append((False, str(err)))
2758 else:
2759 result.append((True, status))
2760
2761 assert len(disks) == len(result)
2762
2763 return result
2764
2767 """Check if a device is activated.
2768
2769 If so, return information about the real device.
2770
2771 @type disk: L{objects.Disk}
2772 @param disk: the disk object we need to find
2773
2774 @return: None if the device can't be found,
2775 otherwise the device instance
2776
2777 """
2778 children = []
2779 if disk.children:
2780 for chdisk in disk.children:
2781 children.append(_RecursiveFindBD(chdisk))
2782
2783 return bdev.FindDevice(disk, children)
2784
2787 """Opens the underlying block device of a disk.
2788
2789 @type disk: L{objects.Disk}
2790 @param disk: the disk object we want to open
2791
2792 """
2793 real_disk = _RecursiveFindBD(disk)
2794 if real_disk is None:
2795 _Fail("Block device '%s' is not set up", disk)
2796
2797 real_disk.Open()
2798
2799 return real_disk
2800
2803 """Check if a device is activated.
2804
2805 If it is, return information about the real device.
2806
2807 @type disk: L{objects.Disk}
2808 @param disk: the disk to find
2809 @rtype: None or objects.BlockDevStatus
2810 @return: None if the disk cannot be found, otherwise the current
2811 information
2812
2813 """
2814 try:
2815 rbd = _RecursiveFindBD(disk)
2816 except errors.BlockDeviceError, err:
2817 _Fail("Failed to find device: %s", err, exc=True)
2818
2819 if rbd is None:
2820 return None
2821
2822 return rbd.GetSyncStatus()
2823
2826 """Computes the size of the given disks.
2827
2828 If a disk is not found, returns None instead.
2829
2830 @type disks: list of L{objects.Disk}
2831 @param disks: the list of disk to compute the size for
2832 @rtype: list
2833 @return: list with elements None if the disk cannot be found,
2834 otherwise the pair (size, spindles), where spindles is None if the
2835 device doesn't support that
2836
2837 """
2838 result = []
2839 for cf in disks:
2840 try:
2841 rbd = _RecursiveFindBD(cf)
2842 except errors.BlockDeviceError:
2843 result.append(None)
2844 continue
2845 if rbd is None:
2846 result.append(None)
2847 else:
2848 result.append(rbd.GetActualDimensions())
2849 return result
2850
2851
2852 -def UploadFile(file_name, data, mode, uid, gid, atime, mtime):
2853 """Write a file to the filesystem.
2854
2855 This allows the master to overwrite(!) a file. It will only perform
2856 the operation if the file belongs to a list of configuration files.
2857
2858 @type file_name: str
2859 @param file_name: the target file name
2860 @type data: str
2861 @param data: the new contents of the file
2862 @type mode: int
2863 @param mode: the mode to give the file (can be None)
2864 @type uid: string
2865 @param uid: the owner of the file
2866 @type gid: string
2867 @param gid: the group of the file
2868 @type atime: float
2869 @param atime: the atime to set on the file (can be None)
2870 @type mtime: float
2871 @param mtime: the mtime to set on the file (can be None)
2872 @rtype: None
2873
2874 """
2875 file_name = vcluster.LocalizeVirtualPath(file_name)
2876
2877 if not os.path.isabs(file_name):
2878 _Fail("Filename passed to UploadFile is not absolute: '%s'", file_name)
2879
2880 if file_name not in _ALLOWED_UPLOAD_FILES:
2881 _Fail("Filename passed to UploadFile not in allowed upload targets: '%s'",
2882 file_name)
2883
2884 raw_data = _Decompress(data)
2885
2886 if not (isinstance(uid, basestring) and isinstance(gid, basestring)):
2887 _Fail("Invalid username/groupname type")
2888
2889 getents = runtime.GetEnts()
2890 uid = getents.LookupUser(uid)
2891 gid = getents.LookupGroup(gid)
2892
2893 utils.SafeWriteFile(file_name, None,
2894 data=raw_data, mode=mode, uid=uid, gid=gid,
2895 atime=atime, mtime=mtime)
2896
2897
2898 -def RunOob(oob_program, command, node, timeout):
2899 """Executes oob_program with given command on given node.
2900
2901 @param oob_program: The path to the executable oob_program
2902 @param command: The command to invoke on oob_program
2903 @param node: The node given as an argument to the program
2904 @param timeout: Timeout after which we kill the oob program
2905
2906 @return: stdout
2907 @raise RPCFail: If execution fails for some reason
2908
2909 """
2910 result = utils.RunCmd([oob_program, command, node], timeout=timeout)
2911
2912 if result.failed:
2913 _Fail("'%s' failed with reason '%s'; output: %s", result.cmd,
2914 result.fail_reason, result.output)
2915
2916 return result.stdout
2917
2920 """Compute and return the API version of a given OS.
2921
2922 This function will try to read the API version of the OS residing in
2923 the 'os_dir' directory.
2924
2925 @type os_dir: str
2926 @param os_dir: the directory in which we should look for the OS
2927 @rtype: tuple
2928 @return: tuple (status, data) with status denoting the validity and
2929 data holding either the valid versions or an error message
2930
2931 """
2932 api_file = utils.PathJoin(os_dir, constants.OS_API_FILE)
2933
2934 try:
2935 st = os.stat(api_file)
2936 except EnvironmentError, err:
2937 return False, ("Required file '%s' not found under path %s: %s" %
2938 (constants.OS_API_FILE, os_dir, utils.ErrnoOrStr(err)))
2939
2940 if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
2941 return False, ("File '%s' in %s is not a regular file" %
2942 (constants.OS_API_FILE, os_dir))
2943
2944 try:
2945 api_versions = utils.ReadFile(api_file).splitlines()
2946 except EnvironmentError, err:
2947 return False, ("Error while reading the API version file at %s: %s" %
2948 (api_file, utils.ErrnoOrStr(err)))
2949
2950 try:
2951 api_versions = [int(version.strip()) for version in api_versions]
2952 except (TypeError, ValueError), err:
2953 return False, ("API version(s) can't be converted to integer: %s" %
2954 str(err))
2955
2956 return True, api_versions
2957
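# Illustrative example: the API version file parsed above is a plain text
# file holding one integer version per line, e.g. a file containing
#
#   20
#
# yields (True, [20]), while an unreadable or non-numeric file produces the
# (False, "<error message>") tuple instead.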
2960 """Compute the validity for all OSes.
2961
2962 @type top_dirs: list
2963 @param top_dirs: the list of directories in which to
2964 search (if not given defaults to
2965 L{pathutils.OS_SEARCH_PATH})
2966 @rtype: list of L{objects.OS}
2967 @return: a list of tuples (name, path, status, diagnose, variants,
2968 parameters, api_versions, trusted) for all (potential) OSes under all
2969 search paths, where:
2970 - name is the (potential) OS name
2971 - path is the full path to the OS
2972 - status True/False is the validity of the OS
2973 - diagnose is the error message for an invalid OS, otherwise empty
2974 - variants is a list of supported OS variants, if any
2975 - parameters is a list of (name, help) parameters, if any
2976 - api_versions is a list of supported OS API versions
2977
2978 """
2979 if top_dirs is None:
2980 top_dirs = pathutils.OS_SEARCH_PATH
2981
2982 result = []
2983 for dir_name in top_dirs:
2984 if os.path.isdir(dir_name):
2985 try:
2986 f_names = utils.ListVisibleFiles(dir_name)
2987 except EnvironmentError, err:
2988 logging.exception("Can't list the OS directory %s: %s", dir_name, err)
2989 break
2990 for name in f_names:
2991 os_path = utils.PathJoin(dir_name, name)
2992 status, os_inst = _TryOSFromDisk(name, base_dir=dir_name)
2993 if status:
2994 diagnose = ""
2995 variants = os_inst.supported_variants
2996 parameters = os_inst.supported_parameters
2997 api_versions = os_inst.api_versions
2998 trusted = not os_inst.create_script_untrusted
2999 else:
3000 diagnose = os_inst
3001 variants = parameters = api_versions = []
3002 trusted = True
3003 result.append((name, os_path, status, diagnose, variants,
3004 parameters, api_versions, trusted))
3005
3006 return result
3007
3010 """Create an OS instance from disk.
3011
3012 This function will return an OS instance if the given name is a
3013 valid OS name.
3014
3015 @type base_dir: string
3016 @keyword base_dir: Base directory containing OS installations.
3017 Defaults to a search in all the OS_SEARCH_PATH dirs.
3018 @rtype: tuple
3019 @return: success and either the OS instance if we find a valid one,
3020 or error message
3021
3022 """
3023 if base_dir is None:
3024 os_dir = utils.FindFile(name, pathutils.OS_SEARCH_PATH, os.path.isdir)
3025 else:
3026 os_dir = utils.FindFile(name, [base_dir], os.path.isdir)
3027
3028 if os_dir is None:
3029 return False, "Directory for OS %s not found in search path" % name
3030
3031 status, api_versions = _OSOndiskAPIVersion(os_dir)
3032 if not status:
3033
3034 return status, api_versions
3035
3036 if not constants.OS_API_VERSIONS.intersection(api_versions):
3037 return False, ("API version mismatch for path '%s': found %s, want %s." %
3038 (os_dir, api_versions, constants.OS_API_VERSIONS))
3039
3040
3041
3042
3043 os_files = dict.fromkeys(constants.OS_SCRIPTS, True)
3044
3045 os_files[constants.OS_SCRIPT_CREATE] = False
3046 os_files[constants.OS_SCRIPT_CREATE_UNTRUSTED] = False
3047
3048 if max(api_versions) >= constants.OS_API_V15:
3049 os_files[constants.OS_VARIANTS_FILE] = False
3050
3051 if max(api_versions) >= constants.OS_API_V20:
3052 os_files[constants.OS_PARAMETERS_FILE] = True
3053 else:
3054 del os_files[constants.OS_SCRIPT_VERIFY]
3055
3056 for (filename, required) in os_files.items():
3057 os_files[filename] = utils.PathJoin(os_dir, filename)
3058
3059 try:
3060 st = os.stat(os_files[filename])
3061 except EnvironmentError, err:
3062 if err.errno == errno.ENOENT and not required:
3063 del os_files[filename]
3064 continue
3065 return False, ("File '%s' under path '%s' is missing (%s)" %
3066 (filename, os_dir, utils.ErrnoOrStr(err)))
3067
3068 if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
3069 return False, ("File '%s' under path '%s' is not a regular file" %
3070 (filename, os_dir))
3071
3072 if filename in constants.OS_SCRIPTS:
3073 if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
3074 return False, ("File '%s' under path '%s' is not executable" %
3075 (filename, os_dir))
3076
3077 if not constants.OS_SCRIPT_CREATE in os_files and \
3078 not constants.OS_SCRIPT_CREATE_UNTRUSTED in os_files:
3079 return False, ("A create script (trusted or untrusted) under path '%s'"
3080 " must exist" % os_dir)
3081
3082 create_script = os_files.get(constants.OS_SCRIPT_CREATE, None)
3083 create_script_untrusted = os_files.get(constants.OS_SCRIPT_CREATE_UNTRUSTED,
3084 None)
3085
3086 variants = []
3087 if constants.OS_VARIANTS_FILE in os_files:
3088 variants_file = os_files[constants.OS_VARIANTS_FILE]
3089 try:
3090 variants = \
3091 utils.FilterEmptyLinesAndComments(utils.ReadFile(variants_file))
3092 except EnvironmentError, err:
3093
3094 if err.errno != errno.ENOENT:
3095 return False, ("Error while reading the OS variants file at %s: %s" %
3096 (variants_file, utils.ErrnoOrStr(err)))
3097
3098 parameters = []
3099 if constants.OS_PARAMETERS_FILE in os_files:
3100 parameters_file = os_files[constants.OS_PARAMETERS_FILE]
3101 try:
3102 parameters = utils.ReadFile(parameters_file).splitlines()
3103 except EnvironmentError, err:
3104 return False, ("Error while reading the OS parameters file at %s: %s" %
3105 (parameters_file, utils.ErrnoOrStr(err)))
3106 parameters = [v.split(None, 1) for v in parameters]
3107
3108 os_obj = objects.OS(name=name, path=os_dir,
3109 create_script=create_script,
3110 create_script_untrusted=create_script_untrusted,
3111 export_script=os_files[constants.OS_SCRIPT_EXPORT],
3112 import_script=os_files[constants.OS_SCRIPT_IMPORT],
3113 rename_script=os_files[constants.OS_SCRIPT_RENAME],
3114 verify_script=os_files.get(constants.OS_SCRIPT_VERIFY,
3115 None),
3116 supported_variants=variants,
3117 supported_parameters=parameters,
3118 api_versions=api_versions)
3119 return True, os_obj
3120
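# Summarizing _TryOSFromDisk above: a valid OS definition directory contains
# the OS_API_FILE read by _OSOndiskAPIVersion, the executable scripts listed
# in constants.OS_SCRIPTS (the verify script only being required for API
# versions >= OS_API_V20, and at least one of OS_SCRIPT_CREATE or
# OS_SCRIPT_CREATE_UNTRUSTED being mandatory), an optional OS_VARIANTS_FILE
# for API >= OS_API_V15, and a mandatory OS_PARAMETERS_FILE for API >=
# OS_API_V20 whose lines are "<name> <help text>" pairs.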
3123 """Create an OS instance from disk.
3124
3125 This function will return an OS instance if the given name is a
3126 valid OS name. Otherwise, it will raise an appropriate
3127 L{RPCFail} exception, detailing why this is not a valid OS.
3128
3129 This is just a wrapper over L{_TryOSFromDisk}, which doesn't raise
3130 an exception but returns true/false status data.
3131
3132 @type base_dir: string
3133 @keyword base_dir: Base directory containing OS installations.
3134 Defaults to a search in all the OS_SEARCH_PATH dirs.
3135 @rtype: L{objects.OS}
3136 @return: the OS instance if we find a valid one
3137 @raise RPCFail: if we don't find a valid OS
3138
3139 """
3140 name_only = objects.OS.GetName(name)
3141 status, payload = _TryOSFromDisk(name_only, base_dir)
3142
3143 if not status:
3144 _Fail(payload)
3145
3146 return payload
3147
3148
3149 -def OSCoreEnv(os_name, inst_os, os_params, debug=0):
3150 """Calculate the basic environment for an os script.
3151
3152 @type os_name: str
3153 @param os_name: full operating system name (including variant)
3154 @type inst_os: L{objects.OS}
3155 @param inst_os: operating system for which the environment is being built
3156 @type os_params: dict
3157 @param os_params: the OS parameters
3158 @type debug: integer
3159 @param debug: debug level (0 or 1, for OS Api 10)
3160 @rtype: dict
3161 @return: dict of environment variables
3162 @raise errors.BlockDeviceError: if the block device
3163 cannot be found
3164
3165 """
3166 result = {}
3167 api_version = \
3168 max(constants.OS_API_VERSIONS.intersection(inst_os.api_versions))
3169 result["OS_API_VERSION"] = "%d" % api_version
3170 result["OS_NAME"] = inst_os.name
3171 result["DEBUG_LEVEL"] = "%d" % debug
3172
3173
3174 if api_version >= constants.OS_API_V15 and inst_os.supported_variants:
3175 variant = objects.OS.GetVariant(os_name)
3176 if not variant:
3177 variant = inst_os.supported_variants[0]
3178 else:
3179 variant = ""
3180 result["OS_VARIANT"] = variant
3181
3182
3183 for pname, pvalue in os_params.items():
3184 result["OSP_%s" % pname.upper().replace("-", "_")] = pvalue
3185
3186
3187
3188
3189 result["PATH"] = constants.HOOKS_PATH
3190
3191 return result
3192
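# Illustrative OSCoreEnv result (hypothetical OS name and parameter): for
# os_name "debian+small" on an OS definition that supports variants, with
# os_params {"mirror": "http://example.com"} and debug=0, the returned
# dictionary would look roughly like
#
#   {
#     "OS_API_VERSION": "20",
#     "OS_NAME": "debian",
#     "OS_VARIANT": "small",
#     "OSP_MIRROR": "http://example.com",
#     "DEBUG_LEVEL": "0",
#     "PATH": constants.HOOKS_PATH,
#   }
#
# where "20" stands for the highest API version supported by both sides.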
3195 """Calculate the environment for an os script.
3196
3197 @type instance: L{objects.Instance}
3198 @param instance: target instance for the os script run
3199 @type inst_os: L{objects.OS}
3200 @param inst_os: operating system for which the environment is being built
3201 @type debug: integer
3202 @param debug: debug level (0 or 1, for OS Api 10)
3203 @rtype: dict
3204 @return: dict of environment variables
3205 @raise errors.BlockDeviceError: if the block device
3206 cannot be found
3207
3208 """
3209 result = OSCoreEnv(instance.os, inst_os, objects.FillDict(instance.osparams,
3210 instance.osparams_private.Unprivate()), debug=debug)
3211
3212 for attr in ["name", "os", "uuid", "ctime", "mtime", "primary_node"]:
3213 result["INSTANCE_%s" % attr.upper()] = str(getattr(instance, attr))
3214
3215 result["HYPERVISOR"] = instance.hypervisor
3216 result["DISK_COUNT"] = "%d" % len(instance.disks_info)
3217 result["NIC_COUNT"] = "%d" % len(instance.nics)
3218 result["INSTANCE_SECONDARY_NODES"] = \
3219 ("%s" % " ".join(instance.secondary_nodes))
3220
3221
3222 for idx, disk in enumerate(instance.disks_info):
3223 real_disk = _OpenRealBD(disk)
3224 result["DISK_%d_PATH" % idx] = real_disk.dev_path
3225 result["DISK_%d_ACCESS" % idx] = disk.mode
3226 result["DISK_%d_UUID" % idx] = disk.uuid
3227 if disk.name:
3228 result["DISK_%d_NAME" % idx] = disk.name
3229 if constants.HV_DISK_TYPE in instance.hvparams:
3230 result["DISK_%d_FRONTEND_TYPE" % idx] = \
3231 instance.hvparams[constants.HV_DISK_TYPE]
3232 if disk.dev_type in constants.DTS_BLOCK:
3233 result["DISK_%d_BACKEND_TYPE" % idx] = "block"
3234 elif disk.dev_type in constants.DTS_FILEBASED:
3235 result["DISK_%d_BACKEND_TYPE" % idx] = \
3236 "file:%s" % disk.logical_id[0]
3237
3238
3239 for idx, nic in enumerate(instance.nics):
3240 result["NIC_%d_MAC" % idx] = nic.mac
3241 result["NIC_%d_UUID" % idx] = nic.uuid
3242 if nic.name:
3243 result["NIC_%d_NAME" % idx] = nic.name
3244 if nic.ip:
3245 result["NIC_%d_IP" % idx] = nic.ip
3246 result["NIC_%d_MODE" % idx] = nic.nicparams[constants.NIC_MODE]
3247 if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
3248 result["NIC_%d_BRIDGE" % idx] = nic.nicparams[constants.NIC_LINK]
3249 if nic.nicparams[constants.NIC_LINK]:
3250 result["NIC_%d_LINK" % idx] = nic.nicparams[constants.NIC_LINK]
3251 if nic.netinfo:
3252 nobj = objects.Network.FromDict(nic.netinfo)
3253 result.update(nobj.HooksDict("NIC_%d_" % idx))
3254 if constants.HV_NIC_TYPE in instance.hvparams:
3255 result["NIC_%d_FRONTEND_TYPE" % idx] = \
3256 instance.hvparams[constants.HV_NIC_TYPE]
3257
3258
3259 for source, kind in [(instance.beparams, "BE"), (instance.hvparams, "HV")]:
3260 for key, value in source.items():
3261 result["INSTANCE_%s_%s" % (kind, key)] = str(value)
3262
3263 return result
3264
3267 """Compute the validity for all ExtStorage Providers.
3268
3269 @type top_dirs: list
3270 @param top_dirs: the list of directories in which to
3271 search (if not given defaults to
3272 L{pathutils.ES_SEARCH_PATH})
3273 @rtype: list of L{objects.ExtStorage}
3274 @return: a list of tuples (name, path, status, diagnose, parameters)
3275 for all (potential) ExtStorage Providers under all
3276 search paths, where:
3277 - name is the (potential) ExtStorage Provider
3278 - path is the full path to the ExtStorage Provider
3279 - status True/False is the validity of the ExtStorage Provider
3280 - diagnose is the error message for an invalid ExtStorage Provider,
3281 otherwise empty
3282 - parameters is a list of (name, help) parameters, if any
3283
3284 """
3285 if top_dirs is None:
3286 top_dirs = pathutils.ES_SEARCH_PATH
3287
3288 result = []
3289 for dir_name in top_dirs:
3290 if os.path.isdir(dir_name):
3291 try:
3292 f_names = utils.ListVisibleFiles(dir_name)
3293 except EnvironmentError, err:
3294 logging.exception("Can't list the ExtStorage directory %s: %s",
3295 dir_name, err)
3296 break
3297 for name in f_names:
3298 es_path = utils.PathJoin(dir_name, name)
3299 status, es_inst = bdev.ExtStorageFromDisk(name, base_dir=dir_name)
3300 if status:
3301 diagnose = ""
3302 parameters = es_inst.supported_parameters
3303 else:
3304 diagnose = es_inst
3305 parameters = []
3306 result.append((name, es_path, status, diagnose, parameters))
3307
3308 return result
3309
3310
3311 -def BlockdevGrow(disk, amount, dryrun, backingstore, excl_stor):
3312 """Grow a stack of block devices.
3313
3314 This function is called recursively, with the children being the
3315 first ones to resize.
3316
3317 @type disk: L{objects.Disk}
3318 @param disk: the disk to be grown
3319 @type amount: integer
3320 @param amount: the amount (in mebibytes) to grow with
3321 @type dryrun: boolean
3322 @param dryrun: whether to execute the operation in simulation mode
3323 only, without actually increasing the size
3324 @param backingstore: whether to execute the operation on backing storage
3325 only, or on "logical" storage only; e.g. DRBD is logical storage,
3326 whereas LVM, file, RBD are backing storage
3327 @rtype: (status, result)
3328 @type excl_stor: boolean
3329 @param excl_stor: Whether exclusive_storage is active
3330 @return: a tuple with the status of the operation (True/False), and
3331 the errors message if status is False
3332
3333 """
3334 r_dev = _RecursiveFindBD(disk)
3335 if r_dev is None:
3336 _Fail("Cannot find block device %s", disk)
3337
3338 try:
3339 r_dev.Grow(amount, dryrun, backingstore, excl_stor)
3340 except errors.BlockDeviceError, err:
3341 _Fail("Failed to grow block device: %s", err, exc=True)
3342
3345 """Create a snapshot copy of a block device.
3346
3347 This function is called recursively, and the snapshot is actually created
3348 just for the leaf lvm backend device.
3349
3350 @type disk: L{objects.Disk}
3351 @param disk: the disk to be snapshotted
3352 @rtype: string
3353 @return: snapshot disk ID as (vg, lv)
3354
3355 """
3356 if disk.dev_type == constants.DT_DRBD8:
3357 if not disk.children:
3358 _Fail("DRBD device '%s' without backing storage cannot be snapshotted",
3359 disk.unique_id)
3360 return BlockdevSnapshot(disk.children[0])
3361 elif disk.dev_type == constants.DT_PLAIN:
3362 r_dev = _RecursiveFindBD(disk)
3363 if r_dev is not None:
3364
3365
3366 return r_dev.Snapshot(disk.size)
3367 else:
3368 _Fail("Cannot find block device %s", disk)
3369 else:
3370 _Fail("Cannot snapshot non-lvm block device '%s' of type '%s'",
3371 disk.logical_id, disk.dev_type)
3372
3375 """Sets 'metadata' information on block devices.
3376
3377 This function sets 'info' metadata on block devices. Initial
3378 information is set at device creation; this function should be used
3379 for example after renames.
3380
3381 @type disk: L{objects.Disk}
3382 @param disk: the disk to be grown
3383 @type info: string
3384 @param info: new 'info' metadata
3385 @rtype: (status, result)
3386 @return: a tuple with the status of the operation (True/False), and
3387 the errors message if status is False
3388
3389 """
3390 r_dev = _RecursiveFindBD(disk)
3391 if r_dev is None:
3392 _Fail("Cannot find block device %s", disk)
3393
3394 try:
3395 r_dev.SetInfo(info)
3396 except errors.BlockDeviceError, err:
3397 _Fail("Failed to set information on block device: %s", err, exc=True)
3398
3401 """Write out the export configuration information.
3402
3403 @type instance: L{objects.Instance}
3404 @param instance: the instance which we export, used for
3405 saving configuration
3406 @type snap_disks: list of L{objects.Disk}
3407 @param snap_disks: list of snapshot block devices, which
3408 will be used to get the actual name of the dump file
3409
3410 @rtype: None
3411
3412 """
3413 destdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name + ".new")
3414 finaldestdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name)
3415
3416 config = objects.SerializableConfigParser()
3417
3418 config.add_section(constants.INISECT_EXP)
3419 config.set(constants.INISECT_EXP, "version", "0")
3420 config.set(constants.INISECT_EXP, "timestamp", "%d" % int(time.time()))
3421 config.set(constants.INISECT_EXP, "source", instance.primary_node)
3422 config.set(constants.INISECT_EXP, "os", instance.os)
3423 config.set(constants.INISECT_EXP, "compression", "none")
3424
3425 config.add_section(constants.INISECT_INS)
3426 config.set(constants.INISECT_INS, "name", instance.name)
3427 config.set(constants.INISECT_INS, "maxmem", "%d" %
3428 instance.beparams[constants.BE_MAXMEM])
3429 config.set(constants.INISECT_INS, "minmem", "%d" %
3430 instance.beparams[constants.BE_MINMEM])
3431
3432 config.set(constants.INISECT_INS, "memory", "%d" %
3433 instance.beparams[constants.BE_MAXMEM])
3434 config.set(constants.INISECT_INS, "vcpus", "%d" %
3435 instance.beparams[constants.BE_VCPUS])
3436 config.set(constants.INISECT_INS, "disk_template", instance.disk_template)
3437 config.set(constants.INISECT_INS, "hypervisor", instance.hypervisor)
3438 config.set(constants.INISECT_INS, "tags", " ".join(instance.GetTags()))
3439
3440 nic_total = 0
3441 for nic_count, nic in enumerate(instance.nics):
3442 nic_total += 1
3443 config.set(constants.INISECT_INS, "nic%d_mac" %
3444 nic_count, "%s" % nic.mac)
3445 config.set(constants.INISECT_INS, "nic%d_ip" % nic_count, "%s" % nic.ip)
3446 config.set(constants.INISECT_INS, "nic%d_network" % nic_count,
3447 "%s" % nic.network)
3448 config.set(constants.INISECT_INS, "nic%d_name" % nic_count,
3449 "%s" % nic.name)
3450 for param in constants.NICS_PARAMETER_TYPES:
3451 config.set(constants.INISECT_INS, "nic%d_%s" % (nic_count, param),
3452 "%s" % nic.nicparams.get(param, None))
3453
3454 config.set(constants.INISECT_INS, "nic_count", "%d" % nic_total)
3455
3456 disk_total = 0
3457 for disk_count, disk in enumerate(snap_disks):
3458 if disk:
3459 disk_total += 1
3460 config.set(constants.INISECT_INS, "disk%d_ivname" % disk_count,
3461 ("%s" % disk.iv_name))
3462 config.set(constants.INISECT_INS, "disk%d_dump" % disk_count,
3463 ("%s" % disk.logical_id[1]))
3464 config.set(constants.INISECT_INS, "disk%d_size" % disk_count,
3465 ("%d" % disk.size))
3466 config.set(constants.INISECT_INS, "disk%d_name" % disk_count,
3467 "%s" % disk.name)
3468
3469 config.set(constants.INISECT_INS, "disk_count", "%d" % disk_total)
3470
3471
3472
3473 config.add_section(constants.INISECT_HYP)
3474 for name, value in instance.hvparams.items():
3475 if name not in constants.HVC_GLOBALS:
3476 config.set(constants.INISECT_HYP, name, str(value))
3477
3478 config.add_section(constants.INISECT_BEP)
3479 for name, value in instance.beparams.items():
3480 config.set(constants.INISECT_BEP, name, str(value))
3481
3482 config.add_section(constants.INISECT_OSP)
3483 for name, value in instance.osparams.items():
3484 config.set(constants.INISECT_OSP, name, str(value))
3485
3486 config.add_section(constants.INISECT_OSP_PRIVATE)
3487 for name, value in instance.osparams_private.items():
3488 config.set(constants.INISECT_OSP_PRIVATE, name, str(value.Get()))
3489
3490 utils.WriteFile(utils.PathJoin(destdir, constants.EXPORT_CONF_FILE),
3491 data=config.Dumps())
3492 shutil.rmtree(finaldestdir, ignore_errors=True)
3493 shutil.move(destdir, finaldestdir)
3494
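# Illustrative excerpt of the export configuration written above (assuming
# constants.INISECT_EXP and constants.INISECT_INS expand to "export" and
# "instance"; the values are made up):
#
#   [export]
#   version = 0
#   timestamp = 1700000000
#   os = debootstrap+default
#   compression = none
#
#   [instance]
#   name = inst1.example.com
#   maxmem = 1024
#   minmem = 512
#   vcpus = 2
#   nic_count = 1
#   disk_count = 1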
3517
3520 """Return a list of exports currently available on this machine.
3521
3522 @rtype: list
3523 @return: list of the exports
3524
3525 """
3526 if os.path.isdir(pathutils.EXPORT_DIR):
3527 return sorted(utils.ListVisibleFiles(pathutils.EXPORT_DIR))
3528 else:
3529 _Fail("No exports directory")
3530
3533 """Remove an existing export from the node.
3534
3535 @type export: str
3536 @param export: the name of the export to remove
3537 @rtype: None
3538
3539 """
3540 target = utils.PathJoin(pathutils.EXPORT_DIR, export)
3541
3542 try:
3543 shutil.rmtree(target)
3544 except EnvironmentError, err:
3545 _Fail("Error while removing the export: %s", err, exc=True)
3546
3549 """Rename a list of block devices.
3550
3551 @type devlist: list of tuples
3552 @param devlist: list of tuples of the form (disk, new_unique_id); disk is
3553 an L{objects.Disk} object describing the current disk, and new
3554 unique_id is the name we rename it to
3555 @rtype: boolean
3556 @return: True if all renames succeeded, False otherwise
3557
3558 """
3559 msgs = []
3560 result = True
3561 for disk, unique_id in devlist:
3562 dev = _RecursiveFindBD(disk)
3563 if dev is None:
3564 msgs.append("Can't find device %s in rename" % str(disk))
3565 result = False
3566 continue
3567 try:
3568 old_rpath = dev.dev_path
3569 dev.Rename(unique_id)
3570 new_rpath = dev.dev_path
3571 if old_rpath != new_rpath:
3572 DevCacheManager.RemoveCache(old_rpath)
3573
3574
3575
3576
3577
3578 except errors.BlockDeviceError, err:
3579 msgs.append("Can't rename device '%s' to '%s': %s" %
3580 (dev, unique_id, err))
3581 logging.exception("Can't rename device '%s' to '%s'", dev, unique_id)
3582 result = False
3583 if not result:
3584 _Fail("; ".join(msgs))
3585
3603
3606 """Create file storage directory.
3607
3608 @type file_storage_dir: str
3609 @param file_storage_dir: directory to create
3610
3611 @rtype: tuple
3612 @return: tuple with first element a boolean indicating whether dir
3613 creation was successful or not
3614
3615 """
3616 file_storage_dir = _TransformFileStorageDir(file_storage_dir)
3617 if os.path.exists(file_storage_dir):
3618 if not os.path.isdir(file_storage_dir):
3619 _Fail("Specified storage dir '%s' is not a directory",
3620 file_storage_dir)
3621 else:
3622 try:
3623 os.makedirs(file_storage_dir, 0750)
3624 except OSError, err:
3625 _Fail("Cannot create file storage directory '%s': %s",
3626 file_storage_dir, err, exc=True)
3627
3630 """Remove file storage directory.
3631
3632 Remove it only if it's empty. If not, log an error and return.
3633
3634 @type file_storage_dir: str
3635 @param file_storage_dir: the directory we should cleanup
3636 @rtype: tuple (success,)
3637 @return: tuple of one element, C{success}, denoting
3638 whether the operation was successful
3639
3640 """
3641 file_storage_dir = _TransformFileStorageDir(file_storage_dir)
3642 if os.path.exists(file_storage_dir):
3643 if not os.path.isdir(file_storage_dir):
3644 _Fail("Specified Storage directory '%s' is not a directory",
3645 file_storage_dir)
3646
3647 try:
3648 os.rmdir(file_storage_dir)
3649 except OSError, err:
3650 _Fail("Cannot remove file storage directory '%s': %s",
3651 file_storage_dir, err)
3652
3655 """Rename the file storage directory.
3656
3657 @type old_file_storage_dir: str
3658 @param old_file_storage_dir: the current path
3659 @type new_file_storage_dir: str
3660 @param new_file_storage_dir: the name we should rename to
3661 @rtype: tuple (success,)
3662 @return: tuple of one element, C{success}, denoting
3663 whether the operation was successful
3664
3665 """
3666 old_file_storage_dir = _TransformFileStorageDir(old_file_storage_dir)
3667 new_file_storage_dir = _TransformFileStorageDir(new_file_storage_dir)
3668 if not os.path.exists(new_file_storage_dir):
3669 if os.path.isdir(old_file_storage_dir):
3670 try:
3671 os.rename(old_file_storage_dir, new_file_storage_dir)
3672 except OSError, err:
3673 _Fail("Cannot rename '%s' to '%s': %s",
3674 old_file_storage_dir, new_file_storage_dir, err)
3675 else:
3676 _Fail("Specified storage dir '%s' is not a directory",
3677 old_file_storage_dir)
3678 else:
3679 if os.path.exists(old_file_storage_dir):
3680 _Fail("Cannot rename '%s' to '%s': both locations exist",
3681 old_file_storage_dir, new_file_storage_dir)
3682
3685 """Checks whether the given filename is in the queue directory.
3686
3687 @type file_name: str
3688 @param file_name: the file name we should check
3689 @rtype: None
3690 @raises RPCFail: if the file is not valid
3691
3692 """
3693 if not utils.IsBelowDir(pathutils.QUEUE_DIR, file_name):
3694 _Fail("Passed job queue file '%s' does not belong to"
3695 " the queue directory '%s'", file_name, pathutils.QUEUE_DIR)
3696
3699 """Updates a file in the queue directory.
3700
3701 This is just a wrapper over L{utils.io.WriteFile}, with proper
3702 checking.
3703
3704 @type file_name: str
3705 @param file_name: the job file name
3706 @type content: str
3707 @param content: the new job contents
3708 @rtype: boolean
3709 @return: the success of the operation
3710
3711 """
3712 file_name = vcluster.LocalizeVirtualPath(file_name)
3713
3714 _EnsureJobQueueFile(file_name)
3715 getents = runtime.GetEnts()
3716
3717
3718 utils.WriteFile(file_name, data=_Decompress(content), uid=getents.masterd_uid,
3719 gid=getents.daemons_gid, mode=constants.JOB_QUEUE_FILES_PERMS)
3720
3723 """Renames a job queue file.
3724
3725 This is just a wrapper over os.rename with proper checking.
3726
3727 @type old: str
3728 @param old: the old (actual) file name
3729 @type new: str
3730 @param new: the desired file name
3731 @rtype: tuple
3732 @return: the success of the operation and payload
3733
3734 """
3735 old = vcluster.LocalizeVirtualPath(old)
3736 new = vcluster.LocalizeVirtualPath(new)
3737
3738 _EnsureJobQueueFile(old)
3739 _EnsureJobQueueFile(new)
3740
3741 getents = runtime.GetEnts()
3742
3743 utils.RenameFile(old, new, mkdir=True, mkdir_mode=0750,
3744 dir_uid=getents.masterd_uid, dir_gid=getents.daemons_gid)
3745
3748 """Closes the given block devices.
3749
3750 This means they will be switched to secondary mode (in case of
3751 DRBD).
3752
3753 @param instance_name: if the argument is not empty, the symlinks
3754 of this instance will be removed
3755 @type disks: list of L{objects.Disk}
3756 @param disks: the list of disks to be closed
3757 @rtype: tuple (success, message)
3758 @return: a tuple of success and message, where success
3759 indicates the success of the operation, and message
3760 which will contain the error details in case we
3761 failed
3762
3763 """
3764 bdevs = []
3765 for cf in disks:
3766 rd = _RecursiveFindBD(cf)
3767 if rd is None:
3768 _Fail("Can't find device %s", cf)
3769 bdevs.append(rd)
3770
3771 msg = []
3772 for rd in bdevs:
3773 try:
3774 rd.Close()
3775 except errors.BlockDeviceError, err:
3776 msg.append(str(err))
3777 if msg:
3778 _Fail("Can't make devices secondary: %s", ",".join(msg))
3779 else:
3780 if instance_name:
3781 _RemoveBlockDevLinks(instance_name, disks)
3782
3785 """Validates the given hypervisor parameters.
3786
3787 @type hvname: string
3788 @param hvname: the hypervisor name
3789 @type hvparams: dict
3790 @param hvparams: the hypervisor parameters to be validated
3791 @rtype: None
3792
3793 """
3794 try:
3795 hv_type = hypervisor.GetHypervisor(hvname)
3796 hv_type.ValidateParameters(hvparams)
3797 except errors.HypervisorError, err:
3798 _Fail(str(err), log=False)
3799
3802 """Check whether a list of parameters is supported by the OS.
3803
3804 @type os_obj: L{objects.OS}
3805 @param os_obj: OS object to check
3806 @type parameters: list
3807 @param parameters: the list of parameters to check
3808
3809 """
3810 supported = [v[0] for v in os_obj.supported_parameters]
3811 delta = frozenset(parameters).difference(supported)
3812 if delta:
3813 _Fail("The following parameters are not supported"
3814 " by the OS %s: %s" % (os_obj.name, utils.CommaJoin(delta)))
3815
3818 """Check whether an OS name conforms to the os variants specification.
3819
3820 @type os_obj: L{objects.OS}
3821 @param os_obj: OS object to check
3822
3823 @type name: string
3824 @param name: OS name passed by the user, to check for validity
3825
3826 @rtype: NoneType
3827 @return: None
3828 @raise RPCFail: if OS variant is not valid
3829
3830 """
3831 variant = objects.OS.GetVariant(name)
3832
3833 if not os_obj.supported_variants:
3834 if variant:
3835 _Fail("OS '%s' does not support variants ('%s' passed)" %
3836 (os_obj.name, variant))
3837 else:
3838 return
3839
3840 if not variant:
3841 _Fail("OS name '%s' must include a variant" % name)
3842
3843 if variant not in os_obj.supported_variants:
3844 _Fail("OS '%s' does not support variant '%s'" % (os_obj.name, variant))
3845
3846
3847 -def ValidateOS(required, osname, checks, osparams, force_variant):
3848 """Validate the given OS parameters.
3849
3850 @type required: boolean
3851 @param required: whether absence of the OS should translate into
3852 failure or not
3853 @type osname: string
3854 @param osname: the OS to be validated
3855 @type checks: list
3856 @param checks: list of the checks to run (currently only 'parameters')
3857 @type osparams: dict
3858 @param osparams: dictionary with OS parameters, some of which may be
3859 private.
3860 @rtype: boolean
3861 @return: True if the validation passed, or False if the OS was not
3862 found and L{required} was false
3863
3864 """
3865 if not constants.OS_VALIDATE_CALLS.issuperset(checks):
3866 _Fail("Unknown checks required for OS %s: %s", osname,
3867 set(checks).difference(constants.OS_VALIDATE_CALLS))
3868
3869 name_only = objects.OS.GetName(osname)
3870 status, tbv = _TryOSFromDisk(name_only, None)
3871
3872 if not status:
3873 if required:
3874 _Fail(tbv)
3875 else:
3876 return False
3877
3878 if not force_variant:
3879 _CheckOSVariant(tbv, osname)
3880
3881 if max(tbv.api_versions) < constants.OS_API_V20:
3882 return True
3883
3884 if constants.OS_VALIDATE_PARAMETERS in checks:
3885 _CheckOSPList(tbv, osparams.keys())
3886
3887 validate_env = OSCoreEnv(osname, tbv, osparams)
3888 result = utils.RunCmd([tbv.verify_script] + checks, env=validate_env,
3889 cwd=tbv.path, reset_env=True)
3890 if result.failed:
3891 logging.error("os validate command '%s' returned error: %s output: %s",
3892 result.cmd, result.fail_reason, result.output)
3893 _Fail("OS validation script failed (%s), output: %s",
3894 result.fail_reason, result.output, log=False)
3895
3896 return True
3897
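# Illustrative invocation resulting from the validation above: for
# checks=["parameters"] and an OS definition living under a hypothetical
# /srv/ganeti/os/debian, the node runs roughly
#
#   cd /srv/ganeti/os/debian && ./verify parameters
#
# (assuming the script name behind constants.OS_SCRIPT_VERIFY is "verify"),
# with a reset environment containing only the variables built by OSCoreEnv
# (OS_API_VERSION, OS_NAME, OS_VARIANT, OSP_*, DEBUG_LEVEL and PATH).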
3898
def ExportOS(instance, override_env):
  """Creates a GZIPed tarball with an OS definition and environment.

  The archive contains a file with the environment variables needed by
  the OS scripts.

  @type instance: L{objects.Instance}
  @param instance: instance for which the OS definition is exported

  @type override_env: dict of string to string
  @param override_env: if supplied, overrides on a key-by-key basis the
      environment that is put into the archive

  @rtype: string
  @return: filepath of the archive

  """
  assert instance
  assert instance.os

  temp_dir = tempfile.mkdtemp()
  inst_os = OSFromDisk(instance.os)

  # symlink the OS directory into the staging area; "tar --dereference"
  # below turns the link into a real copy inside the archive
  result = utils.RunCmd(["ln", "-s", inst_os.path,
                         utils.PathJoin(temp_dir, "os")])
  if result.failed:
    _Fail("Failed to link OS package '%s' into '%s': %s, output '%s'",
          inst_os, temp_dir, result.fail_reason, result.output)

  env = OSEnvironment(instance, inst_os)
  env.update(override_env)

  with open(utils.PathJoin(temp_dir, "environment"), "w") as f:
    for var in env:
      f.write(var + "=" + env[var] + "\n")

  (fd, os_package) = tempfile.mkstemp(suffix=".tgz")
  os.close(fd)

  result = utils.RunCmd(["tar", "--dereference", "-czv",
                         "-f", os_package,
                         "-C", temp_dir,
                         "."])
  if result.failed:
    _Fail("Failed to create OS archive '%s': %s, output '%s'",
          os_package, result.fail_reason, result.output)

  result = utils.RunCmd(["rm", "-rf", temp_dir])
  if result.failed:
    _Fail("Failed to remove copy of OS package '%s' in '%s': %s, output '%s'",
          inst_os, temp_dir, result.fail_reason, result.output)

  return os_package

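# Resulting archive layout (sketch): unpacking the returned .tgz yields
#
#   ./os/          - dereferenced copy of the OS definition directory
#   ./environment  - "VAR=value" lines from OSEnvironment(), with any
#                    entries from override_env taking precedence
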
def _GetX509Filenames(cryptodir, name):
  """Returns the paths of the certificate directory, key file and
  certificate file for a named X509 certificate.

  """
  return (utils.PathJoin(cryptodir, name),
          utils.PathJoin(cryptodir, name, _X509_KEY_FILE),
          utils.PathJoin(cryptodir, name, _X509_CERT_FILE))


def CreateX509Certificate(validity, cryptodir=pathutils.CRYPTO_KEYS_DIR):
  """Creates a new X509 certificate for SSL/TLS.

  @type validity: int
  @param validity: Validity in seconds
  @rtype: tuple; (string, string)
  @return: Certificate name and public part

  """
  serial_no = int(time.time())
  (key_pem, cert_pem) = \
    utils.GenerateSelfSignedX509Cert(netutils.Hostname.GetSysName(),
                                     min(validity, _MAX_SSL_CERT_VALIDITY),
                                     serial_no)

  cert_dir = tempfile.mkdtemp(dir=cryptodir,
                              prefix="x509-%s-" % utils.TimestampForFilename())
  try:
    name = os.path.basename(cert_dir)
    assert len(name) > 5

    (_, key_file, cert_file) = _GetX509Filenames(cryptodir, name)

    utils.WriteFile(key_file, mode=0400, data=key_pem)
    utils.WriteFile(cert_file, mode=0400, data=cert_pem)

    # the private key must not leave the node; only the certificate is
    # returned
    return (name, cert_pem)
  except Exception:
    shutil.rmtree(cert_dir, ignore_errors=True)
    raise

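# Usage sketch (values hypothetical): create a one-day certificate and
# remove it again by the returned name.
#
#   >>> (name, cert_pem) = CreateX509Certificate(24 * 60 * 60)
#   >>> RemoveX509Certificate(name)
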
4020 """Removes a X509 certificate.
4021
4022 @type name: string
4023 @param name: Certificate name
4024
4025 """
4026 (cert_dir, key_file, cert_file) = _GetX509Filenames(cryptodir, name)
4027
4028 utils.RemoveFile(key_file)
4029 utils.RemoveFile(cert_file)
4030
4031 try:
4032 os.rmdir(cert_dir)
4033 except EnvironmentError, err:
4034 _Fail("Cannot remove certificate directory '%s': %s",
4035 cert_dir, err)
4036
4039 """Returns the command for the requested input/output.
4040
4041 @type instance: L{objects.Instance}
4042 @param instance: The instance object
4043 @param mode: Import/export mode
4044 @param ieio: Input/output type
4045 @param ieargs: Input/output arguments
4046
4047 """
4048 assert mode in (constants.IEM_IMPORT, constants.IEM_EXPORT)
4049
4050 env = None
4051 prefix = None
4052 suffix = None
4053 exp_size = None
4054
4055 if ieio == constants.IEIO_FILE:
4056 (filename, ) = ieargs
4057
4058 if not utils.IsNormAbsPath(filename):
4059 _Fail("Path '%s' is not normalized or absolute", filename)
4060
4061 real_filename = os.path.realpath(filename)
4062 directory = os.path.dirname(real_filename)
4063
4064 if not utils.IsBelowDir(pathutils.EXPORT_DIR, real_filename):
4065 _Fail("File '%s' is not under exports directory '%s': %s",
4066 filename, pathutils.EXPORT_DIR, real_filename)
4067
4068
4069 utils.Makedirs(directory, mode=0750)
4070
4071 quoted_filename = utils.ShellQuote(filename)
4072
4073 if mode == constants.IEM_IMPORT:
4074 suffix = "> %s" % quoted_filename
4075 elif mode == constants.IEM_EXPORT:
4076 suffix = "< %s" % quoted_filename
4077
4078
4079 try:
4080 st = os.stat(filename)
4081 except EnvironmentError, err:
4082 logging.error("Can't stat(2) %s: %s", filename, err)
4083 else:
4084 exp_size = utils.BytesToMebibyte(st.st_size)
4085
4086 elif ieio == constants.IEIO_RAW_DISK:
4087 (disk, ) = ieargs
4088
4089 real_disk = _OpenRealBD(disk)
4090
4091 if mode == constants.IEM_IMPORT:
4092
4093
4094 suffix = utils.BuildShellCmd("| dd of=%s conv=nocreat,notrunc bs=%s",
4095 real_disk.dev_path,
4096 str(1024 * 1024))
4097
4098 elif mode == constants.IEM_EXPORT:
4099
4100 prefix = utils.BuildShellCmd("dd if=%s bs=%s count=%s |",
4101 real_disk.dev_path,
4102 str(1024 * 1024),
4103 str(disk.size))
4104 exp_size = disk.size
4105
4106 elif ieio == constants.IEIO_SCRIPT:
4107 (disk, disk_index, ) = ieargs
4108
4109 assert isinstance(disk_index, (int, long))
4110
4111 inst_os = OSFromDisk(instance.os)
4112 env = OSEnvironment(instance, inst_os)
4113
4114 if mode == constants.IEM_IMPORT:
4115 env["IMPORT_DEVICE"] = env["DISK_%d_PATH" % disk_index]
4116 env["IMPORT_INDEX"] = str(disk_index)
4117 script = inst_os.import_script
4118
4119 elif mode == constants.IEM_EXPORT:
4120 real_disk = _OpenRealBD(disk)
4121 env["EXPORT_DEVICE"] = real_disk.dev_path
4122 env["EXPORT_INDEX"] = str(disk_index)
4123 script = inst_os.export_script
4124
4125
4126 script_cmd = utils.BuildShellCmd("( cd %s && %s; )", inst_os.path, script)
4127
4128 if mode == constants.IEM_IMPORT:
4129 suffix = "| %s" % script_cmd
4130
4131 elif mode == constants.IEM_EXPORT:
4132 prefix = "%s |" % script_cmd
4133
4134
4135 exp_size = constants.IE_CUSTOM_SIZE
4136
4137 else:
4138 _Fail("Invalid %s I/O mode %r", mode, ieio)
4139
4140 return (env, prefix, suffix, exp_size)
4141
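# Result sketch for a raw-disk export (hypothetical 1 GiB disk on
# /dev/drbd0):
#
#   >>> _GetImportExportIoCommand(instance, constants.IEM_EXPORT,
#   ...                           constants.IEIO_RAW_DISK, (disk, ))
#   (None, 'dd if=/dev/drbd0 bs=1048576 count=1024 |', None, 1024)
#
# The prefix/suffix are later passed to the import/export daemon via its
# --cmd-prefix/--cmd-suffix options (see StartImportExportDaemon below).
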
4144 """Creates status directory for import/export.
4145
4146 """
4147 return tempfile.mkdtemp(dir=pathutils.IMPORT_EXPORT_DIR,
4148 prefix=("%s-%s-" %
4149 (prefix, utils.TimestampForFilename())))
4150
4154 """Starts an import or export daemon.
4155
4156 @param mode: Import/output mode
4157 @type opts: L{objects.ImportExportOptions}
4158 @param opts: Daemon options
4159 @type host: string
4160 @param host: Remote host for export (None for import)
4161 @type port: int
4162 @param port: Remote port for export (None for import)
4163 @type instance: L{objects.Instance}
4164 @param instance: Instance object
4165 @type component: string
4166 @param component: which part of the instance is transferred now,
4167 e.g. 'disk/0'
4168 @param ieio: Input/output type
4169 @param ieioargs: Input/output arguments
4170
4171 """
4172 if mode == constants.IEM_IMPORT:
4173 prefix = "import"
4174
4175 if not (host is None and port is None):
4176 _Fail("Can not specify host or port on import")
4177
4178 elif mode == constants.IEM_EXPORT:
4179 prefix = "export"
4180
4181 if host is None or port is None:
4182 _Fail("Host and port must be specified for an export")
4183
4184 else:
4185 _Fail("Invalid mode %r", mode)
4186
4187 if (opts.key_name is None) ^ (opts.ca_pem is None):
4188 _Fail("Cluster certificate can only be used for both key and CA")
4189
4190 (cmd_env, cmd_prefix, cmd_suffix, exp_size) = \
4191 _GetImportExportIoCommand(instance, mode, ieio, ieioargs)
4192
4193 if opts.key_name is None:
4194
4195 key_path = pathutils.NODED_CERT_FILE
4196 cert_path = pathutils.NODED_CERT_FILE
4197 assert opts.ca_pem is None
4198 else:
4199 (_, key_path, cert_path) = _GetX509Filenames(pathutils.CRYPTO_KEYS_DIR,
4200 opts.key_name)
4201 assert opts.ca_pem is not None
4202
4203 for i in [key_path, cert_path]:
4204 if not os.path.exists(i):
4205 _Fail("File '%s' does not exist" % i)
4206
4207 status_dir = _CreateImportExportStatusDir("%s-%s" % (prefix, component))
4208 try:
4209 status_file = utils.PathJoin(status_dir, _IES_STATUS_FILE)
4210 pid_file = utils.PathJoin(status_dir, _IES_PID_FILE)
4211 ca_file = utils.PathJoin(status_dir, _IES_CA_FILE)
4212
4213 if opts.ca_pem is None:
4214
4215 ca = utils.ReadFile(pathutils.NODED_CERT_FILE)
4216 else:
4217 ca = opts.ca_pem
4218
4219
4220 utils.WriteFile(ca_file, data=ca, mode=0400)
4221
4222 cmd = [
4223 pathutils.IMPORT_EXPORT_DAEMON,
4224 status_file, mode,
4225 "--key=%s" % key_path,
4226 "--cert=%s" % cert_path,
4227 "--ca=%s" % ca_file,
4228 ]
4229
4230 if host:
4231 cmd.append("--host=%s" % host)
4232
4233 if port:
4234 cmd.append("--port=%s" % port)
4235
4236 if opts.ipv6:
4237 cmd.append("--ipv6")
4238 else:
4239 cmd.append("--ipv4")
4240
4241 if opts.compress:
4242 cmd.append("--compress=%s" % opts.compress)
4243
4244 if opts.magic:
4245 cmd.append("--magic=%s" % opts.magic)
4246
4247 if exp_size is not None:
4248 cmd.append("--expected-size=%s" % exp_size)
4249
4250 if cmd_prefix:
4251 cmd.append("--cmd-prefix=%s" % cmd_prefix)
4252
4253 if cmd_suffix:
4254 cmd.append("--cmd-suffix=%s" % cmd_suffix)
4255
4256 if mode == constants.IEM_EXPORT:
4257
4258 cmd.append("--connect-retries=%s" % constants.RIE_CONNECT_RETRIES)
4259 cmd.append("--connect-timeout=%s" % constants.RIE_CONNECT_ATTEMPT_TIMEOUT)
4260 elif opts.connect_timeout is not None:
4261 assert mode == constants.IEM_IMPORT
4262
4263 cmd.append("--connect-timeout=%s" % opts.connect_timeout)
4264
4265 logfile = _InstanceLogName(prefix, instance.os, instance.name, component)
4266
4267
4268
4269 utils.StartDaemon(cmd, env=cmd_env, pidfile=pid_file,
4270 output=logfile)
4271
4272
4273 return os.path.basename(status_dir)
4274
4275 except Exception:
4276 shutil.rmtree(status_dir, ignore_errors=True)
4277 raise
4278
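# Command-line sketch of the daemon started for an export (paths and
# values hypothetical):
#
#   <IMPORT_EXPORT_DAEMON> <status_dir>/status export
#       --key=... --cert=... --ca=<status_dir>/ca
#       --host=198.51.100.4 --port=1234 --ipv4
#       --cmd-prefix='dd if=/dev/drbd0 bs=1048576 count=1024 |'
#       --expected-size=1024
#       --connect-retries=... --connect-timeout=...
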
4281 """Returns import/export daemon status.
4282
4283 @type names: sequence
4284 @param names: List of names
4285 @rtype: List of dicts
4286 @return: Returns a list of the state of each named import/export or None if a
4287 status couldn't be read
4288
4289 """
4290 result = []
4291
4292 for name in names:
4293 status_file = utils.PathJoin(pathutils.IMPORT_EXPORT_DIR, name,
4294 _IES_STATUS_FILE)
4295
4296 try:
4297 data = utils.ReadFile(status_file)
4298 except EnvironmentError, err:
4299 if err.errno != errno.ENOENT:
4300 raise
4301 data = None
4302
4303 if not data:
4304 result.append(None)
4305 continue
4306
4307 result.append(serializer.LoadJson(data))
4308
4309 return result
4310
def CleanupImportExport(name):
  """Cleanup after an import or export.

  If the import/export daemon is still running it's killed. Afterwards the
  whole status directory is removed.

  """
  logging.info("Finalizing import/export %s", name)

  status_dir = utils.PathJoin(pathutils.IMPORT_EXPORT_DIR, name)

  pid = utils.ReadLockedPidFile(utils.PathJoin(status_dir, _IES_PID_FILE))

  if pid:
    logging.info("Import/export %s is still running with PID %s",
                 name, pid)
    utils.KillProcess(pid, waitpid=False)

  shutil.rmtree(status_dir, ignore_errors=True)

4349 """Finds attached L{BlockDev}s for the given disks.
4350
4351 @type disks: list of L{objects.Disk}
4352 @param disks: the disk objects we need to find
4353
4354 @return: list of L{BlockDev} objects or C{None} if a given disk
4355 was not found or was no attached.
4356
4357 """
4358 bdevs = []
4359
4360 for disk in disks:
4361 rd = _RecursiveFindBD(disk)
4362 if rd is None:
4363 _Fail("Can't find device %s", disk)
4364 bdevs.append(rd)
4365 return bdevs
4366
4369 """Disconnects the network on a list of drbd devices.
4370
4371 """
4372 bdevs = _FindDisks(disks)
4373
4374
4375 for rd in bdevs:
4376 try:
4377 rd.DisconnectNet()
4378 except errors.BlockDeviceError, err:
4379 _Fail("Can't change network configuration to standalone mode: %s",
4380 err, exc=True)
4381
4384 """Attaches the network on a list of drbd devices.
4385
4386 """
4387 bdevs = _FindDisks(disks)
4388
4389 if multimaster:
4390 for idx, rd in enumerate(bdevs):
4391 try:
4392 _SymlinkBlockDev(instance_name, rd.dev_path, idx)
4393 except EnvironmentError, err:
4394 _Fail("Can't create symlink: %s", err)
4395
4396
4397 for rd in bdevs:
4398 try:
4399 rd.AttachNet(multimaster)
4400 except errors.BlockDeviceError, err:
4401 _Fail("Can't change network configuration: %s", err)
4402
4403
4404
4405
4406
4407
4408
4409 def _Attach():
4410 all_connected = True
4411
4412 for rd in bdevs:
4413 stats = rd.GetProcStatus()
4414
4415 if multimaster:
4416
4417
4418
4419
4420
4421
4422 all_connected = (all_connected and
4423 stats.is_connected and
4424 stats.is_disk_uptodate and
4425 stats.peer_disk_uptodate)
4426 else:
4427 all_connected = (all_connected and
4428 (stats.is_connected or stats.is_in_resync))
4429
4430 if stats.is_standalone:
4431
4432
4433
4434 try:
4435 rd.AttachNet(multimaster)
4436 except errors.BlockDeviceError, err:
4437 _Fail("Can't change network configuration: %s", err)
4438
4439 if not all_connected:
4440 raise utils.RetryAgain()
4441
4442 try:
4443
4444 utils.Retry(_Attach, (0.1, 1.5, 5.0), 2 * 60)
4445 except utils.RetryTimeout:
4446 _Fail("Timeout in disk reconnecting")
4447
4448 if multimaster:
4449
4450 for rd in bdevs:
4451 try:
4452 rd.Open()
4453 except errors.BlockDeviceError, err:
4454 _Fail("Can't change to primary mode: %s", err)
4455
4458 """Wait until DRBDs have synchronized.
4459
4460 """
4461 def _helper(rd):
4462 stats = rd.GetProcStatus()
4463 if not (stats.is_connected or stats.is_in_resync):
4464 raise utils.RetryAgain()
4465 return stats
4466
4467 bdevs = _FindDisks(disks)
4468
4469 min_resync = 100
4470 alldone = True
4471 for rd in bdevs:
4472 try:
4473
4474 stats = utils.Retry(_helper, 1, 15, args=[rd])
4475 except utils.RetryTimeout:
4476 stats = rd.GetProcStatus()
4477
4478 if not (stats.is_connected or stats.is_in_resync):
4479 _Fail("DRBD device %s is not in sync: stats=%s", rd, stats)
4480 alldone = alldone and (not stats.is_in_resync)
4481 if stats.sync_percent is not None:
4482 min_resync = min(min_resync, stats.sync_percent)
4483
4484 return (alldone, min_resync)
4485
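# Return value sketch:
#
#   >>> DrbdWaitSync(disks)
#   (False, 37.5)  # still resyncing, slowest device at 37.5%
#   >>> DrbdWaitSync(disks)
#   (True, 100)    # every device connected and fully synced
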
4488 """Checks which of the passed disks needs activation and returns their UUIDs.
4489
4490 """
4491 faulty_disks = []
4492
4493 for disk in disks:
4494 rd = _RecursiveFindBD(disk)
4495 if rd is None:
4496 faulty_disks.append(disk)
4497 continue
4498
4499 stats = rd.GetProcStatus()
4500 if stats.is_standalone or stats.is_diskless:
4501 faulty_disks.append(disk)
4502
4503 return [disk.uuid for disk in faulty_disks]
4504
4514
4517 """Hard-powercycle the node.
4518
4519 Because we need to return first, and schedule the powercycle in the
4520 background, we won't be able to report failures nicely.
4521
4522 """
4523 hyper = hypervisor.GetHypervisor(hypervisor_type)
4524 try:
4525 pid = os.fork()
4526 except OSError:
4527
4528 pid = 0
4529 if pid > 0:
4530 return "Reboot scheduled in 5 seconds"
4531
4532 try:
4533 utils.Mlockall()
4534 except Exception:
4535 pass
4536 time.sleep(5)
4537 hyper.PowercycleNode(hvparams=hvparams)
4538
4541 """Verifies a restricted command name.
4542
4543 @type cmd: string
4544 @param cmd: Command name
4545 @rtype: tuple; (boolean, string or None)
4546 @return: The tuple's first element is the status; if C{False}, the second
4547 element is an error message string, otherwise it's C{None}
4548
4549 """
4550 if not cmd.strip():
4551 return (False, "Missing command name")
4552
4553 if os.path.basename(cmd) != cmd:
4554 return (False, "Invalid command name")
4555
4556 if not constants.EXT_PLUGIN_MASK.match(cmd):
4557 return (False, "Command name contains forbidden characters")
4558
4559 return (True, None)
4560
4563 """Common checks for restricted command file system directories and files.
4564
4565 @type path: string
4566 @param path: Path to check
4567 @param owner: C{None} or tuple containing UID and GID
4568 @rtype: tuple; (boolean, string or C{os.stat} result)
4569 @return: The tuple's first element is the status; if C{False}, the second
4570 element is an error message string, otherwise it's the result of C{os.stat}
4571
4572 """
4573 if owner is None:
4574
4575 owner = (0, 0)
4576
4577 try:
4578 st = os.stat(path)
4579 except EnvironmentError, err:
4580 return (False, "Can't stat(2) '%s': %s" % (path, err))
4581
4582 if stat.S_IMODE(st.st_mode) & (~_RCMD_MAX_MODE):
4583 return (False, "Permissions on '%s' are too permissive" % path)
4584
4585 if (st.st_uid, st.st_gid) != owner:
4586 (owner_uid, owner_gid) = owner
4587 return (False, "'%s' is not owned by %s:%s" % (path, owner_uid, owner_gid))
4588
4589 return (True, st)
4590
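# _RCMD_MAX_MODE corresponds to mode 0755, so anything writable by group
# or others is rejected (paths hypothetical):
#
#   >>> _CommonRestrictedCmdCheck("/var/lib/ganeti/rcmds", None)
#   (True, <os.stat result>)  # mode 0755, owned by root:root
#   >>> _CommonRestrictedCmdCheck("/tmp/evil", None)
#   (False, "Permissions on '/tmp/evil' are too permissive")
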
4593 """Verifies restricted command directory.
4594
4595 @type path: string
4596 @param path: Path to check
4597 @rtype: tuple; (boolean, string or None)
4598 @return: The tuple's first element is the status; if C{False}, the second
4599 element is an error message string, otherwise it's C{None}
4600
4601 """
4602 (status, value) = _CommonRestrictedCmdCheck(path, _owner)
4603
4604 if not status:
4605 return (False, value)
4606
4607 if not stat.S_ISDIR(value.st_mode):
4608 return (False, "Path '%s' is not a directory" % path)
4609
4610 return (True, None)
4611
4614 """Verifies a whole restricted command and returns its executable filename.
4615
4616 @type path: string
4617 @param path: Directory containing restricted commands
4618 @type cmd: string
4619 @param cmd: Command name
4620 @rtype: tuple; (boolean, string)
4621 @return: The tuple's first element is the status; if C{False}, the second
4622 element is an error message string, otherwise the second element is the
4623 absolute path to the executable
4624
4625 """
4626 executable = utils.PathJoin(path, cmd)
4627
4628 (status, msg) = _CommonRestrictedCmdCheck(executable, _owner)
4629
4630 if not status:
4631 return (False, msg)
4632
4633 if not utils.IsExecutable(executable):
4634 return (False, "access(2) thinks '%s' can't be executed" % executable)
4635
4636 return (True, executable)
4637
4643 """Performs a number of tests on a restricted command.
4644
4645 @type path: string
4646 @param path: Directory containing restricted commands
4647 @type cmd: string
4648 @param cmd: Command name
4649 @return: Same as L{_VerifyRestrictedCmd}
4650
4651 """
4652
4653 (status, msg) = _verify_dir(path)
4654 if status:
4655
4656 (status, msg) = _verify_name(cmd)
4657
4658 if not status:
4659 return (False, msg)
4660
4661
4662 return _verify_cmd(path, cmd)
4663
4673 """Executes a restricted command after performing strict tests.
4674
4675 @type cmd: string
4676 @param cmd: Command name
4677 @rtype: string
4678 @return: Command output
4679 @raise RPCFail: In case of an error
4680
4681 """
4682 logging.info("Preparing to run restricted command '%s'", cmd)
4683
4684 if not _enabled:
4685 _Fail("Restricted commands disabled at configure time")
4686
4687 lock = None
4688 try:
4689 cmdresult = None
4690 try:
4691 lock = utils.FileLock.Open(_lock_file)
4692 lock.Exclusive(blocking=True, timeout=_lock_timeout)
4693
4694 (status, value) = _prepare_fn(_path, cmd)
4695
4696 if status:
4697 cmdresult = _runcmd_fn([value], env={}, reset_env=True,
4698 postfork_fn=lambda _: lock.Unlock())
4699 else:
4700 logging.error(value)
4701 except Exception:
4702
4703 logging.exception("Caught exception")
4704
4705 if cmdresult is None:
4706 logging.info("Sleeping for %0.1f seconds before returning",
4707 _RCMD_INVALID_DELAY)
4708 _sleep_fn(_RCMD_INVALID_DELAY)
4709
4710
4711 _Fail("Executing command '%s' failed" % cmd)
4712 elif cmdresult.failed or cmdresult.fail_reason:
4713 _Fail("Restricted command '%s' failed: %s; output: %s",
4714 cmd, cmdresult.fail_reason, cmdresult.output)
4715 else:
4716 return cmdresult.output
4717 finally:
4718 if lock is not None:
4719
4720 lock.Close()
4721 lock = None
4722
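# Usage sketch (hypothetical command name): runs <restricted commands
# dir>/fsck-all with an empty environment and returns its output; if any
# check fails, the call sleeps for _RCMD_INVALID_DELAY seconds (to slow
# down probing for valid names) and then raises RPCFail.
#
#   >>> RunRestrictedCmd("fsck-all")
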
4725 """Creates or removes the watcher pause file.
4726
4727 @type until: None or number
4728 @param until: Unix timestamp saying until when the watcher shouldn't run
4729
4730 """
4731 if until is None:
4732 logging.info("Received request to no longer pause watcher")
4733 utils.RemoveFile(_filename)
4734 else:
4735 logging.info("Received request to pause watcher until %s", until)
4736
4737 if not ht.TNumber(until):
4738 _Fail("Duration must be numeric")
4739
4740 utils.WriteFile(_filename, data="%d\n" % (until, ), mode=0644)
4741
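# Usage sketch: pause the watcher for one hour, then re-enable it.
#
#   >>> SetWatcherPause(time.time() + 3600)
#   >>> SetWatcherPause(None)
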
def GetFileInfo(file_path):
  """Checks if a file exists and returns information related to it.

  Currently returned information:
    - file size: int, size in bytes

  @type file_path: string
  @param file_path: Name of file to examine.

  @rtype: tuple of bool, dict
  @return: Whether the file exists, and a dictionary of information about the
      file gathered by os.stat.

  """
  try:
    stat_info = os.stat(file_path)
    values_dict = {
      constants.STAT_SIZE: stat_info.st_size,
      }
    return True, values_dict
  except EnvironmentError:
    # os.stat raises OSError, not IOError; catch both via their common base
    return False, {}

4795 """Hook runner.
4796
4797 This class is instantiated on the node side (ganeti-noded) and not
4798 on the master side.
4799
4800 """
4801 - def __init__(self, hooks_base_dir=None):
4802 """Constructor for hooks runner.
4803
4804 @type hooks_base_dir: str or None
4805 @param hooks_base_dir: if not None, this overrides the
4806 L{pathutils.HOOKS_BASE_DIR} (useful for unittests)
4807
4808 """
4809 if hooks_base_dir is None:
4810 hooks_base_dir = pathutils.HOOKS_BASE_DIR
4811
4812
4813 self._BASE_DIR = hooks_base_dir
4814
4816 """Check that the hooks will be run only locally and then run them.
4817
4818 """
4819 assert len(node_list) == 1
4820 node = node_list[0]
4821 _, myself = ssconf.GetMasterAndMyself()
4822 assert node == myself
4823
4824 results = self.RunHooks(hpath, phase, env)
4825
4826
4827 return {node: (None, False, results)}
4828
  def RunHooks(self, hpath, phase, env):
    """Run the scripts in the hooks directory.

    @type hpath: str
    @param hpath: the path to the hooks directory which
        holds the scripts
    @type phase: str
    @param phase: either L{constants.HOOKS_PHASE_PRE} or
        L{constants.HOOKS_PHASE_POST}
    @type env: dict
    @param env: dictionary with the environment for the hook
    @rtype: list
    @return: list of 3-element tuples:
      - script path
      - script result, either L{constants.HKR_SUCCESS} or
        L{constants.HKR_FAIL}
      - output of the script

    @raise errors.ProgrammerError: for invalid input
        parameters

    """
    if phase == constants.HOOKS_PHASE_PRE:
      suffix = "pre"
    elif phase == constants.HOOKS_PHASE_POST:
      suffix = "post"
    else:
      _Fail("Unknown hooks phase '%s'", phase)

    subdir = "%s-%s.d" % (hpath, suffix)
    dir_name = utils.PathJoin(self._BASE_DIR, subdir)

    results = []

    if not os.path.isdir(dir_name):
      # for non-existing/non-dir paths, we simply return an empty list
      # instead of logging a warning
      return results

    runparts_results = utils.RunParts(dir_name, env=env, reset_env=True)

    for (relname, relstatus, runresult) in runparts_results:
      if relstatus == constants.RUNPARTS_SKIP:
        rrval = constants.HKR_SKIP
        output = ""
      elif relstatus == constants.RUNPARTS_ERR:
        rrval = constants.HKR_FAIL
        output = "Hook script execution error: %s" % runresult
      elif relstatus == constants.RUNPARTS_RUN:
        if runresult.failed:
          rrval = constants.HKR_FAIL
        else:
          rrval = constants.HKR_SUCCESS
        output = utils.SafeEncode(runresult.output.strip())
      results.append(("%s/%s" % (subdir, relname), rrval, output))

    return results

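# Directory layout sketch (hook names hypothetical): for hpath
# "instance-start" and the "pre" phase, scripts are collected from
#
#   <hooks base dir>/instance-start-pre.d/00-check
#   <hooks base dir>/instance-start-pre.d/10-prepare
#
# and each yields an ("instance-start-pre.d/<name>", status, output)
# tuple in the result list.
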
4889 """IAllocator runner.
4890
4891 This class is instantiated on the node side (ganeti-noded) and not on
4892 the master side.
4893
4894 """
4895 @staticmethod
4896 - def Run(name, idata, ial_params):
4897 """Run an iallocator script.
4898
4899 @type name: str
4900 @param name: the iallocator script name
4901 @type idata: str
4902 @param idata: the allocator input data
4903 @type ial_params: list
4904 @param ial_params: the iallocator parameters
4905
4906 @rtype: tuple
4907 @return: two element tuple of:
4908 - status
4909 - either error message or stdout of allocator (for success)
4910
4911 """
4912 alloc_script = utils.FindFile(name, constants.IALLOCATOR_SEARCH_PATH,
4913 os.path.isfile)
4914 if alloc_script is None:
4915 _Fail("iallocator module '%s' not found in the search path", name)
4916
4917 fd, fin_name = tempfile.mkstemp(prefix="ganeti-iallocator.")
4918 try:
4919 os.write(fd, idata)
4920 os.close(fd)
4921 result = utils.RunCmd([alloc_script, fin_name] + ial_params)
4922 if result.failed:
4923 _Fail("iallocator module '%s' failed: %s, output '%s'",
4924 name, result.fail_reason, result.output)
4925 finally:
4926 os.unlink(fin_name)
4927
4928 return result.stdout
4929
4932 """Simple class for managing a cache of block device information.
4933
4934 """
4935 _DEV_PREFIX = "/dev/"
4936 _ROOT_DIR = pathutils.BDEV_CACHE_DIR
4937
4938 @classmethod
4940 """Converts a /dev/name path to the cache file name.
4941
4942 This replaces slashes with underscores and strips the /dev
4943 prefix. It then returns the full path to the cache file.
4944
4945 @type dev_path: str
4946 @param dev_path: the C{/dev/} path name
4947 @rtype: str
4948 @return: the converted path name
4949
4950 """
4951 if dev_path.startswith(cls._DEV_PREFIX):
4952 dev_path = dev_path[len(cls._DEV_PREFIX):]
4953 dev_path = dev_path.replace("/", "_")
4954 fpath = utils.PathJoin(cls._ROOT_DIR, "bdev_%s" % dev_path)
4955 return fpath
4956
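  # Conversion sketch (hypothetical device path): "/dev/drbd/by-res/r0"
  # maps to "<_ROOT_DIR>/bdev_drbd_by-res_r0".
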
  @classmethod
  def UpdateCache(cls, dev_path, owner, on_primary, iv_name):
    """Updates the cache information for a given device.

    @type dev_path: str
    @param dev_path: the pathname of the device
    @type owner: str
    @param owner: the owner (instance name) of the device
    @type on_primary: bool
    @param on_primary: whether this is the primary
        node or not
    @type iv_name: str
    @param iv_name: the instance-visible name of the
        device, as in objects.Disk.iv_name

    @rtype: None

    """
    if dev_path is None:
      logging.error("DevCacheManager.UpdateCache got a None dev_path")
      return
    fpath = cls._ConvertPath(dev_path)
    if on_primary:
      state = "primary"
    else:
      state = "secondary"
    if iv_name is None:
      iv_name = "not_visible"
    fdata = "%s %s %s\n" % (str(owner), state, iv_name)
    try:
      utils.WriteFile(fpath, data=fdata)
    except EnvironmentError, err:
      logging.exception("Can't update bdev cache for %s: %s", dev_path, err)

  @classmethod
  def RemoveCache(cls, dev_path):
    """Remove data for a dev_path.

    This is just a wrapper over L{utils.io.RemoveFile} with a converted
    path name and logging.

    @type dev_path: str
    @param dev_path: the pathname of the device

    @rtype: None

    """
    if dev_path is None:
      logging.error("DevCacheManager.RemoveCache got a None dev_path")
      return
    fpath = cls._ConvertPath(dev_path)
    try:
      utils.RemoveFile(fpath)
    except EnvironmentError, err:
      logging.exception("Can't remove bdev cache for %s: %s", dev_path, err)