22 """Functions used by the node daemon
23
24 @var _ALLOWED_UPLOAD_FILES: denotes which files are accepted in
25 the L{UploadFile} function
26 @var _ALLOWED_CLEAN_DIRS: denotes which directories are accepted
27 in the L{_CleanDirectory} function
28
29 """
30
31
32
33
34
35
36
37


import os
import os.path
import shutil
import time
import stat
import errno
import re
import random
import logging
import tempfile
import zlib
import base64
import signal

from ganeti import errors
from ganeti import utils
from ganeti import ssh
from ganeti import hypervisor
from ganeti import constants
from ganeti import bdev
from ganeti import objects
from ganeti import ssconf
from ganeti import serializer
from ganeti import netutils
from ganeti import runtime
from ganeti import compat
from ganeti import pathutils
from ganeti import vcluster
from ganeti import ht
from ganeti import hooksmaster


_BOOT_ID_PATH = "/proc/sys/kernel/random/boot_id"
_ALLOWED_CLEAN_DIRS = compat.UniqueFrozenset([
  pathutils.DATA_DIR,
  pathutils.JOB_QUEUE_ARCHIVE_DIR,
  pathutils.QUEUE_DIR,
  pathutils.CRYPTO_KEYS_DIR,
  ])
_MAX_SSL_CERT_VALIDITY = 7 * 24 * 60 * 60
_X509_KEY_FILE = "key"
_X509_CERT_FILE = "cert"
_IES_STATUS_FILE = "status"
_IES_PID_FILE = "pid"
_IES_CA_FILE = "ca"


_LVSLINE_REGEX = re.compile("^ *([^|]+)\|([^|]+)\|([0-9.]+)\|([^|]{6,})\|?$")
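
# Illustrative example (not part of the original module): a line produced by
# "lvs --noheadings --separator=| -ovg_name,lv_name,lv_size,lv_attr" that the
# regex above matches:
#
#   >>> _LVSLINE_REGEX.match("  xenvg|test1|20.06|-wi-ao----").groups()
#   ('xenvg', 'test1', '20.06', '-wi-ao----')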


_MASTER_START = "start"
_MASTER_STOP = "stop"

#: Maximum file permissions for restricted command files (at most rwxr-xr-x)
_RCMD_MAX_MODE = (stat.S_IRWXU |
                  stat.S_IRGRP | stat.S_IXGRP |
                  stat.S_IROTH | stat.S_IXOTH)

#: Delay (seconds) before returning an error for restricted commands
_RCMD_INVALID_DELAY = 10

#: How long to wait for the restricted-command lock; kept shorter than
#: L{_RCMD_INVALID_DELAY} so the delay, not lock contention, dominates
_RCMD_LOCK_TIMEOUT = _RCMD_INVALID_DELAY * 0.8
106 """Class denoting RPC failure.
107
108 Its argument is the error message.
109
110 """
111
114 """Path of the file containing the reason of the instance status change.
115
116 @type instance_name: string
117 @param instance_name: The name of the instance
118 @rtype: string
119 @return: The path of the file
120
121 """
122 return utils.PathJoin(pathutils.INSTANCE_REASON_DIR, instance_name)
123
126 """Serialize a reason trail related to an instance change of state to file.
127
128 The exact location of the file depends on the name of the instance and on
129 the configuration of the Ganeti cluster defined at deploy time.
130
131 @type instance_name: string
132 @param instance_name: The name of the instance
133 @rtype: None
134
135 """
136 json = serializer.DumpJson(trail)
137 filename = _GetInstReasonFilename(instance_name)
138 utils.WriteFile(filename, data=json)
139
140


def _Fail(msg, *args, **kwargs):
  """Log an error and then raise an RPCFail exception.

  This exception is then handled specially in the ganeti daemon and
  turned into a 'failed' return type. As such, this function is a
  useful shortcut for logging the error and returning it to the master
  daemon.

  @type msg: string
  @param msg: the text of the exception
  @raise RPCFail

  """
  if args:
    msg = msg % args
  if "log" not in kwargs or kwargs["log"]:
    if "exc" in kwargs and kwargs["exc"]:
      logging.exception(msg)
    else:
      logging.error(msg)
  raise RPCFail(msg)
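
# Usage sketch (illustrative, not part of the original module):
#
#   if result.failed:
#     _Fail("Command %s failed: %s", result.cmd, result.output)
#
# Passing exc=True logs the pending exception's traceback as well, while
# log=False skips logging entirely and only raises RPCFail.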


def _GetConfig():
  """Simple wrapper to return a SimpleStore.

  @rtype: L{ssconf.SimpleStore}
  @return: a SimpleStore instance

  """
  return ssconf.SimpleStore()
175 """Simple wrapper to return an SshRunner.
176
177 @type cluster_name: str
178 @param cluster_name: the cluster name, which is needed
179 by the SshRunner constructor
180 @rtype: L{ssh.SshRunner}
181 @return: an SshRunner instance
182
183 """
184 return ssh.SshRunner(cluster_name)
185
188 """Unpacks data compressed by the RPC client.
189
190 @type data: list or tuple
191 @param data: Data sent by RPC client
192 @rtype: str
193 @return: Decompressed data
194
195 """
196 assert isinstance(data, (list, tuple))
197 assert len(data) == 2
198 (encoding, content) = data
199 if encoding == constants.RPC_ENCODING_NONE:
200 return content
201 elif encoding == constants.RPC_ENCODING_ZLIB_BASE64:
202 return zlib.decompress(base64.b64decode(content))
203 else:
204 raise AssertionError("Unknown data encoding")
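
# Round-trip sketch (illustrative, not part of the original module): the RPC
# client sends either a raw payload or a zlib-compressed, base64-encoded one.
#
#   packed = (constants.RPC_ENCODING_ZLIB_BASE64,
#             base64.b64encode(zlib.compress("payload")))
#   assert _Decompress(packed) == "payload"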


def _CleanDirectory(path, exclude=None):
  """Removes all regular files in a directory.

  @type path: str
  @param path: the directory to clean
  @type exclude: list
  @param exclude: list of files to be excluded, defaults
      to the empty list

  """
  if path not in _ALLOWED_CLEAN_DIRS:
    _Fail("Path passed to _CleanDirectory not in allowed clean targets: '%s'",
          path)

  if not os.path.isdir(path):
    return
  if exclude is None:
    exclude = []
  else:
    # Normalize excluded paths
    exclude = [os.path.normpath(i) for i in exclude]

  for rel_name in utils.ListVisibleFiles(path):
    full_name = utils.PathJoin(path, rel_name)
    if full_name in exclude:
      continue
    if os.path.isfile(full_name) and not os.path.islink(full_name):
      utils.RemoveFile(full_name)
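
# Usage sketch (illustrative, not part of the original module; the excluded
# file name below is a placeholder). Only whitelisted directories may be
# cleaned; anything else raises RPCFail:
#
#   _CleanDirectory(pathutils.QUEUE_DIR, exclude=["/var/lib/ganeti/queue/lock"])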


def _BuildUploadFileList():
  """Build the list of allowed upload files.

  This is abstracted so that it's built only once at module import time.

  """
  allowed_files = set([
    pathutils.CLUSTER_CONF_FILE,
    pathutils.ETC_HOSTS,
    pathutils.SSH_KNOWN_HOSTS_FILE,
    pathutils.VNC_PASSWORD_FILE,
    pathutils.RAPI_CERT_FILE,
    pathutils.SPICE_CERT_FILE,
    pathutils.SPICE_CACERT_FILE,
    pathutils.RAPI_USERS_FILE,
    pathutils.CONFD_HMAC_KEY,
    pathutils.CLUSTER_DOMAIN_SECRET_FILE,
    ])

  for hv_name in constants.HYPER_TYPES:
    hv_class = hypervisor.GetHypervisorClass(hv_name)
    allowed_files.update(hv_class.GetAncillaryFiles()[0])

  assert pathutils.FILE_STORAGE_PATHS_FILE not in allowed_files, \
    "Allowed file storage paths should never be uploaded via RPC"

  return frozenset(allowed_files)


_ALLOWED_UPLOAD_FILES = _BuildUploadFileList()


def GetMasterInfo():
  """Returns master information.

  This is a utility function to compute master information, either
  for consumption here or from the node daemon.

  @rtype: tuple
  @return: master_netdev, master_ip, master_name, primary_ip_family,
    master_netmask
  @raise RPCFail: in case of errors

  """
  try:
    cfg = _GetConfig()
    master_netdev = cfg.GetMasterNetdev()
    master_ip = cfg.GetMasterIP()
    master_netmask = cfg.GetMasterNetmask()
    master_node = cfg.GetMasterNode()
    primary_ip_family = cfg.GetPrimaryIPFamily()
  except errors.ConfigurationError, err:
    _Fail("Cluster configuration incomplete: %s", err, exc=True)
  return (master_netdev, master_ip, master_node, primary_ip_family,
          master_netmask)


def RunLocalHooks(hook_opcode, hooks_path, env_builder_fn):
  """Decorator that runs hooks before and after the decorated function.

  @type hook_opcode: string
  @param hook_opcode: opcode of the hook
  @type hooks_path: string
  @param hooks_path: path of the hooks
  @type env_builder_fn: function
  @param env_builder_fn: function that returns a dictionary containing the
    environment variables for the hooks. Will get all the parameters of the
    decorated function.
  @raise RPCFail: in case of pre-hook failure

  """
  def decorator(fn):
    def wrapper(*args, **kwargs):
      _, myself = ssconf.GetMasterAndMyself()
      nodes = ([myself], [myself])

      env_fn = compat.partial(env_builder_fn, *args, **kwargs)

      cfg = _GetConfig()
      hr = HooksRunner()
      hm = hooksmaster.HooksMaster(hook_opcode, hooks_path, nodes,
                                   hr.RunLocalHooks, None, env_fn,
                                   logging.warning, cfg.GetClusterName(),
                                   cfg.GetMasterNode())
      hm.RunPhase(constants.HOOKS_PHASE_PRE)
      result = fn(*args, **kwargs)
      hm.RunPhase(constants.HOOKS_PHASE_POST)

      return result
    return wrapper
  return decorator


def _BuildMasterIpEnv(master_params, use_external_mip_script=None):
  """Builds environment variables for master IP hooks.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script (unused, but necessary per the implementation of the
    _RunLocalHooks decorator)

  """
  ver = netutils.IPAddress.GetVersionFromAddressFamily(master_params.ip_family)
  env = {
    "MASTER_NETDEV": master_params.netdev,
    "MASTER_IP": master_params.ip,
    "MASTER_NETMASK": str(master_params.netmask),
    "CLUSTER_IP_VERSION": str(ver),
  }

  return env


def _RunMasterSetupScript(master_params, action, use_external_mip_script):
  """Execute the master IP address setup script.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type action: string
  @param action: action to pass to the script. Must be one of
    L{backend._MASTER_START} or L{backend._MASTER_STOP}
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script
  @raise backend.RPCFail: if there are errors during the execution of the
    script

  """
  env = _BuildMasterIpEnv(master_params)

  if use_external_mip_script:
    setup_script = pathutils.EXTERNAL_MASTER_SETUP_SCRIPT
  else:
    setup_script = pathutils.DEFAULT_MASTER_SETUP_SCRIPT

  result = utils.RunCmd([setup_script, action], env=env, reset_env=True)

  if result.failed:
    _Fail("Failed to %s the master IP. Script return value: %s, output: '%s'" %
          (action, result.exit_code, result.output), log=True)


@RunLocalHooks(constants.FAKE_OP_MASTER_TURNUP, "master-ip-turnup",
               _BuildMasterIpEnv)
def ActivateMasterIp(master_params, use_external_mip_script):
  """Activate the IP address of the master daemon.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script
  @raise RPCFail: in case of errors during the IP startup

  """
  _RunMasterSetupScript(master_params, _MASTER_START,
                        use_external_mip_script)


def StartMasterDaemons(no_voting):
  """Activate local node as master node.

  The function will start the master daemons (ganeti-masterd and ganeti-rapi).

  @type no_voting: boolean
  @param no_voting: whether to start ganeti-masterd without a node vote
      but still non-interactively
  @rtype: None

  """

  if no_voting:
    masterd_args = "--no-voting --yes-do-it"
  else:
    masterd_args = ""

  env = {
    "EXTRA_MASTERD_ARGS": masterd_args,
    }

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "start-master"], env=env)
  if result.failed:
    msg = "Can't start Ganeti master: %s" % result.output
    logging.error(msg)
    _Fail(msg)


@RunLocalHooks(constants.FAKE_OP_MASTER_TURNDOWN, "master-ip-turndown",
               _BuildMasterIpEnv)
def DeactivateMasterIp(master_params, use_external_mip_script):
  """Deactivate the master IP on this node.

  @type master_params: L{objects.MasterNetworkParameters}
  @param master_params: network parameters of the master
  @type use_external_mip_script: boolean
  @param use_external_mip_script: whether to use an external master IP
    address setup script
  @raise RPCFail: in case of errors during the IP turndown

  """
  _RunMasterSetupScript(master_params, _MASTER_STOP,
                        use_external_mip_script)


def StopMasterDaemons():
  """Stop the master daemons on this node.

  Stop the master daemons (ganeti-masterd and ganeti-rapi) on this node.

  @rtype: None

  """
  # Failures here are logged below but do not cause an RPC failure

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop-master"])
  if result.failed:
    logging.error("Could not stop Ganeti master, command %s had exitcode %s"
                  " and error %s",
                  result.cmd, result.exit_code, result.output)


def ChangeMasterNetmask(old_netmask, netmask, master_ip, master_netdev):
  """Change the netmask of the master IP.

  @param old_netmask: the old value of the netmask
  @param netmask: the new value of the netmask
  @param master_ip: the master IP
  @param master_netdev: the master network device

  """
  if old_netmask == netmask:
    return

  if not netutils.IPAddress.Own(master_ip):
    _Fail("The master IP address is not up, not attempting to change its"
          " netmask")

  result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "add",
                         "%s/%s" % (master_ip, netmask),
                         "dev", master_netdev, "label",
                         "%s:0" % master_netdev])
  if result.failed:
    _Fail("Could not set the new netmask on the master IP address")

  result = utils.RunCmd([constants.IP_COMMAND_PATH, "address", "del",
                         "%s/%s" % (master_ip, old_netmask),
                         "dev", master_netdev, "label",
                         "%s:0" % master_netdev])
  if result.failed:
    _Fail("Could not bring down the master IP address with the old netmask")


def EtcHostsModify(mode, host, ip):
  """Modify a host entry in /etc/hosts.

  @param mode: The mode to operate. Either add or remove entry
  @param host: The host to operate on
  @param ip: The ip associated with the entry

  """
  if mode == constants.ETC_HOSTS_ADD:
    if not ip:
      raise RPCFail("Mode 'add' needs 'ip' parameter, but parameter not"
                    " present")
    utils.AddHostToEtcHosts(host, ip)
  elif mode == constants.ETC_HOSTS_REMOVE:
    if ip:
      raise RPCFail("Mode 'remove' does not allow 'ip' parameter, but"
                    " parameter is present")
    utils.RemoveHostFromEtcHosts(host)
  else:
    raise RPCFail("Mode not supported")
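
# Usage sketch (illustrative, not part of the original module):
#
#   EtcHostsModify(constants.ETC_HOSTS_ADD, "node3.example.com", "192.0.2.30")
#   EtcHostsModify(constants.ETC_HOSTS_REMOVE, "node3.example.com", None)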


def LeaveCluster(modify_ssh_setup):
  """Cleans up and removes the current node.

  This function cleans up and prepares the current node to be removed
  from the cluster.

  If processing is successful, then it raises an
  L{errors.QuitGanetiException} which is used as a special case to
  shutdown the node daemon.

  @param modify_ssh_setup: boolean

  """
  _CleanDirectory(pathutils.DATA_DIR)
  _CleanDirectory(pathutils.CRYPTO_KEYS_DIR)
  JobQueuePurge()

  if modify_ssh_setup:
    try:
      priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.SSH_LOGIN_USER)

      utils.RemoveAuthorizedKey(auth_keys, utils.ReadFile(pub_key))

      utils.RemoveFile(priv_key)
      utils.RemoveFile(pub_key)
    except errors.OpExecError:
      logging.exception("Error while processing ssh files")

  try:
    utils.RemoveFile(pathutils.CONFD_HMAC_KEY)
    utils.RemoveFile(pathutils.RAPI_CERT_FILE)
    utils.RemoveFile(pathutils.SPICE_CERT_FILE)
    utils.RemoveFile(pathutils.SPICE_CACERT_FILE)
    utils.RemoveFile(pathutils.NODED_CERT_FILE)
  except: # pylint: disable=W0702
    logging.exception("Error while removing cluster secrets")

  result = utils.RunCmd([pathutils.DAEMON_UTIL, "stop", constants.CONFD])
  if result.failed:
    logging.error("Command %s failed with exitcode %s and error %s",
                  result.cmd, result.exit_code, result.output)

  # Raise a custom exception (handled in ganeti-noded)
  raise errors.QuitGanetiException(True, "Shutdown scheduled")


def _GetVgInfo(name, excl_stor):
  """Retrieves information about a LVM volume group.

  """
  vginfo = bdev.LogicalVolume.GetVGInfo([name], excl_stor)
  if vginfo:
    vg_free = int(round(vginfo[0][0], 0))
    vg_size = int(round(vginfo[0][1], 0))
  else:
    vg_free = None
    vg_size = None

  return {
    "name": name,
    "vg_free": vg_free,
    "vg_size": vg_size,
    }


def _GetHvInfo(name):
  """Retrieves node information from a hypervisor.

  The information returned depends on the hypervisor. Common items:

    - vg_size is the size of the configured volume group in MiB
    - vg_free is the free size of the volume group in MiB
    - memory_dom0 is the memory allocated for domain0 in MiB
    - memory_free is the currently available (free) ram in MiB
    - memory_total is the total amount of ram in MiB
    - hv_version: the hypervisor version, if available

  """
  return hypervisor.GetHypervisor(name).GetNodeInfo()


def _GetNamedNodeInfo(names, fn):
  """Calls C{fn} for all names in C{names} and returns a list of results.

  @rtype: None or list

  """
  if names is None:
    return None
  else:
    return map(fn, names)
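
# Illustrative sketch (not part of the original module; values are made up):
#
#   _GetNamedNodeInfo(["xenvg"], lambda vg: _GetVgInfo(vg, False))
#   -> [{"name": "xenvg", "vg_free": 1024, "vg_size": 4096}]
#   _GetNamedNodeInfo(None, fn) -> None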


def GetNodeInfo(vg_names, hv_names, excl_stor):
  """Gives back a hash with different information about the node.

  @type vg_names: list of string
  @param vg_names: Names of the volume groups to ask for disk space information
  @type hv_names: list of string
  @param hv_names: Names of the hypervisors to ask for node information
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active
  @rtype: tuple; (string, None/dict, None/dict)
  @return: Tuple containing boot ID, volume group information and hypervisor
    information

  """
  bootid = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
  vg_info = _GetNamedNodeInfo(vg_names, (lambda vg: _GetVgInfo(vg, excl_stor)))
  hv_info = _GetNamedNodeInfo(hv_names, _GetHvInfo)

  return (bootid, vg_info, hv_info)
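
# Illustrative sketch (not part of the original module; values are made up):
#
#   GetNodeInfo(["xenvg"], [constants.HT_XEN_PVM], False)
#   -> ("9f2ac5e3-...",                                    # boot ID
#       [{"name": "xenvg", "vg_free": 1024, "vg_size": 4096}],
#       [{"memory_total": 4096, "memory_free": 3072, ...}])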


def _CheckExclusivePvs(pvi_list):
  """Check that PVs are not shared among LVs

  @type pvi_list: list of L{objects.LvmPvInfo} objects
  @param pvi_list: information about the PVs

  @rtype: list of tuples (string, list of strings)
  @return: offending volumes, as tuples: (pv_name, [lv1_name, lv2_name...])

  """
  res = []
  for pvi in pvi_list:
    if len(pvi.lv_list) > 1:
      res.append((pvi.name, pvi.lv_list))
  return res


def VerifyNode(what, cluster_name):
  """Verify the status of the local node.

  Based on the input L{what} parameter, various checks are done on the
  local node.

  If the I{filelist} key is present, this list of
  files is checksummed and the file/checksum pairs are returned.

  If the I{nodelist} key is present, we check that we have
  connectivity via ssh with the target nodes (and check the hostname
  report).

  If the I{node-net-test} key is present, we check that we have
  connectivity to the given nodes via both primary IP and, if
  applicable, secondary IPs.

  @type what: C{dict}
  @param what: a dictionary of things to check:
      - filelist: list of files for which to compute checksums
      - nodelist: list of nodes we should check ssh communication with
      - node-net-test: list of nodes we should check node daemon port
        connectivity with
      - hypervisor: list with hypervisors to run the verify for
  @rtype: dict
  @return: a dictionary with the same keys as the input dict, and
      values representing the result of the checks

  """
  result = {}
  my_name = netutils.Hostname.GetSysName()
  port = netutils.GetDaemonPort(constants.NODED)
  vm_capable = my_name not in what.get(constants.NV_VMNODES, [])

  if constants.NV_HYPERVISOR in what and vm_capable:
    result[constants.NV_HYPERVISOR] = tmp = {}
    for hv_name in what[constants.NV_HYPERVISOR]:
      try:
        val = hypervisor.GetHypervisor(hv_name).Verify()
      except errors.HypervisorError, err:
        val = "Error while checking hypervisor: %s" % str(err)
      tmp[hv_name] = val

  if constants.NV_HVPARAMS in what and vm_capable:
    result[constants.NV_HVPARAMS] = tmp = []
    for source, hv_name, hvparms in what[constants.NV_HVPARAMS]:
      try:
        logging.info("Validating hv %s, %s", hv_name, hvparms)
        hypervisor.GetHypervisor(hv_name).ValidateParameters(hvparms)
      except errors.HypervisorError, err:
        tmp.append((source, hv_name, str(err)))

  if constants.NV_FILELIST in what:
    fingerprints = utils.FingerprintFiles(map(vcluster.LocalizeVirtualPath,
                                              what[constants.NV_FILELIST]))
    result[constants.NV_FILELIST] = \
      dict((vcluster.MakeVirtualPath(key), value)
           for (key, value) in fingerprints.items())

  if constants.NV_NODELIST in what:
    (nodes, bynode) = what[constants.NV_NODELIST]

    # Add nodes from other groups (different for each node)
    try:
      nodes.extend(bynode[my_name])
    except KeyError:
      pass

    # Use a random order
    random.shuffle(nodes)

    # Try to contact all nodes
    val = {}
    for node in nodes:
      success, message = _GetSshRunner(cluster_name).VerifyNodeHostname(node)
      if not success:
        val[node] = message

    result[constants.NV_NODELIST] = val

  if constants.NV_NODENETTEST in what:
    result[constants.NV_NODENETTEST] = tmp = {}
    my_pip = my_sip = None
    for name, pip, sip in what[constants.NV_NODENETTEST]:
      if name == my_name:
        my_pip = pip
        my_sip = sip
        break
    if not my_pip:
      tmp[my_name] = ("Can't find my own primary/secondary IP"
                      " in the node list")
    else:
      for name, pip, sip in what[constants.NV_NODENETTEST]:
        fail = []
        if not netutils.TcpPing(pip, port, source=my_pip):
          fail.append("primary")
        if sip != pip:
          if not netutils.TcpPing(sip, port, source=my_sip):
            fail.append("secondary")
        if fail:
          tmp[name] = ("failure using the %s interface(s)" %
                       " and ".join(fail))

  if constants.NV_MASTERIP in what:
    # FIXME: add checks on incoming data structures (here and in the
    # rest of the function)
    master_name, master_ip = what[constants.NV_MASTERIP]
    if master_name == my_name:
      source = constants.IP4_ADDRESS_LOCALHOST
    else:
      source = None
    result[constants.NV_MASTERIP] = netutils.TcpPing(master_ip, port,
                                                     source=source)

  if constants.NV_USERSCRIPTS in what:
    result[constants.NV_USERSCRIPTS] = \
      [script for script in what[constants.NV_USERSCRIPTS]
       if not utils.IsExecutable(script)]

  if constants.NV_OOB_PATHS in what:
    result[constants.NV_OOB_PATHS] = tmp = []
    for path in what[constants.NV_OOB_PATHS]:
      try:
        st = os.stat(path)
      except OSError, err:
        tmp.append("error stat()'ing out of band helper: %s" % err)
      else:
        if stat.S_ISREG(st.st_mode):
          if stat.S_IMODE(st.st_mode) & stat.S_IXUSR:
            tmp.append(None)
          else:
            tmp.append("out of band helper %s is not executable" % path)
        else:
          tmp.append("out of band helper %s is not a file" % path)

  if constants.NV_LVLIST in what and vm_capable:
    try:
      val = GetVolumeList(utils.ListVolumeGroups().keys())
    except RPCFail, err:
      val = str(err)
    result[constants.NV_LVLIST] = val

  if constants.NV_INSTANCELIST in what and vm_capable:
    # GetInstanceList can fail
    try:
      val = GetInstanceList(what[constants.NV_INSTANCELIST])
    except RPCFail, err:
      val = str(err)
    result[constants.NV_INSTANCELIST] = val

  if constants.NV_VGLIST in what and vm_capable:
    result[constants.NV_VGLIST] = utils.ListVolumeGroups()

  if constants.NV_PVLIST in what and vm_capable:
    check_exclusive_pvs = constants.NV_EXCLUSIVEPVS in what
    val = bdev.LogicalVolume.GetPVInfo(what[constants.NV_PVLIST],
                                       filter_allocatable=False,
                                       include_lvs=check_exclusive_pvs)
    if check_exclusive_pvs:
      result[constants.NV_EXCLUSIVEPVS] = _CheckExclusivePvs(val)
      for pvi in val:
        # Avoid sending useless data on the wire
        pvi.lv_list = []
    result[constants.NV_PVLIST] = map(objects.LvmPvInfo.ToDict, val)

  if constants.NV_VERSION in what:
    result[constants.NV_VERSION] = (constants.PROTOCOL_VERSION,
                                    constants.RELEASE_VERSION)

  if constants.NV_HVINFO in what and vm_capable:
    hyper = hypervisor.GetHypervisor(what[constants.NV_HVINFO])
    result[constants.NV_HVINFO] = hyper.GetNodeInfo()

  if constants.NV_DRBDLIST in what and vm_capable:
    try:
      used_minors = bdev.DRBD8.GetUsedDevs().keys()
    except errors.BlockDeviceError, err:
      logging.warning("Can't get used minors list", exc_info=True)
      used_minors = str(err)
    result[constants.NV_DRBDLIST] = used_minors

  if constants.NV_DRBDHELPER in what and vm_capable:
    status = True
    try:
      payload = bdev.BaseDRBD.GetUsermodeHelper()
    except errors.BlockDeviceError, err:
      logging.error("Can't get DRBD usermode helper: %s", str(err))
      status = False
      payload = str(err)
    result[constants.NV_DRBDHELPER] = (status, payload)

  if constants.NV_NODESETUP in what:
    result[constants.NV_NODESETUP] = tmpr = []
    if not os.path.isdir("/sys/block") or not os.path.isdir("/sys/class/net"):
      tmpr.append("The sysfs filesystem doesn't seem to be mounted"
                  " under /sys, missing required directories /sys/block"
                  " and /sys/class/net")
    if (not os.path.isdir("/proc/sys") or
        not os.path.isfile("/proc/sysrq-trigger")):
      tmpr.append("The procfs filesystem doesn't seem to be mounted"
                  " under /proc, missing required directory /proc/sys and"
                  " the file /proc/sysrq-trigger")

  if constants.NV_TIME in what:
    result[constants.NV_TIME] = utils.SplitTime(time.time())

  if constants.NV_OSLIST in what and vm_capable:
    result[constants.NV_OSLIST] = DiagnoseOS()

  if constants.NV_BRIDGES in what and vm_capable:
    result[constants.NV_BRIDGES] = [bridge
                                    for bridge in what[constants.NV_BRIDGES]
                                    if not utils.BridgeExists(bridge)]

  if what.get(constants.NV_FILE_STORAGE_PATHS) == my_name:
    result[constants.NV_FILE_STORAGE_PATHS] = \
      bdev.ComputeWrongFileStoragePaths()

  return result


def GetBlockDevSizes(devices):
  """Return the size of the given block devices

  @type devices: list
  @param devices: list of block device nodes to query
  @rtype: dict
  @return:
    dictionary of all block devices under /dev (key). The value is their
    size in MiB.

    {'/dev/disk/by-uuid/123456-12321231-312312-312': 124}

  """
  DEV_PREFIX = "/dev/"
  blockdevs = {}

  for devpath in devices:
    if not utils.IsBelowDir(DEV_PREFIX, devpath):
      continue

    try:
      st = os.stat(devpath)
    except EnvironmentError, err:
      logging.warning("Error stat()'ing device %s: %s", devpath, str(err))
      continue

    if stat.S_ISBLK(st.st_mode):
      result = utils.RunCmd(["blockdev", "--getsize64", devpath])
      if result.failed:
        # We don't want to fail, just do not list this device as available
        logging.warning("Cannot get size for block device %s", devpath)
        continue

      size = int(result.stdout) / (1024 * 1024)
      blockdevs[devpath] = size
  return blockdevs


def GetVolumeList(vg_names):
  """Compute list of logical volumes and their size.

  @type vg_names: list
  @param vg_names: the volume groups whose LVs we should list, or
      empty for all volume groups
  @rtype: dict
  @return:
      dictionary of all partitions (key) with value being a tuple of
      their size (in MiB), inactive and online status::

        {'xenvg/test1': ('20.06', True, True)}

      in case of errors, a string is returned with the error
      details.

  """
  lvs = {}
  sep = "|"
  if not vg_names:
    vg_names = []
  result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
                         "--separator=%s" % sep,
                         "-ovg_name,lv_name,lv_size,lv_attr"] + vg_names)
  if result.failed:
    _Fail("Failed to list logical volumes, lvs output: %s", result.output)

  for line in result.stdout.splitlines():
    line = line.strip()
    match = _LVSLINE_REGEX.match(line)
    if not match:
      logging.error("Invalid line returned from lvs output: '%s'", line)
      continue
    vg_name, name, size, attr = match.groups()
    inactive = attr[4] == "-"
    online = attr[5] == "o"
    virtual = attr[0] == "v"
    if virtual:
      # we don't want to report such volumes as existing, since they
      # don't really hold data
      continue
    lvs[vg_name + "/" + name] = (size, inactive, online)

  return lvs
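
# Illustrative note (not part of the original module): in an lv_attr string
# such as "-wi-ao----", attr[0] == "v" marks a virtual volume (skipped
# above), attr[4] is the state field ("a" when active) and attr[5] is the
# device-open field ("o" when the LV is open), hence the index checks above.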


def ListVolumeGroups():
  """List the volume groups and their size.

  @rtype: dict
  @return: dictionary with keys volume name and values the
      size of the volume

  """
  return utils.ListVolumeGroups()


def NodeVolumes():
  """List all volumes on this node.

  @rtype: list
  @return:
    A list of dictionaries, each having four keys:
      - name: the logical volume name,
      - size: the size of the logical volume
      - dev: the physical device on which the LV lives
      - vg: the volume group to which it belongs

    In case of errors, we return an empty list and log the
    error.

    Note that since a logical volume can live on multiple physical
    volumes, the resulting list might include a logical volume
    multiple times.

  """
  result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
                         "--separator=|",
                         "--options=lv_name,lv_size,devices,vg_name"])
  if result.failed:
    _Fail("Failed to list logical volumes, lvs output: %s",
          result.output)

  def parse_dev(dev):
    return dev.split("(")[0]

  def handle_dev(dev):
    return [parse_dev(x) for x in dev.split(",")]

  def map_line(line):
    line = [v.strip() for v in line]
    return [{"name": line[0], "size": line[1],
             "dev": dev, "vg": line[3]} for dev in handle_dev(line[2])]

  all_devs = []
  for line in result.stdout.splitlines():
    if line.count("|") >= 3:
      all_devs.extend(map_line(line.split("|")))
    else:
      logging.warning("Strange line in the output from lvs: '%s'", line)
  return all_devs


def BridgesExist(bridges_list):
  """Check if a list of bridges exist on the current node.

  @raise RPCFail: if any of the bridges is missing

  """
  missing = []
  for bridge in bridges_list:
    if not utils.BridgeExists(bridge):
      missing.append(bridge)

  if missing:
    _Fail("Missing bridges %s", utils.CommaJoin(missing))


def GetInstanceList(hypervisor_list):
  """Provides a list of instances.

  @type hypervisor_list: list
  @param hypervisor_list: the list of hypervisors to query information

  @rtype: list
  @return: a list of all running instances on the current node
    - instance1.example.com
    - instance2.example.com

  """
  results = []
  for hname in hypervisor_list:
    try:
      names = hypervisor.GetHypervisor(hname).ListInstances()
      results.extend(names)
    except errors.HypervisorError, err:
      _Fail("Error enumerating instances (hypervisor %s): %s",
            hname, err, exc=True)

  return results


def GetInstanceInfo(instance, hname):
  """Gives back the information about an instance as a dictionary.

  @type instance: string
  @param instance: the instance name
  @type hname: string
  @param hname: the hypervisor type of the instance

  @rtype: dict
  @return: dictionary with the following keys:
      - memory: memory size of instance (int)
      - state: xen state of instance (string)
      - time: cpu time of instance (float)
      - vcpus: the number of vcpus (int)

  """
  output = {}

  iinfo = hypervisor.GetHypervisor(hname).GetInstanceInfo(instance)
  if iinfo is not None:
    output["memory"] = iinfo[2]
    output["vcpus"] = iinfo[3]
    output["state"] = iinfo[4]
    output["time"] = iinfo[5]

  return output
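
# Illustrative sketch (not part of the original module; values are made up):
#
#   GetInstanceInfo("instance1.example.com", constants.HT_XEN_PVM)
#   -> {"memory": 512, "vcpus": 1, "state": "-b----", "time": 1234.5}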


def GetInstanceMigratable(instance):
  """Gives whether an instance can be migrated.

  @type instance: L{objects.Instance}
  @param instance: object representing the instance to be checked.

  @rtype: tuple
  @return: tuple of (result, description) where:
      - result: whether the instance can be migrated or not
      - description: a description of the issue, if relevant

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  iname = instance.name
  if iname not in hyper.ListInstances():
    _Fail("Instance %s is not running", iname)

  for idx in range(len(instance.disks)):
    link_name = _GetBlockDevSymlinkPath(iname, idx)
    if not os.path.islink(link_name):
      logging.warning("Instance %s is missing symlink %s for disk %d",
                      iname, link_name, idx)
1110
1113 """Gather data about all instances.
1114
1115 This is the equivalent of L{GetInstanceInfo}, except that it
1116 computes data for all instances at once, thus being faster if one
1117 needs data about more than one instance.
1118
1119 @type hypervisor_list: list
1120 @param hypervisor_list: list of hypervisors to query for instance data
1121
1122 @rtype: dict
1123 @return: dictionary of instance: data, with data having the following keys:
1124 - memory: memory size of instance (int)
1125 - state: xen state of instance (string)
1126 - time: cpu time of instance (float)
1127 - vcpus: the number of vcpus
1128
1129 """
1130 output = {}
1131
1132 for hname in hypervisor_list:
1133 iinfo = hypervisor.GetHypervisor(hname).GetAllInstancesInfo()
1134 if iinfo:
1135 for name, _, memory, vcpus, state, times in iinfo:
1136 value = {
1137 "memory": memory,
1138 "vcpus": vcpus,
1139 "state": state,
1140 "time": times,
1141 }
1142 if name in output:
1143
1144
1145
1146 for key in "memory", "vcpus":
1147 if value[key] != output[name][key]:
1148 _Fail("Instance %s is running twice"
1149 " with different parameters", name)
1150 output[name] = value
1151
1152 return output


def _InstanceLogName(kind, os_name, instance, component):
  """Compute the OS log filename for a given instance and operation.

  The instance name and os name are passed in as strings since not all
  operations have these as part of an instance object.

  @type kind: string
  @param kind: the operation type (e.g. add, import, etc.)
  @type os_name: string
  @param os_name: the os name
  @type instance: string
  @param instance: the name of the instance being imported/added/etc.
  @type component: string or None
  @param component: the name of the component of the instance being
      transferred

  """
  # TODO: Use tempfile.mkstemp to create unique filename
  if component:
    assert "/" not in component
    c_msg = "-%s" % component
  else:
    c_msg = ""
  base = ("%s-%s-%s%s-%s.log" %
          (kind, os_name, instance, c_msg, utils.TimestampForFilename()))
  return utils.PathJoin(pathutils.LOG_OS_DIR, base)
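
# Illustrative sketch (not part of the original module): importing disk 0 of
# an instance yields a log path along the lines of
#
#   <LOG_OS_DIR>/import-debootstrap-inst1.example.com-disk0-<timestamp>.log
#
# with the timestamp coming from utils.TimestampForFilename().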


def InstanceOsAdd(instance, reinstall, debug):
  """Add an OS to an instance.

  @type instance: L{objects.Instance}
  @param instance: Instance whose OS is to be installed
  @type reinstall: boolean
  @param reinstall: whether this is an instance reinstall
  @type debug: integer
  @param debug: debug level, passed to the OS scripts
  @rtype: None

  """
  inst_os = OSFromDisk(instance.os)

  create_env = OSEnvironment(instance, inst_os, debug)
  if reinstall:
    create_env["INSTANCE_REINSTALL"] = "1"

  logfile = _InstanceLogName("add", instance.os, instance.name, None)

  result = utils.RunCmd([inst_os.create_script], env=create_env,
                        cwd=inst_os.path, output=logfile, reset_env=True)
  if result.failed:
    logging.error("os create command '%s' returned error: %s, logfile: %s,"
                  " output: %s", result.cmd, result.fail_reason, logfile,
                  result.output)
    lines = [utils.SafeEncode(val)
             for val in utils.TailFile(logfile, lines=20)]
    _Fail("OS create script failed (%s), last lines in the"
          " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)


def RunRenameInstance(instance, old_name, debug):
  """Run the OS rename script for an instance.

  @type instance: L{objects.Instance}
  @param instance: Instance whose OS is to be installed
  @type old_name: string
  @param old_name: previous instance name
  @type debug: integer
  @param debug: debug level, passed to the OS scripts
  @rtype: boolean
  @return: the success of the operation

  """
  inst_os = OSFromDisk(instance.os)

  rename_env = OSEnvironment(instance, inst_os, debug)
  rename_env["OLD_INSTANCE_NAME"] = old_name

  logfile = _InstanceLogName("rename", instance.os,
                             "%s-%s" % (old_name, instance.name), None)

  result = utils.RunCmd([inst_os.rename_script], env=rename_env,
                        cwd=inst_os.path, output=logfile, reset_env=True)

  if result.failed:
    logging.error("os rename command '%s' returned error: %s output: %s",
                  result.cmd, result.fail_reason, result.output)
    lines = [utils.SafeEncode(val)
             for val in utils.TailFile(logfile, lines=20)]
    _Fail("OS rename script failed (%s), last lines in the"
          " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)


def _GetBlockDevSymlinkPath(instance_name, idx, _dir=None):
  """Returns the name of the block device symlink for the given instance.

  """
  if _dir is None:
    _dir = pathutils.DISK_LINKS_DIR
  return utils.PathJoin(_dir, ("%s%s%d" % (instance_name,
                                           constants.DISK_SEPARATOR,
                                           idx)))
1261 """Set up symlinks to a instance's block device.
1262
1263 This is an auxiliary function run when an instance is start (on the primary
1264 node) or when an instance is migrated (on the target node).
1265
1266
1267 @param instance_name: the name of the target instance
1268 @param device_path: path of the physical block device, on the node
1269 @param idx: the disk index
1270 @return: absolute path to the disk's symlink
1271
1272 """
1273 link_name = _GetBlockDevSymlinkPath(instance_name, idx)
1274 try:
1275 os.symlink(device_path, link_name)
1276 except OSError, err:
1277 if err.errno == errno.EEXIST:
1278 if (not os.path.islink(link_name) or
1279 os.readlink(link_name) != device_path):
1280 os.remove(link_name)
1281 os.symlink(device_path, link_name)
1282 else:
1283 raise
1284
1285 return link_name
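
# Usage sketch (illustrative, not part of the original module): re-linking is
# idempotent; an existing symlink already pointing at device_path is kept, a
# stale one is replaced.
#
#   link = _SymlinkBlockDev("inst1.example.com", "/dev/xenvg/disk0", 0)
#   assert os.readlink(link) == "/dev/xenvg/disk0"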


def _RemoveBlockDevLinks(instance_name, disks):
  """Remove the block device symlinks belonging to the given instance.

  """
  for idx, _ in enumerate(disks):
    link_name = _GetBlockDevSymlinkPath(instance_name, idx)
    if os.path.islink(link_name):
      try:
        os.remove(link_name)
      except OSError:
        logging.exception("Can't remove symlink '%s'", link_name)


def _GatherAndLinkBlockDevs(instance):
  """Set up an instance's block device(s).

  This is run on the primary node at instance startup. The block
  devices must be already assembled.

  @type instance: L{objects.Instance}
  @param instance: the instance whose disks we should assemble
  @rtype: list
  @return: list of (disk_object, device_path)

  """
  block_devices = []
  for idx, disk in enumerate(instance.disks):
    device = _RecursiveFindBD(disk)
    if device is None:
      raise errors.BlockDeviceError("Block device '%s' is not set up." %
                                    str(disk))
    device.Open()
    try:
      link_name = _SymlinkBlockDev(instance.name, device.dev_path, idx)
    except OSError, e:
      raise errors.BlockDeviceError("Cannot create block device symlink: %s" %
                                    e.strerror)

    block_devices.append((disk, link_name))

  return block_devices


def StartInstance(instance, startup_paused, reason, store_reason=True):
  """Start an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @type startup_paused: bool
  @param startup_paused: pause instance at startup?
  @type reason: list of reasons
  @param reason: the reason trail for this startup
  @type store_reason: boolean
  @param store_reason: whether to store the reason trail on file
  @rtype: None

  """
  running_instances = GetInstanceList([instance.hypervisor])

  if instance.name in running_instances:
    logging.info("Instance %s already running, not starting", instance.name)
    return

  try:
    block_devices = _GatherAndLinkBlockDevs(instance)
    hyper = hypervisor.GetHypervisor(instance.hypervisor)
    hyper.StartInstance(instance, block_devices, startup_paused)
    if store_reason:
      _StoreInstReasonTrail(instance.name, reason)
  except errors.BlockDeviceError, err:
    _Fail("Block device error: %s", err, exc=True)
  except errors.HypervisorError, err:
    _RemoveBlockDevLinks(instance.name, instance.disks)
    _Fail("Hypervisor error: %s", err, exc=True)


def InstanceShutdown(instance, timeout, reason, store_reason=True):
  """Shut an instance down.

  @note: this function uses polling with a hardcoded timeout.

  @type instance: L{objects.Instance}
  @param instance: the instance object
  @type timeout: integer
  @param timeout: maximum timeout for soft shutdown
  @type reason: list of reasons
  @param reason: the reason trail for this shutdown
  @type store_reason: boolean
  @param store_reason: whether to store the shutdown reason trail on file
  @rtype: None

  """
  hv_name = instance.hypervisor
  hyper = hypervisor.GetHypervisor(hv_name)
  iname = instance.name

  if instance.name not in hyper.ListInstances():
    logging.info("Instance %s not running, doing nothing", iname)
    return

  class _TryShutdown:
    def __init__(self):
      self.tried_once = False

    def __call__(self):
      if iname not in hyper.ListInstances():
        return

      try:
        hyper.StopInstance(instance, retry=self.tried_once, timeout=timeout)
        if store_reason:
          _StoreInstReasonTrail(instance.name, reason)
      except errors.HypervisorError, err:
        if iname not in hyper.ListInstances():
          # if the instance is no longer existing, consider this a
          # success and go to cleanup
          return

        _Fail("Failed to stop instance %s: %s", iname, err)

      self.tried_once = True

      raise utils.RetryAgain()

  try:
    utils.Retry(_TryShutdown(), 5, timeout)
  except utils.RetryTimeout:
    # the shutdown did not succeed
    logging.error("Shutdown of '%s' unsuccessful, forcing", iname)

    try:
      hyper.StopInstance(instance, force=True)
    except errors.HypervisorError, err:
      if iname in hyper.ListInstances():
        # only raise an error if the instance still exists, otherwise
        # the error could simply be "instance ... unknown"!
        _Fail("Failed to force stop instance %s: %s", iname, err)

    time.sleep(1)

    if iname in hyper.ListInstances():
      _Fail("Could not shutdown instance %s even by destroy", iname)

  try:
    hyper.CleanupInstance(instance.name)
  except errors.HypervisorError, err:
    logging.warning("Failed to execute post-shutdown cleanup step: %s", err)

  _RemoveBlockDevLinks(iname, instance.disks)


def InstanceReboot(instance, reboot_type, shutdown_timeout, reason):
  """Reboot an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance object to reboot
  @type reboot_type: str
  @param reboot_type: the type of reboot, one of the following
    constants:
      - L{constants.INSTANCE_REBOOT_SOFT}: only reboot the
        instance OS, do not recreate the VM
      - L{constants.INSTANCE_REBOOT_HARD}: tear down and
        restart the VM (at the hypervisor level)
      - the other reboot type (L{constants.INSTANCE_REBOOT_FULL}) is
        not accepted here, since that mode is handled differently, in
        cmdlib, and translates into full stop and start of the
        instance (instead of a call_instance_reboot RPC)
  @type shutdown_timeout: integer
  @param shutdown_timeout: maximum timeout for soft shutdown
  @type reason: list of reasons
  @param reason: the reason trail for this reboot
  @rtype: None

  """
  running_instances = GetInstanceList([instance.hypervisor])

  if instance.name not in running_instances:
    _Fail("Cannot reboot instance %s that is not running", instance.name)

  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  if reboot_type == constants.INSTANCE_REBOOT_SOFT:
    try:
      hyper.RebootInstance(instance)
    except errors.HypervisorError, err:
      _Fail("Failed to soft reboot instance %s: %s", instance.name, err)
  elif reboot_type == constants.INSTANCE_REBOOT_HARD:
    try:
      InstanceShutdown(instance, shutdown_timeout, reason, store_reason=False)
      result = StartInstance(instance, False, reason, store_reason=False)
      _StoreInstReasonTrail(instance.name, reason)
      return result
    except errors.HypervisorError, err:
      _Fail("Failed to hard reboot instance %s: %s", instance.name, err)
  else:
    _Fail("Invalid reboot_type received: %s", reboot_type)


def AcceptInstance(instance, info, target):
  """Prepare the node to accept an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance definition
  @type info: string/data (opaque)
  @param info: migration information, from the source node
  @type target: string
  @param target: target host (usually ip), on this node

  """
  if instance.disk_template in constants.DTS_EXT_MIRROR:
    # Create the symlinks, as the disks are not active
    # in any way
    try:
      _GatherAndLinkBlockDevs(instance)
    except errors.BlockDeviceError, err:
      _Fail("Block device error: %s", err, exc=True)

  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  try:
    hyper.AcceptInstance(instance, info, target)
  except errors.HypervisorError, err:
    if instance.disk_template in constants.DTS_EXT_MIRROR:
      _RemoveBlockDevLinks(instance.name, instance.disks)
    _Fail("Failed to accept instance: %s", err, exc=True)


def FinalizeMigrationDst(instance, info, success):
  """Finalize any preparation to accept an instance.

  @type instance: L{objects.Instance}
  @param instance: the instance definition
  @type info: string/data (opaque)
  @param info: migration information, from the source node
  @type success: boolean
  @param success: whether the migration was a success or a failure

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  try:
    hyper.FinalizeMigrationDst(instance, info, success)
  except errors.HypervisorError, err:
    _Fail("Failed to finalize migration on the target node: %s", err, exc=True)


def MigrateInstance(instance, target, live):
  """Migrates an instance to another node.

  @type instance: L{objects.Instance}
  @param instance: the instance definition
  @type target: string
  @param target: the target node name
  @type live: boolean
  @param live: whether the migration should be done live or not (the
      interpretation of this parameter is left to the hypervisor)
  @raise RPCFail: if migration fails for some reason

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)

  try:
    hyper.MigrateInstance(instance, target, live)
  except errors.HypervisorError, err:
    _Fail("Failed to migrate instance: %s", err, exc=True)


def FinalizeMigrationSource(instance, success, live):
  """Finalize the instance migration on the source node.

  @type instance: L{objects.Instance}
  @param instance: the instance definition of the migrated instance
  @type success: bool
  @param success: whether the migration succeeded or not
  @type live: bool
  @param live: whether the user requested a live migration or not
  @raise RPCFail: If the execution fails for some reason

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)

  try:
    hyper.FinalizeMigrationSource(instance, success, live)
  except Exception, err:  # pylint: disable=W0703
    _Fail("Failed to finalize the migration on the source node: %s", err,
          exc=True)


def GetMigrationStatus(instance):
  """Get the migration status

  @type instance: L{objects.Instance}
  @param instance: the instance that is being migrated
  @rtype: L{objects.MigrationStatus}
  @return: the status of the current migration (one of
    L{constants.HV_MIGRATION_VALID_STATUSES}), plus any additional
    progress info that can be retrieved from the hypervisor
  @raise RPCFail: If the migration status cannot be retrieved

  """
  hyper = hypervisor.GetHypervisor(instance.hypervisor)
  try:
    return hyper.GetMigrationStatus(instance)
  except Exception, err:  # pylint: disable=W0703
    _Fail("Failed to get migration status: %s", err, exc=True)
def BlockdevCreate(disk, size, owner, on_primary, info, excl_stor):
  """Creates a block device for an instance.

  @type disk: L{objects.Disk}
  @param disk: the object describing the disk we should create
  @type size: int
  @param size: the size of the physical underlying device, in MiB
  @type owner: str
  @param owner: the name of the instance for which disk is created,
      used for device cache data
  @type on_primary: boolean
  @param on_primary: indicates if it is the primary node or not
  @type info: string
  @param info: string that will be sent to the physical device
      creation, used for example to set (LVM) tags on LVs
  @type excl_stor: boolean
  @param excl_stor: Whether exclusive_storage is active

  @return: the new unique_id of the device (this can sometime be
      computed only after creation), or None. On secondary nodes,
      it's not required to return anything.

  """
  clist = []
  if disk.children:
    for child in disk.children:
      try:
        crdev = _RecursiveAssembleBD(child, owner, on_primary)
      except errors.BlockDeviceError, err:
        _Fail("Can't assemble device %s: %s", child, err)
      if on_primary or disk.AssembleOnSecondary():
        # we need the children open in case the device itself has
        # to be assembled
        try:
          crdev.Open()
        except errors.BlockDeviceError, err:
          _Fail("Can't make child '%s' read-write: %s", child, err)
      clist.append(crdev)

  try:
    device = bdev.Create(disk, clist, excl_stor)
  except errors.BlockDeviceError, err:
    _Fail("Can't create block device: %s", err)

  if on_primary or disk.AssembleOnSecondary():
    try:
      device.Assemble()
    except errors.BlockDeviceError, err:
      _Fail("Can't assemble device after creation, unusual event: %s", err)
    if on_primary or disk.OpenOnSecondary():
      try:
        device.Open(force=True)
      except errors.BlockDeviceError, err:
        _Fail("Can't make device r/w after creation, unusual event: %s", err)
    DevCacheManager.UpdateCache(device.dev_path, owner,
                                on_primary, disk.iv_name)

  device.SetInfo(info)

  return device.unique_id


def _WipeDevice(path, offset, size):
  """This function actually wipes the device.

  @param path: The path to the device to wipe
  @param offset: The offset in MiB in the file
  @param size: The size in MiB to write

  """
  # Internal sizes are always in Mebibytes; if the following "dd" command
  # should use a different block size the offset and size given to this
  # function must be adjusted accordingly before being passed to "dd".
  block_size = 1024 * 1024

  cmd = [constants.DD_CMD, "if=/dev/zero", "seek=%d" % offset,
         "bs=%s" % block_size, "oflag=direct", "of=%s" % path,
         "count=%d" % size]
  result = utils.RunCmd(cmd)

  if result.failed:
    _Fail("Wipe command '%s' exited with error: %s; output: %s", result.cmd,
          result.fail_reason, result.output)
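
# Illustrative sketch (not part of the original module; the device path is a
# placeholder): wiping 16 MiB starting 4 MiB into a volume runs a command
# equivalent to
#
#   dd if=/dev/zero seek=4 bs=1048576 oflag=direct of=/dev/xenvg/disk0 count=16
#
# seek/count are in bs-sized blocks, so MiB offsets map directly onto "dd".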


def BlockdevWipe(disk, offset, size):
  """Wipes a block device.

  @type disk: L{objects.Disk}
  @param disk: the disk object we want to wipe
  @type offset: int
  @param offset: The offset in MiB in the file
  @type size: int
  @param size: The size in MiB to write

  """
  try:
    rdev = _RecursiveFindBD(disk)
  except errors.BlockDeviceError:
    rdev = None

  if not rdev:
    _Fail("Cannot execute wipe for device %s: device not found", disk.iv_name)

  # Do cross verify some of the parameters
  if offset < 0:
    _Fail("Negative offset")
  if size < 0:
    _Fail("Negative size")
  if offset > rdev.size:
    _Fail("Offset is bigger than device size")
  if (offset + size) > rdev.size:
    _Fail("The provided offset and size to wipe is bigger than device size")

  _WipeDevice(rdev.dev_path, offset, size)


def BlockdevPauseResumeSync(disks, pause):
  """Pause or resume the sync of the block device.

  @type disks: list of L{objects.Disk}
  @param disks: the disks object we want to pause/resume
  @type pause: bool
  @param pause: Whether to pause or resume

  """
  success = []
  for disk in disks:
    try:
      rdev = _RecursiveFindBD(disk)
    except errors.BlockDeviceError:
      rdev = None

    if not rdev:
      success.append((False, ("Cannot change sync for device %s:"
                              " device not found" % disk.iv_name)))
      continue

    result = rdev.PauseResumeSync(pause)

    if result:
      success.append((result, None))
    else:
      if pause:
        msg = "Pause"
      else:
        msg = "Resume"
      success.append((result, "%s for device %s failed" % (msg, disk.iv_name)))

  return success


def BlockdevRemove(disk):
  """Remove a block device.

  @note: This is intended to be called recursively.

  @type disk: L{objects.Disk}
  @param disk: the disk object we should remove
  @rtype: boolean
  @return: the success of the operation

  """
  msgs = []
  try:
    rdev = _RecursiveFindBD(disk)
  except errors.BlockDeviceError, err:
    # probably can't attach
    logging.info("Can't attach to device %s in remove", disk)
    rdev = None
  if rdev is not None:
    r_path = rdev.dev_path
    try:
      rdev.Remove()
    except errors.BlockDeviceError, err:
      msgs.append(str(err))
    if not msgs:
      DevCacheManager.RemoveCache(r_path)

  if disk.children:
    for child in disk.children:
      try:
        BlockdevRemove(child)
      except RPCFail, err:
        msgs.append(str(err))

  if msgs:
    _Fail("; ".join(msgs))


def _RecursiveAssembleBD(disk, owner, as_primary):
  """Activate a block device for an instance.

  This is run on the primary and secondary nodes for an instance.

  @note: this function is called recursively.

  @type disk: L{objects.Disk}
  @param disk: the disk we try to assemble
  @type owner: str
  @param owner: the name of the instance which owns the disk
  @type as_primary: boolean
  @param as_primary: if we should make the block device
      read/write

  @return: the assembled device or None (in case no device
      was assembled)
  @raise errors.BlockDeviceError: in case there is an error
      during the activation of the children or the device
      itself

  """
  children = []
  if disk.children:
    mcn = disk.ChildrenNeeded()
    if mcn == -1:
      mcn = 0 # max number of Nones allowed
    else:
      mcn = len(disk.children) - mcn # max number of Nones
    for chld_disk in disk.children:
      try:
        cdev = _RecursiveAssembleBD(chld_disk, owner, as_primary)
      except errors.BlockDeviceError, err:
        if children.count(None) >= mcn:
          raise
        cdev = None
        logging.error("Error in child activation (but continuing): %s",
                      str(err))
      children.append(cdev)

  if as_primary or disk.AssembleOnSecondary():
    r_dev = bdev.Assemble(disk, children)
    result = r_dev
    if as_primary or disk.OpenOnSecondary():
      r_dev.Open()
    DevCacheManager.UpdateCache(r_dev.dev_path, owner,
                                as_primary, disk.iv_name)

  else:
    result = True
  return result


def BlockdevAssemble(disk, owner, as_primary, idx):
  """Activate a block device for an instance.

  This is a wrapper over _RecursiveAssembleBD.

  @rtype: str or boolean
  @return: a C{/dev/...} path for primary nodes, and
      C{True} for secondary nodes

  """
  try:
    result = _RecursiveAssembleBD(disk, owner, as_primary)
    if isinstance(result, bdev.BlockDev):
      result = result.dev_path
      if as_primary:
        _SymlinkBlockDev(owner, result, idx)
  except errors.BlockDeviceError, err:
    _Fail("Error while assembling disk: %s", err, exc=True)
  except OSError, err:
    _Fail("Error while symlinking disk: %s", err, exc=True)

  return result


def BlockdevShutdown(disk):
  """Shut down a block device.

  First, if the device is assembled (Attach() is successful), then
  the device is shutdown. Then the children of the device are
  shutdown.

  This function is called recursively. Note that we don't cache the
  children or such: as opposed to assemble, shutting down different
  devices doesn't require that the upper device was active.

  @type disk: L{objects.Disk}
  @param disk: the description of the disk we should
      shutdown
  @rtype: None

  """
  msgs = []
  r_dev = _RecursiveFindBD(disk)
  if r_dev is not None:
    r_path = r_dev.dev_path
    try:
      r_dev.Shutdown()
      DevCacheManager.RemoveCache(r_path)
    except errors.BlockDeviceError, err:
      msgs.append(str(err))

  if disk.children:
    for child in disk.children:
      try:
        BlockdevShutdown(child)
      except RPCFail, err:
        msgs.append(str(err))

  if msgs:
    _Fail("; ".join(msgs))


def BlockdevAddchildren(parent_cdev, new_cdevs):
  """Extend a mirrored block device.

  @type parent_cdev: L{objects.Disk}
  @param parent_cdev: the disk to which we should add children
  @type new_cdevs: list of L{objects.Disk}
  @param new_cdevs: the list of children which we should add
  @rtype: None

  """
  parent_bdev = _RecursiveFindBD(parent_cdev)
  if parent_bdev is None:
    _Fail("Can't find parent device '%s' in add children", parent_cdev)
  new_bdevs = [_RecursiveFindBD(disk) for disk in new_cdevs]
  if new_bdevs.count(None) > 0:
    _Fail("Can't find new device(s) to add: %s:%s", new_bdevs, new_cdevs)
  parent_bdev.AddChildren(new_bdevs)


def BlockdevRemovechildren(parent_cdev, new_cdevs):
  """Shrink a mirrored block device.

  @type parent_cdev: L{objects.Disk}
  @param parent_cdev: the disk from which we should remove children
  @type new_cdevs: list of L{objects.Disk}
  @param new_cdevs: the list of children which we should remove
  @rtype: None

  """
  parent_bdev = _RecursiveFindBD(parent_cdev)
  if parent_bdev is None:
    _Fail("Can't find parent device '%s' in remove children", parent_cdev)
  devs = []
  for disk in new_cdevs:
    rpath = disk.StaticDevPath()
    if rpath is None:
      bd = _RecursiveFindBD(disk)
      if bd is None:
        _Fail("Can't find device %s while removing children", disk)
      else:
        devs.append(bd.dev_path)
    else:
      if not utils.IsNormAbsPath(rpath):
        _Fail("Strange path returned from StaticDevPath: '%s'", rpath)
      devs.append(rpath)
  parent_bdev.RemoveChildren(devs)


def BlockdevGetmirrorstatus(disks):
  """Get the mirroring status of a list of devices.

  @type disks: list of L{objects.Disk}
  @param disks: the list of disks which we should query
  @rtype: list of L{objects.BlockDevStatus}
  @return: List of L{objects.BlockDevStatus}, one for each disk
  @raise errors.BlockDeviceError: if any of the disks cannot be
      found

  """
  stats = []
  for dsk in disks:
    rbd = _RecursiveFindBD(dsk)
    if rbd is None:
      _Fail("Can't find device %s", dsk)

    stats.append(rbd.CombinedSyncStatus())

  return stats


def BlockdevGetmirrorstatusMulti(disks):
  """Get the mirroring status of a list of devices.

  @type disks: list of L{objects.Disk}
  @param disks: the list of disks which we should query
  @rtype: list of tuples
  @return: List of tuples, (bool, status), one for each disk; bool denotes
    success/failure, status is L{objects.BlockDevStatus} on success, string
    otherwise

  """
  result = []
  for disk in disks:
    try:
      rbd = _RecursiveFindBD(disk)
      if rbd is None:
        result.append((False, "Can't find device %s" % disk))
        continue

      status = rbd.CombinedSyncStatus()
    except errors.BlockDeviceError, err:
      logging.exception("Error while getting disk status")
      result.append((False, str(err)))
    else:
      result.append((True, status))

  assert len(disks) == len(result)

  return result
2037
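# A small sketch of consuming the per-disk (success, payload) pairs
# returned above; the disk names and payloads are hypothetical stand-ins
# for L{objects.Disk} and L{objects.BlockDevStatus} values.
_disks = ["disk/0", "disk/1"]
_result = [(True, "<BlockDevStatus>"), (False, "Can't find device disk/1")]
_errors = [(disk, payload)
           for (disk, (ok, payload)) in zip(_disks, _result) if not ok]
assert _errors == [("disk/1", "Can't find device disk/1")]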
2040 """Check if a device is activated.
2041
2042 If so, return information about the real device.
2043
2044 @type disk: L{objects.Disk}
2045 @param disk: the disk object we need to find
2046
2047 @return: None if the device can't be found,
2048 otherwise the device instance
2049
2050 """
2051 children = []
2052 if disk.children:
2053 for chdisk in disk.children:
2054 children.append(_RecursiveFindBD(chdisk))
2055
2056 return bdev.FindDevice(disk, children)
2057
2060 """Opens the underlying block device of a disk.
2061
2062 @type disk: L{objects.Disk}
2063 @param disk: the disk object we want to open
2064
2065 """
2066 real_disk = _RecursiveFindBD(disk)
2067 if real_disk is None:
2068 _Fail("Block device '%s' is not set up", disk)
2069
2070 real_disk.Open()
2071
2072 return real_disk
2073
2076 """Check if a device is activated.
2077
2078 If it is, return information about the real device.
2079
2080 @type disk: L{objects.Disk}
2081 @param disk: the disk to find
2082 @rtype: None or objects.BlockDevStatus
2083 @return: None if the disk cannot be found, otherwise the current
2084 sync status information
2085
2086 """
2087 try:
2088 rbd = _RecursiveFindBD(disk)
2089 except errors.BlockDeviceError, err:
2090 _Fail("Failed to find device: %s", err, exc=True)
2091
2092 if rbd is None:
2093 return None
2094
2095 return rbd.GetSyncStatus()
2096
2099 """Computes the size of the given disks.
2100
2101 If a disk is not found, returns None instead.
2102
2103 @type disks: list of L{objects.Disk}
2104 @param disks: the list of disk to compute the size for
2105 @rtype: list
2106 @return: list with elements None if the disk cannot be found,
2107 otherwise the size
2108
2109 """
2110 result = []
2111 for cf in disks:
2112 try:
2113 rbd = _RecursiveFindBD(cf)
2114 except errors.BlockDeviceError:
2115 result.append(None)
2116 continue
2117 if rbd is None:
2118 result.append(None)
2119 else:
2120 result.append(rbd.GetActualSize())
2121 return result
2122
2125 """Export a block device to a remote node.
2126
2127 @type disk: L{objects.Disk}
2128 @param disk: the description of the disk to export
2129 @type dest_node: str
2130 @param dest_node: the destination node to export to
2131 @type dest_path: str
2132 @param dest_path: the destination path on the target node
2133 @type cluster_name: str
2134 @param cluster_name: the cluster name, needed for SSH hostalias
2135 @rtype: None
2136
2137 """
2138 real_disk = _OpenRealBD(disk)
2139
2140
2141 expcmd = utils.BuildShellCmd("set -e; set -o pipefail; "
2142 "dd if=%s bs=1048576 count=%s",
2143 real_disk.dev_path, str(disk.size))
2144
2145
2146
2147
2148
2149
2150
2151 destcmd = utils.BuildShellCmd("dd of=%s conv=nocreat,notrunc bs=65536"
2152 " oflag=dsync", dest_path)
2153
2154 remotecmd = _GetSshRunner(cluster_name).BuildCmd(dest_node,
2155 constants.SSH_LOGIN_USER,
2156 destcmd)
2157
2158
2159 command = "|".join([expcmd, utils.ShellQuoteArgs(remotecmd)])
2160
2161 result = utils.RunCmd(["bash", "-c", command])
2162
2163 if result.failed:
2164 _Fail("Disk copy command '%s' returned error: %s"
2165 " output: %s", command, result.fail_reason, result.output)
2166
2167
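# A sketch of the shell pipeline BlockdevExport assembles above; the device
# path, target host and file are hypothetical. Running it through
# "bash -c" matters because "set -o pipefail" is a bash(1) feature:
# without it, a failure in the reading dd would be masked by a successful
# exit of the remote writer.
_src = "dd if=/dev/xenvg/disk0 bs=1048576 count=1024"
_dst = "dd of=/var/tmp/disk0.img conv=nocreat,notrunc bs=65536 oflag=dsync"
_remote = "ssh node2.example.com \"%s\"" % _dst
_pipeline = "set -e; set -o pipefail; %s | %s" % (_src, _remote)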
2168 def UploadFile(file_name, data, mode, uid, gid, atime, mtime):
2169 """Write a file to the filesystem.
2170
2171 This allows the master to overwrite(!) a file. It will only perform
2172 the operation if the file belongs to a list of configuration files.
2173
2174 @type file_name: str
2175 @param file_name: the target file name
2176 @type data: str
2177 @param data: the new contents of the file
2178 @type mode: int
2179 @param mode: the mode to give the file (can be None)
2180 @type uid: string
2181 @param uid: the owner of the file
2182 @type gid: string
2183 @param gid: the group of the file
2184 @type atime: float
2185 @param atime: the atime to set on the file (can be None)
2186 @type mtime: float
2187 @param mtime: the mtime to set on the file (can be None)
2188 @rtype: None
2189
2190 """
2191 file_name = vcluster.LocalizeVirtualPath(file_name)
2192
2193 if not os.path.isabs(file_name):
2194 _Fail("Filename passed to UploadFile is not absolute: '%s'", file_name)
2195
2196 if file_name not in _ALLOWED_UPLOAD_FILES:
2197 _Fail("Filename passed to UploadFile not in allowed upload targets: '%s'",
2198 file_name)
2199
2200 raw_data = _Decompress(data)
2201
2202 if not (isinstance(uid, basestring) and isinstance(gid, basestring)):
2203 _Fail("Invalid username/groupname type")
2204
2205 getents = runtime.GetEnts()
2206 uid = getents.LookupUser(uid)
2207 gid = getents.LookupGroup(gid)
2208
2209 utils.SafeWriteFile(file_name, None,
2210 data=raw_data, mode=mode, uid=uid, gid=gid,
2211 atime=atime, mtime=mtime)
2212
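# A reduced sketch of the two gates applied above; the whitelist entry is
# hypothetical, while the real list lives in _ALLOWED_UPLOAD_FILES.
import os.path

_ALLOWED = frozenset(["/etc/example/cluster.conf"])  # hypothetical entry

def _check_upload_target(file_name):
  if not os.path.isabs(file_name):
    raise ValueError("path is not absolute: %r" % file_name)
  if file_name not in _ALLOWED:
    raise ValueError("not an allowed upload target: %r" % file_name)

_check_upload_target("/etc/example/cluster.conf")  # passes silently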
2213
2214 def RunOob(oob_program, command, node, timeout):
2215 """Executes oob_program with given command on given node.
2216
2217 @param oob_program: The path to the executable oob_program
2218 @param command: The command to invoke on oob_program
2219 @param node: The node given as an argument to the program
2220 @param timeout: Timeout after which we kill the oob program
2221
2222 @return: stdout
2223 @raise RPCFail: If execution fails for some reason
2224
2225 """
2226 result = utils.RunCmd([oob_program, command, node], timeout=timeout)
2227
2228 if result.failed:
2229 _Fail("'%s' failed with reason '%s'; output: %s", result.cmd,
2230 result.fail_reason, result.output)
2231
2232 return result.stdout
2233
2236 """Compute and return the API version of a given OS.
2237
2238 This function will try to read the API version of the OS residing in
2239 the 'os_dir' directory.
2240
2241 @type os_dir: str
2242 @param os_dir: the directory in which we should look for the OS
2243 @rtype: tuple
2244 @return: tuple (status, data) with status denoting the validity and
2245 data holding either the valid versions or an error message
2246
2247 """
2248 api_file = utils.PathJoin(os_dir, constants.OS_API_FILE)
2249
2250 try:
2251 st = os.stat(api_file)
2252 except EnvironmentError, err:
2253 return False, ("Required file '%s' not found under path %s: %s" %
2254 (constants.OS_API_FILE, os_dir, utils.ErrnoOrStr(err)))
2255
2256 if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
2257 return False, ("File '%s' in %s is not a regular file" %
2258 (constants.OS_API_FILE, os_dir))
2259
2260 try:
2261 api_versions = utils.ReadFile(api_file).splitlines()
2262 except EnvironmentError, err:
2263 return False, ("Error while reading the API version file at %s: %s" %
2264 (api_file, utils.ErrnoOrStr(err)))
2265
2266 try:
2267 api_versions = [int(version.strip()) for version in api_versions]
2268 except (TypeError, ValueError), err:
2269 return False, ("API version(s) can't be converted to integer: %s" %
2270 str(err))
2271
2272 return True, api_versions
2273
2276 """Compute the validity for all OSes.
2277
2278 @type top_dirs: list
2279 @param top_dirs: the list of directories in which to
2280 search (if not given defaults to
2281 L{pathutils.OS_SEARCH_PATH})
2282 @rtype: list of L{objects.OS}
2283 @return: a list of tuples (name, path, status, diagnose, variants,
2284 parameters, api_version) for all (potential) OSes under all
2285 search paths, where:
2286 - name is the (potential) OS name
2287 - path is the full path to the OS
2288 - status True/False is the validity of the OS
2289 - diagnose is the error message for an invalid OS, otherwise empty
2290 - variants is a list of supported OS variants, if any
2291 - parameters is a list of (name, help) parameters, if any
2292 - api_version is a list of supported OS API versions
2293
2294 """
2295 if top_dirs is None:
2296 top_dirs = pathutils.OS_SEARCH_PATH
2297
2298 result = []
2299 for dir_name in top_dirs:
2300 if os.path.isdir(dir_name):
2301 try:
2302 f_names = utils.ListVisibleFiles(dir_name)
2303 except EnvironmentError, err:
2304 logging.exception("Can't list the OS directory %s: %s", dir_name, err)
2305 break
2306 for name in f_names:
2307 os_path = utils.PathJoin(dir_name, name)
2308 status, os_inst = _TryOSFromDisk(name, base_dir=dir_name)
2309 if status:
2310 diagnose = ""
2311 variants = os_inst.supported_variants
2312 parameters = os_inst.supported_parameters
2313 api_versions = os_inst.api_versions
2314 else:
2315 diagnose = os_inst
2316 variants = parameters = api_versions = []
2317 result.append((name, os_path, status, diagnose, variants,
2318 parameters, api_versions))
2319
2320 return result
2321
2324 """Create an OS instance from disk.
2325
2326 This function will return an OS instance if the given name is a
2327 valid OS name.
2328
2329 @type base_dir: string
2330 @keyword base_dir: Base directory containing OS installations.
2331 Defaults to a search in all the OS_SEARCH_PATH dirs.
2332 @rtype: tuple
2333 @return: tuple (success, data), where data is either a valid OS
2334 instance or an error message
2335
2336 """
2337 if base_dir is None:
2338 os_dir = utils.FindFile(name, pathutils.OS_SEARCH_PATH, os.path.isdir)
2339 else:
2340 os_dir = utils.FindFile(name, [base_dir], os.path.isdir)
2341
2342 if os_dir is None:
2343 return False, "Directory for OS %s not found in search path" % name
2344
2345 status, api_versions = _OSOndiskAPIVersion(os_dir)
2346 if not status:
2347
2348 return status, api_versions
2349
2350 if not constants.OS_API_VERSIONS.intersection(api_versions):
2351 return False, ("API version mismatch for path '%s': found %s, want %s." %
2352 (os_dir, api_versions, constants.OS_API_VERSIONS))
2353
2354
2355
2356
2357 os_files = dict.fromkeys(constants.OS_SCRIPTS, True)
2358
2359 if max(api_versions) >= constants.OS_API_V15:
2360 os_files[constants.OS_VARIANTS_FILE] = False
2361
2362 if max(api_versions) >= constants.OS_API_V20:
2363 os_files[constants.OS_PARAMETERS_FILE] = True
2364 else:
2365 del os_files[constants.OS_SCRIPT_VERIFY]
2366
2367 for (filename, required) in os_files.items():
2368 os_files[filename] = utils.PathJoin(os_dir, filename)
2369
2370 try:
2371 st = os.stat(os_files[filename])
2372 except EnvironmentError, err:
2373 if err.errno == errno.ENOENT and not required:
2374 del os_files[filename]
2375 continue
2376 return False, ("File '%s' under path '%s' is missing (%s)" %
2377 (filename, os_dir, utils.ErrnoOrStr(err)))
2378
2379 if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
2380 return False, ("File '%s' under path '%s' is not a regular file" %
2381 (filename, os_dir))
2382
2383 if filename in constants.OS_SCRIPTS:
2384 if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
2385 return False, ("File '%s' under path '%s' is not executable" %
2386 (filename, os_dir))
2387
2388 variants = []
2389 if constants.OS_VARIANTS_FILE in os_files:
2390 variants_file = os_files[constants.OS_VARIANTS_FILE]
2391 try:
2392 variants = \
2393 utils.FilterEmptyLinesAndComments(utils.ReadFile(variants_file))
2394 except EnvironmentError, err:
2395
2396 if err.errno != errno.ENOENT:
2397 return False, ("Error while reading the OS variants file at %s: %s" %
2398 (variants_file, utils.ErrnoOrStr(err)))
2399
2400 parameters = []
2401 if constants.OS_PARAMETERS_FILE in os_files:
2402 parameters_file = os_files[constants.OS_PARAMETERS_FILE]
2403 try:
2404 parameters = utils.ReadFile(parameters_file).splitlines()
2405 except EnvironmentError, err:
2406 return False, ("Error while reading the OS parameters file at %s: %s" %
2407 (parameters_file, utils.ErrnoOrStr(err)))
2408 parameters = [v.split(None, 1) for v in parameters]
2409
2410 os_obj = objects.OS(name=name, path=os_dir,
2411 create_script=os_files[constants.OS_SCRIPT_CREATE],
2412 export_script=os_files[constants.OS_SCRIPT_EXPORT],
2413 import_script=os_files[constants.OS_SCRIPT_IMPORT],
2414 rename_script=os_files[constants.OS_SCRIPT_RENAME],
2415 verify_script=os_files.get(constants.OS_SCRIPT_VERIFY,
2416 None),
2417 supported_variants=variants,
2418 supported_parameters=parameters,
2419 api_versions=api_versions)
2420 return True, os_obj
2421
2424 """Create an OS instance from disk.
2425
2426 This function will return an OS instance if the given name is a
2427 valid OS name. Otherwise, it will raise an appropriate
2428 L{RPCFail} exception, detailing why this is not a valid OS.
2429
2430 This is just a wrapper over L{_TryOSFromDisk}, which doesn't raise
2431 an exception but returns true/false status data.
2432
2433 @type base_dir: string
2434 @keyword base_dir: Base directory containing OS installations.
2435 Defaults to a search in all the OS_SEARCH_PATH dirs.
2436 @rtype: L{objects.OS}
2437 @return: the OS instance if we find a valid one
2438 @raise RPCFail: if we don't find a valid OS
2439
2440 """
2441 name_only = objects.OS.GetName(name)
2442 status, payload = _TryOSFromDisk(name_only, base_dir)
2443
2444 if not status:
2445 _Fail(payload)
2446
2447 return payload
2448
2449
2450 def OSCoreEnv(os_name, inst_os, os_params, debug=0):
2451 """Calculate the basic environment for an os script.
2452
2453 @type os_name: str
2454 @param os_name: full operating system name (including variant)
2455 @type inst_os: L{objects.OS}
2456 @param inst_os: operating system for which the environment is being built
2457 @type os_params: dict
2458 @param os_params: the OS parameters
2459 @type debug: integer
2460 @param debug: debug level (0 or 1, for OS API 10)
2461 @rtype: dict
2462 @return: dict of environment variables
2463 @raise errors.BlockDeviceError: if the block device
2464 cannot be found
2465
2466 """
2467 result = {}
2468 api_version = \
2469 max(constants.OS_API_VERSIONS.intersection(inst_os.api_versions))
2470 result["OS_API_VERSION"] = "%d" % api_version
2471 result["OS_NAME"] = inst_os.name
2472 result["DEBUG_LEVEL"] = "%d" % debug
2473
2474
2475 if api_version >= constants.OS_API_V15 and inst_os.supported_variants:
2476 variant = objects.OS.GetVariant(os_name)
2477 if not variant:
2478 variant = inst_os.supported_variants[0]
2479 else:
2480 variant = ""
2481 result["OS_VARIANT"] = variant
2482
2483
2484 for pname, pvalue in os_params.items():
2485 result["OSP_%s" % pname.upper()] = pvalue
2486
2487
2488
2489
2490 result["PATH"] = constants.HOOKS_PATH
2491
2492 return result
2493
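# A sketch of the OSP_* naming rule applied above; the parameter names and
# values are hypothetical.
_os_params = {"dhcp": "yes", "root_size": "10G"}
_env = dict(("OSP_%s" % name.upper(), value)
            for (name, value) in _os_params.items())
assert _env == {"OSP_DHCP": "yes", "OSP_ROOT_SIZE": "10G"}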
2496 """Calculate the environment for an os script.
2497
2498 @type instance: L{objects.Instance}
2499 @param instance: target instance for the os script run
2500 @type inst_os: L{objects.OS}
2501 @param inst_os: operating system for which the environment is being built
2502 @type debug: integer
2503 @param debug: debug level (0 or 1, for OS API 10)
2504 @rtype: dict
2505 @return: dict of environment variables
2506 @raise errors.BlockDeviceError: if the block device
2507 cannot be found
2508
2509 """
2510 result = OSCoreEnv(instance.os, inst_os, instance.osparams, debug=debug)
2511
2512 for attr in ["name", "os", "uuid", "ctime", "mtime", "primary_node"]:
2513 result["INSTANCE_%s" % attr.upper()] = str(getattr(instance, attr))
2514
2515 result["HYPERVISOR"] = instance.hypervisor
2516 result["DISK_COUNT"] = "%d" % len(instance.disks)
2517 result["NIC_COUNT"] = "%d" % len(instance.nics)
2518 result["INSTANCE_SECONDARY_NODES"] = \
2519 ("%s" % " ".join(instance.secondary_nodes))
2520
2521
2522 for idx, disk in enumerate(instance.disks):
2523 real_disk = _OpenRealBD(disk)
2524 result["DISK_%d_PATH" % idx] = real_disk.dev_path
2525 result["DISK_%d_ACCESS" % idx] = disk.mode
2526 result["DISK_%d_UUID" % idx] = disk.uuid
2527 if disk.name:
2528 result["DISK_%d_NAME" % idx] = disk.name
2529 if constants.HV_DISK_TYPE in instance.hvparams:
2530 result["DISK_%d_FRONTEND_TYPE" % idx] = \
2531 instance.hvparams[constants.HV_DISK_TYPE]
2532 if disk.dev_type in constants.LDS_BLOCK:
2533 result["DISK_%d_BACKEND_TYPE" % idx] = "block"
2534 elif disk.dev_type == constants.LD_FILE:
2535 result["DISK_%d_BACKEND_TYPE" % idx] = \
2536 "file:%s" % disk.physical_id[0]
2537
2538
2539 for idx, nic in enumerate(instance.nics):
2540 result["NIC_%d_MAC" % idx] = nic.mac
2541 result["NIC_%d_UUID" % idx] = nic.uuid
2542 if nic.name:
2543 result["NIC_%d_NAME" % idx] = nic.name
2544 if nic.ip:
2545 result["NIC_%d_IP" % idx] = nic.ip
2546 result["NIC_%d_MODE" % idx] = nic.nicparams[constants.NIC_MODE]
2547 if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
2548 result["NIC_%d_BRIDGE" % idx] = nic.nicparams[constants.NIC_LINK]
2549 if nic.nicparams[constants.NIC_LINK]:
2550 result["NIC_%d_LINK" % idx] = nic.nicparams[constants.NIC_LINK]
2551 if nic.netinfo:
2552 nobj = objects.Network.FromDict(nic.netinfo)
2553 result.update(nobj.HooksDict("NIC_%d_" % idx))
2554 if constants.HV_NIC_TYPE in instance.hvparams:
2555 result["NIC_%d_FRONTEND_TYPE" % idx] = \
2556 instance.hvparams[constants.HV_NIC_TYPE]
2557
2558
2559 for source, kind in [(instance.beparams, "BE"), (instance.hvparams, "HV")]:
2560 for key, value in source.items():
2561 result["INSTANCE_%s_%s" % (kind, key)] = str(value)
2562
2563 return result
2564
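# A sketch of the indexed DISK_*/NIC_* variable families built above; the
# values are hypothetical, while the real code reads them from the
# instance's Disk and NIC objects.
_env = {}
_disk_info = [("/dev/drbd0", "rw"), ("/dev/drbd1", "ro")]
for _idx, (_path, _mode) in enumerate(_disk_info):
  _env["DISK_%d_PATH" % _idx] = _path
  _env["DISK_%d_ACCESS" % _idx] = _mode
_env["DISK_COUNT"] = "%d" % len(_disk_info)
assert _env["DISK_1_ACCESS"] == "ro"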
2567 """Compute the validity for all ExtStorage Providers.
2568
2569 @type top_dirs: list
2570 @param top_dirs: the list of directories in which to
2571 search (if not given defaults to
2572 L{pathutils.ES_SEARCH_PATH})
2573 @rtype: list of L{objects.ExtStorage}
2574 @return: a list of tuples (name, path, status, diagnose, parameters)
2575 for all (potential) ExtStorage Providers under all
2576 search paths, where:
2577 - name is the (potential) ExtStorage Provider
2578 - path is the full path to the ExtStorage Provider
2579 - status True/False is the validity of the ExtStorage Provider
2580 - diagnose is the error message for an invalid ExtStorage Provider,
2581 otherwise empty
2582 - parameters is a list of (name, help) parameters, if any
2583
2584 """
2585 if top_dirs is None:
2586 top_dirs = pathutils.ES_SEARCH_PATH
2587
2588 result = []
2589 for dir_name in top_dirs:
2590 if os.path.isdir(dir_name):
2591 try:
2592 f_names = utils.ListVisibleFiles(dir_name)
2593 except EnvironmentError, err:
2594 logging.exception("Can't list the ExtStorage directory %s: %s",
2595 dir_name, err)
2596 break
2597 for name in f_names:
2598 es_path = utils.PathJoin(dir_name, name)
2599 status, es_inst = bdev.ExtStorageFromDisk(name, base_dir=dir_name)
2600 if status:
2601 diagnose = ""
2602 parameters = es_inst.supported_parameters
2603 else:
2604 diagnose = es_inst
2605 parameters = []
2606 result.append((name, es_path, status, diagnose, parameters))
2607
2608 return result
2609
2612 """Grow a stack of block devices.
2613
2614 This function is called recursively, with the children being
2615 resized first.
2616
2617 @type disk: L{objects.Disk}
2618 @param disk: the disk to be grown
2619 @type amount: integer
2620 @param amount: the amount (in mebibytes) to grow with
2621 @type dryrun: boolean
2622 @param dryrun: whether to execute the operation in simulation mode
2623 only, without actually increasing the size
2624 @param backingstore: whether to execute the operation on backing storage
2625 only, or on "logical" storage only; e.g. DRBD is logical storage,
2626 whereas LVM, file, RBD are backing storage
2627 @rtype: (status, result)
2628 @return: a tuple with the status of the operation (True/False), and
2629 the error message if status is False
2630
2631 """
2632 r_dev = _RecursiveFindBD(disk)
2633 if r_dev is None:
2634 _Fail("Cannot find block device %s", disk)
2635
2636 try:
2637 r_dev.Grow(amount, dryrun, backingstore)
2638 except errors.BlockDeviceError, err:
2639 _Fail("Failed to grow block device: %s", err, exc=True)
2640
2643 """Create a snapshot copy of a block device.
2644
2645 This function is called recursively, and the snapshot is actually created
2646 just for the leaf lvm backend device.
2647
2648 @type disk: L{objects.Disk}
2649 @param disk: the disk to be snapshotted
2650 @rtype: string
2651 @return: snapshot disk ID as (vg, lv)
2652
2653 """
2654 if disk.dev_type == constants.LD_DRBD8:
2655 if not disk.children:
2656 _Fail("DRBD device '%s' without backing storage cannot be snapshotted",
2657 disk.unique_id)
2658 return BlockdevSnapshot(disk.children[0])
2659 elif disk.dev_type == constants.LD_LV:
2660 r_dev = _RecursiveFindBD(disk)
2661 if r_dev is not None:
2662
2663
2664 return r_dev.Snapshot(disk.size)
2665 else:
2666 _Fail("Cannot find block device %s", disk)
2667 else:
2668 _Fail("Cannot snapshot non-lvm block device '%s' of type '%s'",
2669 disk.unique_id, disk.dev_type)
2670
2673 """Sets 'metadata' information on block devices.
2674
2675 This function sets 'info' metadata on block devices. Initial
2676 information is set at device creation; this function should be used
2677 for example after renames.
2678
2679 @type disk: L{objects.Disk}
2680 @param disk: the disk on which the information should be set
2681 @type info: string
2682 @param info: new 'info' metadata
2683 @rtype: (status, result)
2684 @return: a tuple with the status of the operation (True/False), and
2685 the error message if status is False
2686
2687 """
2688 r_dev = _RecursiveFindBD(disk)
2689 if r_dev is None:
2690 _Fail("Cannot find block device %s", disk)
2691
2692 try:
2693 r_dev.SetInfo(info)
2694 except errors.BlockDeviceError, err:
2695 _Fail("Failed to set information on block device: %s", err, exc=True)
2696
2699 """Write out the export configuration information.
2700
2701 @type instance: L{objects.Instance}
2702 @param instance: the instance which we export, used for
2703 saving configuration
2704 @type snap_disks: list of L{objects.Disk}
2705 @param snap_disks: list of snapshot block devices, which
2706 will be used to get the actual name of the dump file
2707
2708 @rtype: None
2709
2710 """
2711 destdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name + ".new")
2712 finaldestdir = utils.PathJoin(pathutils.EXPORT_DIR, instance.name)
2713
2714 config = objects.SerializableConfigParser()
2715
2716 config.add_section(constants.INISECT_EXP)
2717 config.set(constants.INISECT_EXP, "version", "0")
2718 config.set(constants.INISECT_EXP, "timestamp", "%d" % int(time.time()))
2719 config.set(constants.INISECT_EXP, "source", instance.primary_node)
2720 config.set(constants.INISECT_EXP, "os", instance.os)
2721 config.set(constants.INISECT_EXP, "compression", "none")
2722
2723 config.add_section(constants.INISECT_INS)
2724 config.set(constants.INISECT_INS, "name", instance.name)
2725 config.set(constants.INISECT_INS, "maxmem", "%d" %
2726 instance.beparams[constants.BE_MAXMEM])
2727 config.set(constants.INISECT_INS, "minmem", "%d" %
2728 instance.beparams[constants.BE_MINMEM])
2729
2730 config.set(constants.INISECT_INS, "memory", "%d" %
2731 instance.beparams[constants.BE_MAXMEM])
2732 config.set(constants.INISECT_INS, "vcpus", "%d" %
2733 instance.beparams[constants.BE_VCPUS])
2734 config.set(constants.INISECT_INS, "disk_template", instance.disk_template)
2735 config.set(constants.INISECT_INS, "hypervisor", instance.hypervisor)
2736 config.set(constants.INISECT_INS, "tags", " ".join(instance.GetTags()))
2737
2738 nic_total = 0
2739 for nic_count, nic in enumerate(instance.nics):
2740 nic_total += 1
2741 config.set(constants.INISECT_INS, "nic%d_mac" %
2742 nic_count, "%s" % nic.mac)
2743 config.set(constants.INISECT_INS, "nic%d_ip" % nic_count, "%s" % nic.ip)
2744 config.set(constants.INISECT_INS, "nic%d_network" % nic_count,
2745 "%s" % nic.network)
2746 config.set(constants.INISECT_INS, "nic%d_name" % nic_count,
2747 "%s" % nic.name)
2748 for param in constants.NICS_PARAMETER_TYPES:
2749 config.set(constants.INISECT_INS, "nic%d_%s" % (nic_count, param),
2750 "%s" % nic.nicparams.get(param, None))
2751
2752 config.set(constants.INISECT_INS, "nic_count", "%d" % nic_total)
2753
2754 disk_total = 0
2755 for disk_count, disk in enumerate(snap_disks):
2756 if disk:
2757 disk_total += 1
2758 config.set(constants.INISECT_INS, "disk%d_ivname" % disk_count,
2759 ("%s" % disk.iv_name))
2760 config.set(constants.INISECT_INS, "disk%d_dump" % disk_count,
2761 ("%s" % disk.physical_id[1]))
2762 config.set(constants.INISECT_INS, "disk%d_size" % disk_count,
2763 ("%d" % disk.size))
2764 config.set(constants.INISECT_INS, "disk%d_name" % disk_count,
2765 "%s" % disk.name)
2766
2767 config.set(constants.INISECT_INS, "disk_count", "%d" % disk_total)
2768
2769
2770
2771 config.add_section(constants.INISECT_HYP)
2772 for name, value in instance.hvparams.items():
2773 if name not in constants.HVC_GLOBALS:
2774 config.set(constants.INISECT_HYP, name, str(value))
2775
2776 config.add_section(constants.INISECT_BEP)
2777 for name, value in instance.beparams.items():
2778 config.set(constants.INISECT_BEP, name, str(value))
2779
2780 config.add_section(constants.INISECT_OSP)
2781 for name, value in instance.osparams.items():
2782 config.set(constants.INISECT_OSP, name, str(value))
2783
2784 utils.WriteFile(utils.PathJoin(destdir, constants.EXPORT_CONF_FILE),
2785 data=config.Dumps())
2786 shutil.rmtree(finaldestdir, ignore_errors=True)
2787 shutil.move(destdir, finaldestdir)
2788
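# A self-contained sketch of the ".new"-then-move pattern used above,
# running under its own temporary directory so it is safe anywhere:
# readers of the final path never observe a half-written export.
import os
import shutil
import tempfile

_base = tempfile.mkdtemp()
_final = os.path.join(_base, "instance1.example.com")
_staging = _final + ".new"
os.mkdir(_staging)
open(os.path.join(_staging, "config.ini"), "w").close()
shutil.rmtree(_final, ignore_errors=True)  # drop any stale previous export
shutil.move(_staging, _final)              # publish the complete one
shutil.rmtree(_base)                       # clean up the sketch's tempdir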
2811
2814 """Return a list of exports currently available on this machine.
2815
2816 @rtype: list
2817 @return: list of the exports
2818
2819 """
2820 if os.path.isdir(pathutils.EXPORT_DIR):
2821 return sorted(utils.ListVisibleFiles(pathutils.EXPORT_DIR))
2822 else:
2823 _Fail("No exports directory")
2824
2827 """Remove an existing export from the node.
2828
2829 @type export: str
2830 @param export: the name of the export to remove
2831 @rtype: None
2832
2833 """
2834 target = utils.PathJoin(pathutils.EXPORT_DIR, export)
2835
2836 try:
2837 shutil.rmtree(target)
2838 except EnvironmentError, err:
2839 _Fail("Error while removing the export: %s", err, exc=True)
2840
2843 """Rename a list of block devices.
2844
2845 @type devlist: list of tuples
2846 @param devlist: list of tuples of the form (disk,
2847 new_unique_id); disk is an
2848 L{objects.Disk} object describing the current disk,
2849 and new_unique_id is the name we
2850 rename it to
2851 @rtype: None
2852 @raise RPCFail: if any of the renames fail
2853
2854 """
2855 msgs = []
2856 result = True
2857 for disk, unique_id in devlist:
2858 dev = _RecursiveFindBD(disk)
2859 if dev is None:
2860 msgs.append("Can't find device %s in rename" % str(disk))
2861 result = False
2862 continue
2863 try:
2864 old_rpath = dev.dev_path
2865 dev.Rename(unique_id)
2866 new_rpath = dev.dev_path
2867 if old_rpath != new_rpath:
2868 DevCacheManager.RemoveCache(old_rpath)
2869
2870
2871
2872
2873
2874 except errors.BlockDeviceError, err:
2875 msgs.append("Can't rename device '%s' to '%s': %s" %
2876 (dev, unique_id, err))
2877 logging.exception("Can't rename device '%s' to '%s'", dev, unique_id)
2878 result = False
2879 if not result:
2880 _Fail("; ".join(msgs))
2881
2903
2906 """Create file storage directory.
2907
2908 @type file_storage_dir: str
2909 @param file_storage_dir: directory to create
2910
2911 @rtype: None
2912 @raise RPCFail: if the path exists but is not a directory, or the
2913 directory cannot be created
2914
2915 """
2916 file_storage_dir = _TransformFileStorageDir(file_storage_dir)
2917 if os.path.exists(file_storage_dir):
2918 if not os.path.isdir(file_storage_dir):
2919 _Fail("Specified storage dir '%s' is not a directory",
2920 file_storage_dir)
2921 else:
2922 try:
2923 os.makedirs(file_storage_dir, 0750)
2924 except OSError, err:
2925 _Fail("Cannot create file storage directory '%s': %s",
2926 file_storage_dir, err, exc=True)
2927
2930 """Remove file storage directory.
2931
2932 Remove it only if it's empty. If not, log an error and return.
2933
2934 @type file_storage_dir: str
2935 @param file_storage_dir: the directory we should cleanup
2936 @rtype: None
2937 @raise RPCFail: if the path is not a directory or the directory
2938 cannot be removed (e.g. because it is not empty)
2939
2940 """
2941 file_storage_dir = _TransformFileStorageDir(file_storage_dir)
2942 if os.path.exists(file_storage_dir):
2943 if not os.path.isdir(file_storage_dir):
2944 _Fail("Specified Storage directory '%s' is not a directory",
2945 file_storage_dir)
2946
2947 try:
2948 os.rmdir(file_storage_dir)
2949 except OSError, err:
2950 _Fail("Cannot remove file storage directory '%s': %s",
2951 file_storage_dir, err)
2952
2955 """Rename the file storage directory.
2956
2957 @type old_file_storage_dir: str
2958 @param old_file_storage_dir: the current path
2959 @type new_file_storage_dir: str
2960 @param new_file_storage_dir: the name we should rename to
2961 @rtype: None
2962 @raise RPCFail: if the rename cannot be performed
2964
2965 """
2966 old_file_storage_dir = _TransformFileStorageDir(old_file_storage_dir)
2967 new_file_storage_dir = _TransformFileStorageDir(new_file_storage_dir)
2968 if not os.path.exists(new_file_storage_dir):
2969 if os.path.isdir(old_file_storage_dir):
2970 try:
2971 os.rename(old_file_storage_dir, new_file_storage_dir)
2972 except OSError, err:
2973 _Fail("Cannot rename '%s' to '%s': %s",
2974 old_file_storage_dir, new_file_storage_dir, err)
2975 else:
2976 _Fail("Specified storage dir '%s' is not a directory",
2977 old_file_storage_dir)
2978 else:
2979 if os.path.exists(old_file_storage_dir):
2980 _Fail("Cannot rename '%s' to '%s': both locations exist",
2981 old_file_storage_dir, new_file_storage_dir)
2982
2985 """Checks whether the given filename is in the queue directory.
2986
2987 @type file_name: str
2988 @param file_name: the file name we should check
2989 @rtype: None
2990 @raises RPCFail: if the file is not valid
2991
2992 """
2993 if not utils.IsBelowDir(pathutils.QUEUE_DIR, file_name):
2994 _Fail("Passed job queue file '%s' does not belong to"
2995 " the queue directory '%s'", file_name, pathutils.QUEUE_DIR)
2996
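# A simplified stand-in for the utils.IsBelowDir check used above,
# assuming normalized absolute inputs; the trailing separator is what
# keeps "/queue-evil" from passing as below "/queue".
import os.path

def _is_below(root, path):
  root = root.rstrip(os.path.sep) + os.path.sep
  return os.path.normpath(path).startswith(root)

assert _is_below("/var/lib/queue", "/var/lib/queue/job-1")
assert not _is_below("/var/lib/queue", "/var/lib/queue-evil/job-1")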
2999 """Updates a file in the queue directory.
3000
3001 This is just a wrapper over L{utils.io.WriteFile}, with proper
3002 checking.
3003
3004 @type file_name: str
3005 @param file_name: the job file name
3006 @type content: str
3007 @param content: the new job contents
3008 @rtype: None
3010
3011 """
3012 file_name = vcluster.LocalizeVirtualPath(file_name)
3013
3014 _EnsureJobQueueFile(file_name)
3015 getents = runtime.GetEnts()
3016
3017
3018 utils.WriteFile(file_name, data=_Decompress(content), uid=getents.masterd_uid,
3019 gid=getents.daemons_gid, mode=constants.JOB_QUEUE_FILES_PERMS)
3020
3023 """Renames a job queue file.
3024
3025 This is just a wrapper over os.rename with proper checking.
3026
3027 @type old: str
3028 @param old: the old (actual) file name
3029 @type new: str
3030 @param new: the desired file name
3031 @rtype: None
3033
3034 """
3035 old = vcluster.LocalizeVirtualPath(old)
3036 new = vcluster.LocalizeVirtualPath(new)
3037
3038 _EnsureJobQueueFile(old)
3039 _EnsureJobQueueFile(new)
3040
3041 getents = runtime.GetEnts()
3042
3043 utils.RenameFile(old, new, mkdir=True, mkdir_mode=0750,
3044 dir_uid=getents.masterd_uid, dir_gid=getents.daemons_gid)
3045
3048 """Closes the given block devices.
3049
3050 This means they will be switched to secondary mode (in case of
3051 DRBD).
3052
3053 @param instance_name: if the argument is not empty, the symlinks
3054 of this instance will be removed
3055 @type disks: list of L{objects.Disk}
3056 @param disks: the list of disks to be closed
3057 @rtype: None
3058 @raise RPCFail: if any of the devices cannot be closed; the message
3059 contains the error details for each failing device
3062
3063 """
3064 bdevs = []
3065 for cf in disks:
3066 rd = _RecursiveFindBD(cf)
3067 if rd is None:
3068 _Fail("Can't find device %s", cf)
3069 bdevs.append(rd)
3070
3071 msg = []
3072 for rd in bdevs:
3073 try:
3074 rd.Close()
3075 except errors.BlockDeviceError, err:
3076 msg.append(str(err))
3077 if msg:
3078 _Fail("Can't make devices secondary: %s", ",".join(msg))
3079 else:
3080 if instance_name:
3081 _RemoveBlockDevLinks(instance_name, disks)
3082
3085 """Validates the given hypervisor parameters.
3086
3087 @type hvname: string
3088 @param hvname: the hypervisor name
3089 @type hvparams: dict
3090 @param hvparams: the hypervisor parameters to be validated
3091 @rtype: None
3092
3093 """
3094 try:
3095 hv_type = hypervisor.GetHypervisor(hvname)
3096 hv_type.ValidateParameters(hvparams)
3097 except errors.HypervisorError, err:
3098 _Fail(str(err), log=False)
3099
3102 """Check whether a list of parameters is supported by the OS.
3103
3104 @type os_obj: L{objects.OS}
3105 @param os_obj: OS object to check
3106 @type parameters: list
3107 @param parameters: the list of parameters to check
3108
3109 """
3110 supported = [v[0] for v in os_obj.supported_parameters]
3111 delta = frozenset(parameters).difference(supported)
3112 if delta:
3113 _Fail("The following parameters are not supported"
3114 " by the OS %s: %s" % (os_obj.name, utils.CommaJoin(delta)))
3115
3116
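# A sketch of the unsupported-parameter detection above; the supported
# (name, help) pairs and the caller's keys are hypothetical.
_supported = [("dhcp", "use DHCP"), ("root_size", "root FS size")]
_passed = ["dhcp", "swap_size"]
_delta = frozenset(_passed).difference(v[0] for v in _supported)
assert _delta == frozenset(["swap_size"])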
3117 def ValidateOS(required, osname, checks, osparams):
3118 """Validate the given OS' parameters.
3119
3120 @type required: boolean
3121 @param required: whether absence of the OS should translate into
3122 failure or not
3123 @type osname: string
3124 @param osname: the OS to be validated
3125 @type checks: list
3126 @param checks: list of the checks to run (currently only 'parameters')
3127 @type osparams: dict
3128 @param osparams: dictionary with OS parameters
3129 @rtype: boolean
3130 @return: True if the validation passed, or False if the OS was not
3131 found and L{required} was false
3132
3133 """
3134 if not constants.OS_VALIDATE_CALLS.issuperset(checks):
3135 _Fail("Unknown checks required for OS %s: %s", osname,
3136 set(checks).difference(constants.OS_VALIDATE_CALLS))
3137
3138 name_only = objects.OS.GetName(osname)
3139 status, tbv = _TryOSFromDisk(name_only, None)
3140
3141 if not status:
3142 if required:
3143 _Fail(tbv)
3144 else:
3145 return False
3146
3147 if max(tbv.api_versions) < constants.OS_API_V20:
3148 return True
3149
3150 if constants.OS_VALIDATE_PARAMETERS in checks:
3151 _CheckOSPList(tbv, osparams.keys())
3152
3153 validate_env = OSCoreEnv(osname, tbv, osparams)
3154 result = utils.RunCmd([tbv.verify_script] + checks, env=validate_env,
3155 cwd=tbv.path, reset_env=True)
3156 if result.failed:
3157 logging.error("os validate command '%s' returned error: %s output: %s",
3158 result.cmd, result.fail_reason, result.output)
3159 _Fail("OS validation script failed (%s), output: %s",
3160 result.fail_reason, result.output, log=False)
3161
3162 return True
3163
3186
3187
3188 def _GetX509Filenames(cryptodir, name):
3189 """Returns the full paths for the certificate directory, key and cert.
3190
3191 """
3192 return (utils.PathJoin(cryptodir, name),
3193 utils.PathJoin(cryptodir, name, _X509_KEY_FILE),
3194 utils.PathJoin(cryptodir, name, _X509_CERT_FILE))
3195
3198 """Creates a new X509 certificate for SSL/TLS.
3199
3200 @type validity: int
3201 @param validity: Validity in seconds
3202 @rtype: tuple; (string, string)
3203 @return: Certificate name and public part
3204
3205 """
3206 (key_pem, cert_pem) = \
3207 utils.GenerateSelfSignedX509Cert(netutils.Hostname.GetSysName(),
3208 min(validity, _MAX_SSL_CERT_VALIDITY))
3209
3210 cert_dir = tempfile.mkdtemp(dir=cryptodir,
3211 prefix="x509-%s-" % utils.TimestampForFilename())
3212 try:
3213 name = os.path.basename(cert_dir)
3214 assert len(name) > 5
3215
3216 (_, key_file, cert_file) = _GetX509Filenames(cryptodir, name)
3217
3218 utils.WriteFile(key_file, mode=0400, data=key_pem)
3219 utils.WriteFile(cert_file, mode=0400, data=cert_pem)
3220
3221
3222 return (name, cert_pem)
3223 except Exception:
3224 shutil.rmtree(cert_dir, ignore_errors=True)
3225 raise
3226
3229 """Removes a X509 certificate.
3230
3231 @type name: string
3232 @param name: Certificate name
3233
3234 """
3235 (cert_dir, key_file, cert_file) = _GetX509Filenames(cryptodir, name)
3236
3237 utils.RemoveFile(key_file)
3238 utils.RemoveFile(cert_file)
3239
3240 try:
3241 os.rmdir(cert_dir)
3242 except EnvironmentError, err:
3243 _Fail("Cannot remove certificate directory '%s': %s",
3244 cert_dir, err)
3245
3248 """Returns the command for the requested input/output.
3249
3250 @type instance: L{objects.Instance}
3251 @param instance: The instance object
3252 @param mode: Import/export mode
3253 @param ieio: Input/output type
3254 @param ieargs: Input/output arguments
3255
3256 """
3257 assert mode in (constants.IEM_IMPORT, constants.IEM_EXPORT)
3258
3259 env = None
3260 prefix = None
3261 suffix = None
3262 exp_size = None
3263
3264 if ieio == constants.IEIO_FILE:
3265 (filename, ) = ieargs
3266
3267 if not utils.IsNormAbsPath(filename):
3268 _Fail("Path '%s' is not normalized or absolute", filename)
3269
3270 real_filename = os.path.realpath(filename)
3271 directory = os.path.dirname(real_filename)
3272
3273 if not utils.IsBelowDir(pathutils.EXPORT_DIR, real_filename):
3274 _Fail("File '%s' is not under exports directory '%s': %s",
3275 filename, pathutils.EXPORT_DIR, real_filename)
3276
3277
3278 utils.Makedirs(directory, mode=0750)
3279
3280 quoted_filename = utils.ShellQuote(filename)
3281
3282 if mode == constants.IEM_IMPORT:
3283 suffix = "> %s" % quoted_filename
3284 elif mode == constants.IEM_EXPORT:
3285 suffix = "< %s" % quoted_filename
3286
3287
3288 try:
3289 st = os.stat(filename)
3290 except EnvironmentError, err:
3291 logging.error("Can't stat(2) %s: %s", filename, err)
3292 else:
3293 exp_size = utils.BytesToMebibyte(st.st_size)
3294
3295 elif ieio == constants.IEIO_RAW_DISK:
3296 (disk, ) = ieargs
3297
3298 real_disk = _OpenRealBD(disk)
3299
3300 if mode == constants.IEM_IMPORT:
3301
3302
3303
3304
3305
3306
3307 suffix = utils.BuildShellCmd(("| dd of=%s conv=nocreat,notrunc"
3308 " bs=%s oflag=dsync"),
3309 real_disk.dev_path,
3310 str(64 * 1024))
3311
3312 elif mode == constants.IEM_EXPORT:
3313
3314 prefix = utils.BuildShellCmd("dd if=%s bs=%s count=%s |",
3315 real_disk.dev_path,
3316 str(1024 * 1024),
3317 str(disk.size))
3318 exp_size = disk.size
3319
3320 elif ieio == constants.IEIO_SCRIPT:
3321 (disk, disk_index, ) = ieargs
3322
3323 assert isinstance(disk_index, (int, long))
3324
3325 real_disk = _OpenRealBD(disk)
3326
3327 inst_os = OSFromDisk(instance.os)
3328 env = OSEnvironment(instance, inst_os)
3329
3330 if mode == constants.IEM_IMPORT:
3331 env["IMPORT_DEVICE"] = env["DISK_%d_PATH" % disk_index]
3332 env["IMPORT_INDEX"] = str(disk_index)
3333 script = inst_os.import_script
3334
3335 elif mode == constants.IEM_EXPORT:
3336 env["EXPORT_DEVICE"] = real_disk.dev_path
3337 env["EXPORT_INDEX"] = str(disk_index)
3338 script = inst_os.export_script
3339
3340
3341 script_cmd = utils.BuildShellCmd("( cd %s && %s; )", inst_os.path, script)
3342
3343 if mode == constants.IEM_IMPORT:
3344 suffix = "| %s" % script_cmd
3345
3346 elif mode == constants.IEM_EXPORT:
3347 prefix = "%s |" % script_cmd
3348
3349
3350 exp_size = constants.IE_CUSTOM_SIZE
3351
3352 else:
3353 _Fail("Invalid %s I/O mode %r", mode, ieio)
3354
3355 return (env, prefix, suffix, exp_size)
3356
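# A sketch of how the (prefix, suffix) pair returned above wraps the
# transfer program; the device path and the "socat" transport here are
# hypothetical stand-ins for the import/export daemon's own transport.
_transport = "socat - TCP:198.51.100.1:1234"
_prefix = "dd if=/dev/drbd0 bs=1048576 count=1024 |"
_suffix = "| dd of=/dev/drbd0 conv=nocreat,notrunc bs=65536 oflag=dsync"
_export_cmd = "%s %s" % (_prefix, _transport)   # disk -> network
_import_cmd = "%s %s" % (_transport, _suffix)   # network -> disk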
3359 """Creates status directory for import/export.
3360
3361 """
3362 return tempfile.mkdtemp(dir=pathutils.IMPORT_EXPORT_DIR,
3363 prefix=("%s-%s-" %
3364 (prefix, utils.TimestampForFilename())))
3365
3369 """Starts an import or export daemon.
3370
3371 @param mode: Import/export mode
3372 @type opts: L{objects.ImportExportOptions}
3373 @param opts: Daemon options
3374 @type host: string
3375 @param host: Remote host for export (None for import)
3376 @type port: int
3377 @param port: Remote port for export (None for import)
3378 @type instance: L{objects.Instance}
3379 @param instance: Instance object
3380 @type component: string
3381 @param component: which part of the instance is transferred now,
3382 e.g. 'disk/0'
3383 @param ieio: Input/output type
3384 @param ieioargs: Input/output arguments
3385
3386 """
3387 if mode == constants.IEM_IMPORT:
3388 prefix = "import"
3389
3390 if not (host is None and port is None):
3391 _Fail("Can not specify host or port on import")
3392
3393 elif mode == constants.IEM_EXPORT:
3394 prefix = "export"
3395
3396 if host is None or port is None:
3397 _Fail("Host and port must be specified for an export")
3398
3399 else:
3400 _Fail("Invalid mode %r", mode)
3401
3402 if (opts.key_name is None) ^ (opts.ca_pem is None):
3403 _Fail("Cluster certificate can only be used for both key and CA")
3404
3405 (cmd_env, cmd_prefix, cmd_suffix, exp_size) = \
3406 _GetImportExportIoCommand(instance, mode, ieio, ieioargs)
3407
3408 if opts.key_name is None:
3409
3410 key_path = pathutils.NODED_CERT_FILE
3411 cert_path = pathutils.NODED_CERT_FILE
3412 assert opts.ca_pem is None
3413 else:
3414 (_, key_path, cert_path) = _GetX509Filenames(pathutils.CRYPTO_KEYS_DIR,
3415 opts.key_name)
3416 assert opts.ca_pem is not None
3417
3418 for i in [key_path, cert_path]:
3419 if not os.path.exists(i):
3420 _Fail("File '%s' does not exist" % i)
3421
3422 status_dir = _CreateImportExportStatusDir("%s-%s" % (prefix, component))
3423 try:
3424 status_file = utils.PathJoin(status_dir, _IES_STATUS_FILE)
3425 pid_file = utils.PathJoin(status_dir, _IES_PID_FILE)
3426 ca_file = utils.PathJoin(status_dir, _IES_CA_FILE)
3427
3428 if opts.ca_pem is None:
3429
3430 ca = utils.ReadFile(pathutils.NODED_CERT_FILE)
3431 else:
3432 ca = opts.ca_pem
3433
3434
3435 utils.WriteFile(ca_file, data=ca, mode=0400)
3436
3437 cmd = [
3438 pathutils.IMPORT_EXPORT_DAEMON,
3439 status_file, mode,
3440 "--key=%s" % key_path,
3441 "--cert=%s" % cert_path,
3442 "--ca=%s" % ca_file,
3443 ]
3444
3445 if host:
3446 cmd.append("--host=%s" % host)
3447
3448 if port:
3449 cmd.append("--port=%s" % port)
3450
3451 if opts.ipv6:
3452 cmd.append("--ipv6")
3453 else:
3454 cmd.append("--ipv4")
3455
3456 if opts.compress:
3457 cmd.append("--compress=%s" % opts.compress)
3458
3459 if opts.magic:
3460 cmd.append("--magic=%s" % opts.magic)
3461
3462 if exp_size is not None:
3463 cmd.append("--expected-size=%s" % exp_size)
3464
3465 if cmd_prefix:
3466 cmd.append("--cmd-prefix=%s" % cmd_prefix)
3467
3468 if cmd_suffix:
3469 cmd.append("--cmd-suffix=%s" % cmd_suffix)
3470
3471 if mode == constants.IEM_EXPORT:
3472
3473 cmd.append("--connect-retries=%s" % constants.RIE_CONNECT_RETRIES)
3474 cmd.append("--connect-timeout=%s" % constants.RIE_CONNECT_ATTEMPT_TIMEOUT)
3475 elif opts.connect_timeout is not None:
3476 assert mode == constants.IEM_IMPORT
3477
3478 cmd.append("--connect-timeout=%s" % opts.connect_timeout)
3479
3480 logfile = _InstanceLogName(prefix, instance.os, instance.name, component)
3481
3482
3483
3484 utils.StartDaemon(cmd, env=cmd_env, pidfile=pid_file,
3485 output=logfile)
3486
3487
3488 return os.path.basename(status_dir)
3489
3490 except Exception:
3491 shutil.rmtree(status_dir, ignore_errors=True)
3492 raise
3493
3496 """Returns import/export daemon status.
3497
3498 @type names: sequence
3499 @param names: List of names
3500 @rtype: List of dicts
3501 @return: Returns a list of the state of each named import/export or None if a
3502 status couldn't be read
3503
3504 """
3505 result = []
3506
3507 for name in names:
3508 status_file = utils.PathJoin(pathutils.IMPORT_EXPORT_DIR, name,
3509 _IES_STATUS_FILE)
3510
3511 try:
3512 data = utils.ReadFile(status_file)
3513 except EnvironmentError, err:
3514 if err.errno != errno.ENOENT:
3515 raise
3516 data = None
3517
3518 if not data:
3519 result.append(None)
3520 continue
3521
3522 result.append(serializer.LoadJson(data))
3523
3524 return result
3525
3540
3543 """Cleanup after an import or export.
3544
3545 If the import/export daemon is still running, it's killed. Afterwards the
3546 whole status directory is removed.
3547
3548 """
3549 logging.info("Finalizing import/export %s", name)
3550
3551 status_dir = utils.PathJoin(pathutils.IMPORT_EXPORT_DIR, name)
3552
3553 pid = utils.ReadLockedPidFile(utils.PathJoin(status_dir, _IES_PID_FILE))
3554
3555 if pid:
3556 logging.info("Import/export %s is still running with PID %s",
3557 name, pid)
3558 utils.KillProcess(pid, waitpid=False)
3559
3560 shutil.rmtree(status_dir, ignore_errors=True)
3561
3564 """Sets the physical ID on disks and returns the block devices.
3565
3566 """
3567
3568 my_name = netutils.Hostname.GetSysName()
3569 for cf in disks:
3570 cf.SetPhysicalID(my_name, nodes_ip)
3571
3572 bdevs = []
3573
3574 for cf in disks:
3575 rd = _RecursiveFindBD(cf)
3576 if rd is None:
3577 _Fail("Can't find device %s", cf)
3578 bdevs.append(rd)
3579 return bdevs
3580
3583 """Disconnects the network on a list of drbd devices.
3584
3585 """
3586 bdevs = _FindDisks(nodes_ip, disks)
3587
3588
3589 for rd in bdevs:
3590 try:
3591 rd.DisconnectNet()
3592 except errors.BlockDeviceError, err:
3593 _Fail("Can't change network configuration to standalone mode: %s",
3594 err, exc=True)
3595
3596
3597 def DrbdAttachNet(nodes_ip, disks, instance_name, multimaster):
3598 """Attaches the network on a list of drbd devices.
3599
3600 """
3601 bdevs = _FindDisks(nodes_ip, disks)
3602
3603 if multimaster:
3604 for idx, rd in enumerate(bdevs):
3605 try:
3606 _SymlinkBlockDev(instance_name, rd.dev_path, idx)
3607 except EnvironmentError, err:
3608 _Fail("Can't create symlink: %s", err)
3609
3610
3611 for rd in bdevs:
3612 try:
3613 rd.AttachNet(multimaster)
3614 except errors.BlockDeviceError, err:
3615 _Fail("Can't change network configuration: %s", err)
3616
3617
3618
3619
3620
3621
3622
3623 def _Attach():
3624 all_connected = True
3625
3626 for rd in bdevs:
3627 stats = rd.GetProcStatus()
3628
3629 if multimaster:
3630
3631
3632
3633
3634
3635
3636 all_connected = (all_connected and
3637 stats.is_connected and
3638 stats.is_disk_uptodate and
3639 stats.peer_disk_uptodate)
3640 else:
3641 all_connected = (all_connected and
3642 (stats.is_connected or stats.is_in_resync))
3643
3644 if stats.is_standalone:
3645
3646
3647
3648 try:
3649 rd.AttachNet(multimaster)
3650 except errors.BlockDeviceError, err:
3651 _Fail("Can't change network configuration: %s", err)
3652
3653 if not all_connected:
3654 raise utils.RetryAgain()
3655
3656 try:
3657
3658 utils.Retry(_Attach, (0.1, 1.5, 5.0), 2 * 60)
3659 except utils.RetryTimeout:
3660 _Fail("Timeout in disk reconnecting")
3661
3662 if multimaster:
3663
3664 for rd in bdevs:
3665 try:
3666 rd.Open()
3667 except errors.BlockDeviceError, err:
3668 _Fail("Can't change to primary mode: %s", err)
3669
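# A simplified stand-in for the utils.Retry loop used above: the check
# function raises RetryAgain until the condition holds, and the caller
# gives up once the deadline passes. Delay and timeout are hypothetical.
import time

class _RetryAgain(Exception):
  pass

def _retry(fn, delay, timeout):
  deadline = time.time() + timeout
  while True:
    try:
      return fn()
    except _RetryAgain:
      if time.time() + delay > deadline:
        raise RuntimeError("timeout waiting for condition")
      time.sleep(delay)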
3672 """Wait until DRBDs have synchronized.
3673
3674 """
3675 def _helper(rd):
3676 stats = rd.GetProcStatus()
3677 if not (stats.is_connected or stats.is_in_resync):
3678 raise utils.RetryAgain()
3679 return stats
3680
3681 bdevs = _FindDisks(nodes_ip, disks)
3682
3683 min_resync = 100
3684 alldone = True
3685 for rd in bdevs:
3686 try:
3687
3688 stats = utils.Retry(_helper, 1, 15, args=[rd])
3689 except utils.RetryTimeout:
3690 stats = rd.GetProcStatus()
3691
3692 if not (stats.is_connected or stats.is_in_resync):
3693 _Fail("DRBD device %s is not in sync: stats=%s", rd, stats)
3694 alldone = alldone and (not stats.is_in_resync)
3695 if stats.sync_percent is not None:
3696 min_resync = min(min_resync, stats.sync_percent)
3697
3698 return (alldone, min_resync)
3699
3709
3712 """Hard-powercycle the node.
3713
3714 Because we need to return first and schedule the powercycle in the
3715 background, we won't be able to report failures nicely.
3716
3717 """
3718 hyper = hypervisor.GetHypervisor(hypervisor_type)
3719 try:
3720 pid = os.fork()
3721 except OSError:
3722
3723 pid = 0
3724 if pid > 0:
3725 return "Reboot scheduled in 5 seconds"
3726
3727 try:
3728 utils.Mlockall()
3729 except Exception:
3730 pass
3731 time.sleep(5)
3732 hyper.PowercycleNode()
3733
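# A sketch of the fork-and-return trick used above: the parent answers
# the RPC immediately, while the child waits out the delay and performs
# the destructive action. POSIX-only, and intentionally not invoked here.
import os
import time

def _schedule_in_background(action, delay=5):
  if os.fork() > 0:
    return "scheduled in %d seconds" % delay  # parent: report and return
  time.sleep(delay)                           # child: wait out the delay
  action()
  os._exit(0)                                 # never return into the caller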
3736 """Verifies a restricted command name.
3737
3738 @type cmd: string
3739 @param cmd: Command name
3740 @rtype: tuple; (boolean, string or None)
3741 @return: The tuple's first element is the status; if C{False}, the second
3742 element is an error message string, otherwise it's C{None}
3743
3744 """
3745 if not cmd.strip():
3746 return (False, "Missing command name")
3747
3748 if os.path.basename(cmd) != cmd:
3749 return (False, "Invalid command name")
3750
3751 if not constants.EXT_PLUGIN_MASK.match(cmd):
3752 return (False, "Command name contains forbidden characters")
3753
3754 return (True, None)
3755
3758 """Common checks for restricted command file system directories and files.
3759
3760 @type path: string
3761 @param path: Path to check
3762 @param owner: C{None} or tuple containing UID and GID
3763 @rtype: tuple; (boolean, string or C{os.stat} result)
3764 @return: The tuple's first element is the status; if C{False}, the second
3765 element is an error message string, otherwise it's the result of C{os.stat}
3766
3767 """
3768 if owner is None:
3769
3770 owner = (0, 0)
3771
3772 try:
3773 st = os.stat(path)
3774 except EnvironmentError, err:
3775 return (False, "Can't stat(2) '%s': %s" % (path, err))
3776
3777 if stat.S_IMODE(st.st_mode) & (~_RCMD_MAX_MODE):
3778 return (False, "Permissions on '%s' are too permissive" % path)
3779
3780 if (st.st_uid, st.st_gid) != owner:
3781 (owner_uid, owner_gid) = owner
3782 return (False, "'%s' is not owned by %s:%s" % (path, owner_uid, owner_gid))
3783
3784 return (True, st)
3785
3788 """Verifies restricted command directory.
3789
3790 @type path: string
3791 @param path: Path to check
3792 @rtype: tuple; (boolean, string or None)
3793 @return: The tuple's first element is the status; if C{False}, the second
3794 element is an error message string, otherwise it's C{None}
3795
3796 """
3797 (status, value) = _CommonRestrictedCmdCheck(path, _owner)
3798
3799 if not status:
3800 return (False, value)
3801
3802 if not stat.S_ISDIR(value.st_mode):
3803 return (False, "Path '%s' is not a directory" % path)
3804
3805 return (True, None)
3806
3809 """Verifies a whole restricted command and returns its executable filename.
3810
3811 @type path: string
3812 @param path: Directory containing restricted commands
3813 @type cmd: string
3814 @param cmd: Command name
3815 @rtype: tuple; (boolean, string)
3816 @return: The tuple's first element is the status; if C{False}, the second
3817 element is an error message string, otherwise the second element is the
3818 absolute path to the executable
3819
3820 """
3821 executable = utils.PathJoin(path, cmd)
3822
3823 (status, msg) = _CommonRestrictedCmdCheck(executable, _owner)
3824
3825 if not status:
3826 return (False, msg)
3827
3828 if not utils.IsExecutable(executable):
3829 return (False, "access(2) thinks '%s' can't be executed" % executable)
3830
3831 return (True, executable)
3832
3838 """Performs a number of tests on a restricted command.
3839
3840 @type path: string
3841 @param path: Directory containing restricted commands
3842 @type cmd: string
3843 @param cmd: Command name
3844 @return: Same as L{_VerifyRestrictedCmd}
3845
3846 """
3847
3848 (status, msg) = _verify_dir(path)
3849 if status:
3850
3851 (status, msg) = _verify_name(cmd)
3852
3853 if not status:
3854 return (False, msg)
3855
3856
3857 return _verify_cmd(path, cmd)
3858
3868 """Executes a restricted command after performing strict tests.
3869
3870 @type cmd: string
3871 @param cmd: Command name
3872 @rtype: string
3873 @return: Command output
3874 @raise RPCFail: In case of an error
3875
3876 """
3877 logging.info("Preparing to run restricted command '%s'", cmd)
3878
3879 if not _enabled:
3880 _Fail("Restricted commands disabled at configure time")
3881
3882 lock = None
3883 try:
3884 cmdresult = None
3885 try:
3886 lock = utils.FileLock.Open(_lock_file)
3887 lock.Exclusive(blocking=True, timeout=_lock_timeout)
3888
3889 (status, value) = _prepare_fn(_path, cmd)
3890
3891 if status:
3892 cmdresult = _runcmd_fn([value], env={}, reset_env=True,
3893 postfork_fn=lambda _: lock.Unlock())
3894 else:
3895 logging.error(value)
3896 except Exception:
3897
3898 logging.exception("Caught exception")
3899
3900 if cmdresult is None:
3901 logging.info("Sleeping for %0.1f seconds before returning",
3902 _RCMD_INVALID_DELAY)
3903 _sleep_fn(_RCMD_INVALID_DELAY)
3904
3905
3906 _Fail("Executing command '%s' failed" % cmd)
3907 elif cmdresult.failed or cmdresult.fail_reason:
3908 _Fail("Restricted command '%s' failed: %s; output: %s",
3909 cmd, cmdresult.fail_reason, cmdresult.output)
3910 else:
3911 return cmdresult.output
3912 finally:
3913 if lock is not None:
3914
3915 lock.Close()
3916 lock = None
3917
3920 """Creates or removes the watcher pause file.
3921
3922 @type until: None or number
3923 @param until: Unix timestamp saying until when the watcher shouldn't run
3924
3925 """
3926 if until is None:
3927 logging.info("Received request to no longer pause watcher")
3928 utils.RemoveFile(_filename)
3929 else:
3930 logging.info("Received request to pause watcher until %s", until)
3931
3932 if not ht.TNumber(until):
3933 _Fail("Duration must be numeric")
3934
3935 utils.WriteFile(_filename, data="%d\n" % (until, ), mode=0644)
3936
3939 """Hook runner.
3940
3941 This class is instantiated on the node side (ganeti-noded) and not
3942 on the master side.
3943
3944 """
3945 def __init__(self, hooks_base_dir=None):
3946 """Constructor for hooks runner.
3947
3948 @type hooks_base_dir: str or None
3949 @param hooks_base_dir: if not None, this overrides the
3950 L{pathutils.HOOKS_BASE_DIR} (useful for unittests)
3951
3952 """
3953 if hooks_base_dir is None:
3954 hooks_base_dir = pathutils.HOOKS_BASE_DIR
3955
3956
3957 self._BASE_DIR = hooks_base_dir
3958
3960 """Check that the hooks will be run only locally and then run them.
3961
3962 """
3963 assert len(node_list) == 1
3964 node = node_list[0]
3965 _, myself = ssconf.GetMasterAndMyself()
3966 assert node == myself
3967
3968 results = self.RunHooks(hpath, phase, env)
3969
3970
3971 return {node: (None, False, results)}
3972
3973 def RunHooks(self, hpath, phase, env):
3974 """Run the scripts in the hooks directory.
3975
3976 @type hpath: str
3977 @param hpath: the path to the hooks directory which
3978 holds the scripts
3979 @type phase: str
3980 @param phase: either L{constants.HOOKS_PHASE_PRE} or
3981 L{constants.HOOKS_PHASE_POST}
3982 @type env: dict
3983 @param env: dictionary with the environment for the hook
3984 @rtype: list
3985 @return: list of 3-element tuples:
3986 - script path
3987 - script result, either L{constants.HKR_SUCCESS} or
3988 L{constants.HKR_FAIL}
3989 - output of the script
3990
3991 @raise errors.ProgrammerError: for invalid input
3992 parameters
3993
3994 """
3995 if phase == constants.HOOKS_PHASE_PRE:
3996 suffix = "pre"
3997 elif phase == constants.HOOKS_PHASE_POST:
3998 suffix = "post"
3999 else:
4000 _Fail("Unknown hooks phase '%s'", phase)
4001
4002 subdir = "%s-%s.d" % (hpath, suffix)
4003 dir_name = utils.PathJoin(self._BASE_DIR, subdir)
4004
4005 results = []
4006
4007 if not os.path.isdir(dir_name):
4008
4009
4010 return results
4011
4012 runparts_results = utils.RunParts(dir_name, env=env, reset_env=True)
4013
4014 for (relname, relstatus, runresult) in runparts_results:
4015 if relstatus == constants.RUNPARTS_SKIP:
4016 rrval = constants.HKR_SKIP
4017 output = ""
4018 elif relstatus == constants.RUNPARTS_ERR:
4019 rrval = constants.HKR_FAIL
4020 output = "Hook script execution error: %s" % runresult
4021 elif relstatus == constants.RUNPARTS_RUN:
4022 if runresult.failed:
4023 rrval = constants.HKR_FAIL
4024 else:
4025 rrval = constants.HKR_SUCCESS
4026 output = utils.SafeEncode(runresult.output.strip())
4027 results.append(("%s/%s" % (subdir, relname), rrval, output))
4028
4029 return results
4030
4033 """IAllocator runner.
4034
4035 This class is instantiated on the node side (ganeti-noded) and not on
4036 the master side.
4037
4038 """
4039 @staticmethod
4040 def Run(name, idata):
4041 """Run an iallocator script.
4042
4043 @type name: str
4044 @param name: the iallocator script name
4045 @type idata: str
4046 @param idata: the allocator input data
4047
4048 @rtype: tuple
4049 @return: two element tuple of:
4050 - status
4051 - either error message or stdout of allocator (for success)
4052
4053 """
4054 alloc_script = utils.FindFile(name, constants.IALLOCATOR_SEARCH_PATH,
4055 os.path.isfile)
4056 if alloc_script is None:
4057 _Fail("iallocator module '%s' not found in the search path", name)
4058
4059 fd, fin_name = tempfile.mkstemp(prefix="ganeti-iallocator.")
4060 try:
4061 os.write(fd, idata)
4062 os.close(fd)
4063 result = utils.RunCmd([alloc_script, fin_name])
4064 if result.failed:
4065 _Fail("iallocator module '%s' failed: %s, output '%s'",
4066 name, result.fail_reason, result.output)
4067 finally:
4068 os.unlink(fin_name)
4069
4070 return result.stdout
4071
4074 """Simple class for managing a cache of block device information.
4075
4076 """
4077 _DEV_PREFIX = "/dev/"
4078 _ROOT_DIR = pathutils.BDEV_CACHE_DIR
4079
4080 @classmethod
4082 """Converts a /dev/name path to the cache file name.
4083
4084 This replaces slashes with underscores and strips the /dev
4085 prefix. It then returns the full path to the cache file.
4086
4087 @type dev_path: str
4088 @param dev_path: the C{/dev/} path name
4089 @rtype: str
4090 @return: the converted path name
4091
4092 """
4093 if dev_path.startswith(cls._DEV_PREFIX):
4094 dev_path = dev_path[len(cls._DEV_PREFIX):]
4095 dev_path = dev_path.replace("/", "_")
4096 fpath = utils.PathJoin(cls._ROOT_DIR, "bdev_%s" % dev_path)
4097 return fpath
4098
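# A standalone version of the mangling above, with a hypothetical cache
# root standing in for pathutils.BDEV_CACHE_DIR.
def _convert(dev_path, root="/var/run/bdev-cache"):
  prefix = "/dev/"
  if dev_path.startswith(prefix):
    dev_path = dev_path[len(prefix):]
  return "%s/bdev_%s" % (root, dev_path.replace("/", "_"))

assert _convert("/dev/xenvg/disk0") == "/var/run/bdev-cache/bdev_xenvg_disk0"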
4099 @classmethod
4100 def UpdateCache(cls, dev_path, owner, on_primary, iv_name):
4101 """Updates the cache information for a given device.
4102
4103 @type dev_path: str
4104 @param dev_path: the pathname of the device
4105 @type owner: str
4106 @param owner: the owner (instance name) of the device
4107 @type on_primary: bool
4108 @param on_primary: whether this is the primary
4109 node or not
4110 @type iv_name: str
4111 @param iv_name: the instance-visible name of the
4112 device, as in objects.Disk.iv_name
4113
4114 @rtype: None
4115
4116 """
4117 if dev_path is None:
4118 logging.error("DevCacheManager.UpdateCache got a None dev_path")
4119 return
4120 fpath = cls._ConvertPath(dev_path)
4121 if on_primary:
4122 state = "primary"
4123 else:
4124 state = "secondary"
4125 if iv_name is None:
4126 iv_name = "not_visible"
4127 fdata = "%s %s %s\n" % (str(owner), state, iv_name)
4128 try:
4129 utils.WriteFile(fpath, data=fdata)
4130 except EnvironmentError, err:
4131 logging.exception("Can't update bdev cache for %s: %s", dev_path, err)
4132
4133 @classmethod
4135 """Remove data for a dev_path.
4136
4137 This is just a wrapper over L{utils.io.RemoveFile} with a converted
4138 path name and logging.
4139
4140 @type dev_path: str
4141 @param dev_path: the pathname of the device
4142
4143 @rtype: None
4144
4145 """
4146 if dev_path is None:
4147 logging.error("DevCacheManager.RemoveCache got a None dev_path")
4148 return
4149 fpath = cls._ConvertPath(dev_path)
4150 try:
4151 utils.RemoveFile(fpath)
4152 except EnvironmentError, err:
4153 logging.exception("Can't remove bdev cache for %s: %s", dev_path, err)
4154