1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22 """Functions used by the node daemon
23
24 @var _ALLOWED_UPLOAD_FILES: denotes which files are accepted in
25 the L{UploadFile} function
26 @var _ALLOWED_CLEAN_DIRS: denotes which directories are accepted
27 in the L{_CleanDirectory} function
28
29 """
30
31
32
33
34
35
36
37
38 import os
39 import os.path
40 import shutil
41 import time
42 import stat
43 import errno
44 import re
45 import random
46 import logging
47 import tempfile
48 import zlib
49 import base64
50 import signal
51
52 from ganeti import errors
53 from ganeti import utils
54 from ganeti import ssh
55 from ganeti import hypervisor
56 from ganeti import constants
57 from ganeti import bdev
58 from ganeti import objects
59 from ganeti import ssconf
60 from ganeti import serializer
61 from ganeti import netutils
62
63
64 _BOOT_ID_PATH = "/proc/sys/kernel/random/boot_id"
65 _ALLOWED_CLEAN_DIRS = frozenset([
66 constants.DATA_DIR,
67 constants.JOB_QUEUE_ARCHIVE_DIR,
68 constants.QUEUE_DIR,
69 constants.CRYPTO_KEYS_DIR,
70 ])
71 _MAX_SSL_CERT_VALIDITY = 7 * 24 * 60 * 60
72 _X509_KEY_FILE = "key"
73 _X509_CERT_FILE = "cert"
74 _IES_STATUS_FILE = "status"
75 _IES_PID_FILE = "pid"
76 _IES_CA_FILE = "ca"
class RPCFail(Exception):
80 """Class denoting RPC failure.
81
82 Its argument is the error message.
83
84 """
85
86
87 def _Fail(msg, *args, **kwargs):
88 """Log an error and then raise an RPCFail exception.
89
90 This exception is then handled specially in the ganeti daemon and
91 turned into a 'failed' return type. As such, this function is a
92 useful shortcut for logging the error and returning it to the master
93 daemon.
94
95 @type msg: string
96 @param msg: the text of the exception
97 @raise RPCFail
98
99 """
100 if args:
101 msg = msg % args
102 if "log" not in kwargs or kwargs["log"]:
103 if "exc" in kwargs and kwargs["exc"]:
104 logging.exception(msg)
105 else:
106 logging.error(msg)
107 raise RPCFail(msg)
108
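# Illustrative sketch, not part of the original module: the calling
# convention used with _Fail() throughout this file.  The helper name and
# the check it performs are hypothetical.
def _ExampleFailUsage(path):
  """Show how _Fail() formats its arguments and raises RPCFail."""
  if not os.path.isabs(path):
    # logs the interpolated message and raises RPCFail with the same text
    _Fail("Path passed to example helper is not absolute: '%s'", path)
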
def _GetConfig():
111 """Simple wrapper to return a SimpleStore.
112
113 @rtype: L{ssconf.SimpleStore}
114 @return: a SimpleStore instance
115
116 """
117 return ssconf.SimpleStore()
118
def _GetSshRunner(cluster_name):
121 """Simple wrapper to return an SshRunner.
122
123 @type cluster_name: str
124 @param cluster_name: the cluster name, which is needed
125 by the SshRunner constructor
126 @rtype: L{ssh.SshRunner}
127 @return: an SshRunner instance
128
129 """
130 return ssh.SshRunner(cluster_name)
131
def _Decompress(data):
134 """Unpacks data compressed by the RPC client.
135
136 @type data: list or tuple
137 @param data: Data sent by RPC client
138 @rtype: str
139 @return: Decompressed data
140
141 """
142 assert isinstance(data, (list, tuple))
143 assert len(data) == 2
144 (encoding, content) = data
145 if encoding == constants.RPC_ENCODING_NONE:
146 return content
147 elif encoding == constants.RPC_ENCODING_ZLIB_BASE64:
148 return zlib.decompress(base64.b64decode(content))
149 else:
150 raise AssertionError("Unknown data encoding")
151
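# Illustrative sketch, not part of the original module: the inverse of
# _Decompress(), i.e. how an RPC client could package data so that the
# function above can unpack it.  The helper name and the 4096-byte
# threshold are assumptions for demonstration only.
def _ExampleCompressForRpc(data):
  """Return an (encoding, payload) tuple understood by _Decompress."""
  if len(data) < 4096:
    return (constants.RPC_ENCODING_NONE, data)
  return (constants.RPC_ENCODING_ZLIB_BASE64,
          base64.b64encode(zlib.compress(data)))
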
def _CleanDirectory(path, exclude=None):
154 """Removes all regular files in a directory.
155
156 @type path: str
157 @param path: the directory to clean
158 @type exclude: list
159 @param exclude: list of files to be excluded, defaults
160 to the empty list
161
162 """
163 if path not in _ALLOWED_CLEAN_DIRS:
164 _Fail("Path passed to _CleanDirectory not in allowed clean targets: '%s'",
165 path)
166
167 if not os.path.isdir(path):
168 return
169 if exclude is None:
170 exclude = []
171 else:
172
173 exclude = [os.path.normpath(i) for i in exclude]
174
175 for rel_name in utils.ListVisibleFiles(path):
176 full_name = utils.PathJoin(path, rel_name)
177 if full_name in exclude:
178 continue
179 if os.path.isfile(full_name) and not os.path.islink(full_name):
180 utils.RemoveFile(full_name)
181
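# Illustrative sketch, not part of the original module: how a caller such
# as a job-queue purge would use _CleanDirectory().  The helper name is
# hypothetical and constants.JOB_QUEUE_LOCK_FILE is assumed to exist.
def _ExampleCleanQueue():
  """Remove job files from the queue directory, keeping the lock file."""
  _CleanDirectory(constants.QUEUE_DIR,
                  exclude=[constants.JOB_QUEUE_LOCK_FILE])
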
205
206
207 _ALLOWED_UPLOAD_FILES = _BuildUploadFileList()
219
def GetMasterInfo():
222 """Returns master information.
223
224 This is a utility function to compute master information, either
225 for consumption here or from the node daemon.
226
227 @rtype: tuple
228 @return: master_netdev, master_ip, master_name
229 @raise RPCFail: in case of errors
230
231 """
232 try:
233 cfg = _GetConfig()
234 master_netdev = cfg.GetMasterNetdev()
235 master_ip = cfg.GetMasterIP()
236 master_node = cfg.GetMasterNode()
237 except errors.ConfigurationError, err:
238 _Fail("Cluster configuration incomplete: %s", err, exc=True)
239 return (master_netdev, master_ip, master_node)
240
def StartMaster(start_daemons, no_voting):
243 """Activate local node as master node.
244
245 The function will either try to activate the IP address of the master
246 (unless someone else has it) or also start the master daemons, based
247 on the start_daemons parameter.
248
249 @type start_daemons: boolean
250 @param start_daemons: whether to start the master daemons
251 (ganeti-masterd and ganeti-rapi), or (if false) activate the
252 master ip
253 @type no_voting: boolean
254 @param no_voting: whether to start ganeti-masterd without a node vote
255 (if start_daemons is True), but still non-interactively
256 @rtype: None
257
258 """
259
260 master_netdev, master_ip, _ = GetMasterInfo()
261
262 err_msgs = []
263
264 if start_daemons:
265 if no_voting:
266 masterd_args = "--no-voting --yes-do-it"
267 else:
268 masterd_args = ""
269
270 env = {
271 "EXTRA_MASTERD_ARGS": masterd_args,
272 }
273
274 result = utils.RunCmd([constants.DAEMON_UTIL, "start-master"], env=env)
275 if result.failed:
276 msg = "Can't start Ganeti master: %s" % result.output
277 logging.error(msg)
278 err_msgs.append(msg)
279
280 else:
281 if netutils.TcpPing(master_ip, constants.DEFAULT_NODED_PORT):
282 if netutils.OwnIpAddress(master_ip):
283
284 logging.debug("Master IP already configured, doing nothing")
285 else:
286 msg = "Someone else has the master ip, not activating"
287 logging.error(msg)
288 err_msgs.append(msg)
289 else:
290 result = utils.RunCmd(["ip", "address", "add", "%s/32" % master_ip,
291 "dev", master_netdev, "label",
292 "%s:0" % master_netdev])
293 if result.failed:
294 msg = "Can't activate master IP: %s" % result.output
295 logging.error(msg)
296 err_msgs.append(msg)
297
298 result = utils.RunCmd(["arping", "-q", "-U", "-c 3", "-I", master_netdev,
299 "-s", master_ip, master_ip])
300
301
302 if err_msgs:
303 _Fail("; ".join(err_msgs))
304
def StopMaster(stop_daemons):
307 """Deactivate this node as master.
308
309 The function will always try to deactivate the IP address of the
310 master. It will also stop the master daemons depending on the
311 stop_daemons parameter.
312
313 @type stop_daemons: boolean
314 @param stop_daemons: whether to also stop the master daemons
315 (ganeti-masterd and ganeti-rapi)
316 @rtype: None
317
318 """
319
320
321
322
323 master_netdev, master_ip, _ = GetMasterInfo()
324
325 result = utils.RunCmd(["ip", "address", "del", "%s/32" % master_ip,
326 "dev", master_netdev])
327 if result.failed:
328 logging.error("Can't remove the master IP, error: %s", result.output)
329
330
331 if stop_daemons:
332 result = utils.RunCmd([constants.DAEMON_UTIL, "stop-master"])
333 if result.failed:
334 logging.error("Could not stop Ganeti master, command %s had exitcode %s"
335 " and error %s",
336 result.cmd, result.exit_code, result.output)
337
338
339 def AddNode(dsa, dsapub, rsa, rsapub, sshkey, sshpub):
340 """Joins this node to the cluster.
341
342 This does the following:
343 - updates the hostkeys of the machine (rsa and dsa)
344 - adds the ssh private key to the user
345 - adds the ssh public key to the users' authorized_keys file
346
347 @type dsa: str
348 @param dsa: the DSA private key to write
349 @type dsapub: str
350 @param dsapub: the DSA public key to write
351 @type rsa: str
352 @param rsa: the RSA private key to write
353 @type rsapub: str
354 @param rsapub: the RSA public key to write
355 @type sshkey: str
356 @param sshkey: the SSH private key to write
357 @type sshpub: str
358 @param sshpub: the SSH public key to write
359 @rtype: boolean
360 @return: the success of the operation
361
362 """
363 sshd_keys = [(constants.SSH_HOST_RSA_PRIV, rsa, 0600),
364 (constants.SSH_HOST_RSA_PUB, rsapub, 0644),
365 (constants.SSH_HOST_DSA_PRIV, dsa, 0600),
366 (constants.SSH_HOST_DSA_PUB, dsapub, 0644)]
367 for name, content, mode in sshd_keys:
368 utils.WriteFile(name, data=content, mode=mode)
369
370 try:
371 priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS,
372 mkdir=True)
373 except errors.OpExecError, err:
374 _Fail("Error while processing user ssh files: %s", err, exc=True)
375
376 for name, content in [(priv_key, sshkey), (pub_key, sshpub)]:
377 utils.WriteFile(name, data=content, mode=0600)
378
379 utils.AddAuthorizedKey(auth_keys, sshpub)
380
381 result = utils.RunCmd([constants.DAEMON_UTIL, "reload-ssh-keys"])
382 if result.failed:
383 _Fail("Unable to reload SSH keys (command %r, exit code %s, output %r)",
384 result.cmd, result.exit_code, result.output)
385
def LeaveCluster(modify_ssh_setup):
388 """Cleans up and removes the current node.
389
390 This function cleans up and prepares the current node to be removed
391 from the cluster.
392
393 If processing is successful, then it raises an
394 L{errors.QuitGanetiException} which is used as a special case to
395 shutdown the node daemon.
396
397 @param modify_ssh_setup: boolean
398
399 """
400 _CleanDirectory(constants.DATA_DIR)
401 _CleanDirectory(constants.CRYPTO_KEYS_DIR)
402 JobQueuePurge()
403
404 if modify_ssh_setup:
405 try:
406 priv_key, pub_key, auth_keys = ssh.GetUserFiles(constants.GANETI_RUNAS)
407
408 utils.RemoveAuthorizedKey(auth_keys, utils.ReadFile(pub_key))
409
410 utils.RemoveFile(priv_key)
411 utils.RemoveFile(pub_key)
412 except errors.OpExecError:
413 logging.exception("Error while processing ssh files")
414
415 try:
416 utils.RemoveFile(constants.CONFD_HMAC_KEY)
417 utils.RemoveFile(constants.RAPI_CERT_FILE)
418 utils.RemoveFile(constants.NODED_CERT_FILE)
419 except:
420 logging.exception("Error while removing cluster secrets")
421
422 result = utils.RunCmd([constants.DAEMON_UTIL, "stop", constants.CONFD])
423 if result.failed:
424 logging.error("Command %s failed with exitcode %s and error %s",
425 result.cmd, result.exit_code, result.output)
426
427
428 raise errors.QuitGanetiException(True, 'Shutdown scheduled')
429
def GetNodeInfo(vgname, hypervisor_type):
432 """Gives back a hash with different information about the node.
433
434 @type vgname: C{string}
435 @param vgname: the name of the volume group to ask for disk space information
436 @type hypervisor_type: C{str}
437 @param hypervisor_type: the name of the hypervisor to ask for
438 memory information
439 @rtype: C{dict}
440 @return: dictionary with the following keys:
441 - vg_size is the size of the configured volume group in MiB
442 - vg_free is the free size of the volume group in MiB
443 - memory_dom0 is the memory allocated for domain0 in MiB
444 - memory_free is the currently available (free) ram in MiB
445 - memory_total is the total amount of RAM in MiB
446
447 """
448 outputarray = {}
449 vginfo = _GetVGInfo(vgname)
450 outputarray['vg_size'] = vginfo['vg_size']
451 outputarray['vg_free'] = vginfo['vg_free']
452
453 hyper = hypervisor.GetHypervisor(hypervisor_type)
454 hyp_info = hyper.GetNodeInfo()
455 if hyp_info is not None:
456 outputarray.update(hyp_info)
457
458 outputarray["bootid"] = utils.ReadFile(_BOOT_ID_PATH, size=128).rstrip("\n")
459
460 return outputarray
461
def VerifyNode(what, cluster_name):
464 """Verify the status of the local node.
465
466 Based on the input L{what} parameter, various checks are done on the
467 local node.
468
469 If the I{filelist} key is present, this list of
470 files is checksummed and the file/checksum pairs are returned.
471
472 If the I{nodelist} key is present, we check that we have
473 connectivity via ssh with the target nodes (and check the hostname
474 report).
475
476 If the I{node-net-test} key is present, we check that we have
477 connectivity to the given nodes via both primary IP and, if
478 applicable, secondary IPs.
479
480 @type what: C{dict}
481 @param what: a dictionary of things to check:
482 - filelist: list of files for which to compute checksums
483 - nodelist: list of nodes we should check ssh communication with
484 - node-net-test: list of nodes we should check node daemon port
485 connectivity with
486 - hypervisor: list with hypervisors to run the verify for
487 @rtype: dict
488 @return: a dictionary with the same keys as the input dict, and
489 values representing the result of the checks
490
491 """
492 result = {}
493 my_name = netutils.HostInfo().name
494 port = netutils.GetDaemonPort(constants.NODED)
495
496 if constants.NV_HYPERVISOR in what:
497 result[constants.NV_HYPERVISOR] = tmp = {}
498 for hv_name in what[constants.NV_HYPERVISOR]:
499 try:
500 val = hypervisor.GetHypervisor(hv_name).Verify()
501 except errors.HypervisorError, err:
502 val = "Error while checking hypervisor: %s" % str(err)
503 tmp[hv_name] = val
504
505 if constants.NV_FILELIST in what:
506 result[constants.NV_FILELIST] = utils.FingerprintFiles(
507 what[constants.NV_FILELIST])
508
509 if constants.NV_NODELIST in what:
510 result[constants.NV_NODELIST] = tmp = {}
511 random.shuffle(what[constants.NV_NODELIST])
512 for node in what[constants.NV_NODELIST]:
513 success, message = _GetSshRunner(cluster_name).VerifyNodeHostname(node)
514 if not success:
515 tmp[node] = message
516
517 if constants.NV_NODENETTEST in what:
518 result[constants.NV_NODENETTEST] = tmp = {}
519 my_pip = my_sip = None
520 for name, pip, sip in what[constants.NV_NODENETTEST]:
521 if name == my_name:
522 my_pip = pip
523 my_sip = sip
524 break
525 if not my_pip:
526 tmp[my_name] = ("Can't find my own primary/secondary IP"
527 " in the node list")
528 else:
529 for name, pip, sip in what[constants.NV_NODENETTEST]:
530 fail = []
531 if not netutils.TcpPing(pip, port, source=my_pip):
532 fail.append("primary")
533 if sip != pip:
534 if not netutils.TcpPing(sip, port, source=my_sip):
535 fail.append("secondary")
536 if fail:
537 tmp[name] = ("failure using the %s interface(s)" %
538 " and ".join(fail))
539
540 if constants.NV_MASTERIP in what:
541
542
543 master_name, master_ip = what[constants.NV_MASTERIP]
544 if master_name == my_name:
545 source = constants.IP4_ADDRESS_LOCALHOST
546 else:
547 source = None
548 result[constants.NV_MASTERIP] = netutils.TcpPing(master_ip, port,
549 source=source)
550
551 if constants.NV_LVLIST in what:
552 try:
553 val = GetVolumeList(what[constants.NV_LVLIST])
554 except RPCFail, err:
555 val = str(err)
556 result[constants.NV_LVLIST] = val
557
558 if constants.NV_INSTANCELIST in what:
559
560 try:
561 val = GetInstanceList(what[constants.NV_INSTANCELIST])
562 except RPCFail, err:
563 val = str(err)
564 result[constants.NV_INSTANCELIST] = val
565
566 if constants.NV_VGLIST in what:
567 result[constants.NV_VGLIST] = utils.ListVolumeGroups()
568
569 if constants.NV_PVLIST in what:
570 result[constants.NV_PVLIST] = \
571 bdev.LogicalVolume.GetPVInfo(what[constants.NV_PVLIST],
572 filter_allocatable=False)
573
574 if constants.NV_VERSION in what:
575 result[constants.NV_VERSION] = (constants.PROTOCOL_VERSION,
576 constants.RELEASE_VERSION)
577
578 if constants.NV_HVINFO in what:
579 hyper = hypervisor.GetHypervisor(what[constants.NV_HVINFO])
580 result[constants.NV_HVINFO] = hyper.GetNodeInfo()
581
582 if constants.NV_DRBDLIST in what:
583 try:
584 used_minors = bdev.DRBD8.GetUsedDevs().keys()
585 except errors.BlockDeviceError, err:
586 logging.warning("Can't get used minors list", exc_info=True)
587 used_minors = str(err)
588 result[constants.NV_DRBDLIST] = used_minors
589
590 if constants.NV_DRBDHELPER in what:
591 status = True
592 try:
593 payload = bdev.BaseDRBD.GetUsermodeHelper()
594 except errors.BlockDeviceError, err:
595 logging.error("Can't get DRBD usermode helper: %s", str(err))
596 status = False
597 payload = str(err)
598 result[constants.NV_DRBDHELPER] = (status, payload)
599
600 if constants.NV_NODESETUP in what:
601 result[constants.NV_NODESETUP] = tmpr = []
602 if not os.path.isdir("/sys/block") or not os.path.isdir("/sys/class/net"):
603 tmpr.append("The sysfs filesystem doesn't seem to be mounted"
604 " under /sys, missing required directories /sys/block"
605 " and /sys/class/net")
606 if (not os.path.isdir("/proc/sys") or
607 not os.path.isfile("/proc/sysrq-trigger")):
608 tmpr.append("The procfs filesystem doesn't seem to be mounted"
609 " under /proc, missing required directory /proc/sys and"
610 " the file /proc/sysrq-trigger")
611
612 if constants.NV_TIME in what:
613 result[constants.NV_TIME] = utils.SplitTime(time.time())
614
615 if constants.NV_OSLIST in what:
616 result[constants.NV_OSLIST] = DiagnoseOS()
617
618 return result
619
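# Illustrative sketch, not part of the original module: the shape of the
# 'what' argument and of the corresponding result returned by the node
# verification code above (values are made up).
#
#   what = {
#     constants.NV_FILELIST: ["/etc/hosts"],
#     constants.NV_NODELIST: ["node2.example.com"],
#     constants.NV_VERSION: None,
#   }
#   result = {
#     constants.NV_FILELIST: {"/etc/hosts": "...checksum..."},
#     constants.NV_NODELIST: {},   # empty dict: all ssh checks succeeded
#     constants.NV_VERSION: (constants.PROTOCOL_VERSION,
#                            constants.RELEASE_VERSION),
#   }
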
def GetVolumeList(vg_name):
622 """Compute list of logical volumes and their size.
623
624 @type vg_name: str
625 @param vg_name: the volume group whose LVs we should list
626 @rtype: dict
627 @return:
628 dictionary of all partitions (key) with value being a tuple of
629 their size (in MiB), inactive and online status::
630
631 {'test1': ('20.06', True, True)}
632
633 in case of errors, a string is returned with the error
634 details.
635
636 """
637 lvs = {}
638 sep = '|'
639 result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
640 "--separator=%s" % sep,
641 "-olv_name,lv_size,lv_attr", vg_name])
642 if result.failed:
643 _Fail("Failed to list logical volumes, lvs output: %s", result.output)
644
645 valid_line_re = re.compile("^ *([^|]+)\|([0-9.]+)\|([^|]{6})\|?$")
646 for line in result.stdout.splitlines():
647 line = line.strip()
648 match = valid_line_re.match(line)
649 if not match:
650 logging.error("Invalid line returned from lvs output: '%s'", line)
651 continue
652 name, size, attr = match.groups()
653 inactive = attr[4] == '-'
654 online = attr[5] == 'o'
655 virtual = attr[0] == 'v'
656 if virtual:
657
658
659 continue
660 lvs[name] = (size, inactive, online)
661
662 return lvs
663
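# Illustrative sketch, not part of the original module: a sample line from
# the 'lvs' invocation above and the entry built from it (values made up).
#
#   "  test1|20.06|-wi-ao|"  ->  lvs["test1"] = ("20.06", False, True)
#
# attr[4] == '-' marks an inactive LV, attr[5] == 'o' an open (online) one,
# and a leading 'v' (virtual volume) causes the line to be skipped.
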
def ListVolumeGroups():
666 """List the volume groups and their size.
667
668 @rtype: dict
669 @return: dictionary with keys volume name and values the
670 size of the volume
671
672 """
673 return utils.ListVolumeGroups()
674
def NodeVolumes():
677 """List all volumes on this node.
678
679 @rtype: list
680 @return:
681 A list of dictionaries, each having four keys:
682 - name: the logical volume name,
683 - size: the size of the logical volume
684 - dev: the physical device on which the LV lives
685 - vg: the volume group to which it belongs
686
687 In case of errors, we return an empty list and log the
688 error.
689
690 Note that since a logical volume can live on multiple physical
691 volumes, the resulting list might include a logical volume
692 multiple times.
693
694 """
695 result = utils.RunCmd(["lvs", "--noheadings", "--units=m", "--nosuffix",
696 "--separator=|",
697 "--options=lv_name,lv_size,devices,vg_name"])
698 if result.failed:
699 _Fail("Failed to list logical volumes, lvs output: %s",
700 result.output)
701
702 def parse_dev(dev):
703 return dev.split('(')[0]
704
705 def handle_dev(dev):
706 return [parse_dev(x) for x in dev.split(",")]
707
708 def map_line(line):
709 line = [v.strip() for v in line]
710 return [{'name': line[0], 'size': line[1],
711 'dev': dev, 'vg': line[3]} for dev in handle_dev(line[2])]
712
713 all_devs = []
714 for line in result.stdout.splitlines():
715 if line.count('|') >= 3:
716 all_devs.extend(map_line(line.split('|')))
717 else:
718 logging.warning("Strange line in the output from lvs: '%s'", line)
719 return all_devs
720
def BridgesExist(bridges_list):
723 """Check if a list of bridges exist on the current node.
724
725 @rtype: boolean
726 @return: C{True} if all of them exist, C{False} otherwise
727
728 """
729 missing = []
730 for bridge in bridges_list:
731 if not utils.BridgeExists(bridge):
732 missing.append(bridge)
733
734 if missing:
735 _Fail("Missing bridges %s", utils.CommaJoin(missing))
736
def GetInstanceList(hypervisor_list):
739 """Provides a list of instances.
740
741 @type hypervisor_list: list
742 @param hypervisor_list: the list of hypervisors to query information
743
744 @rtype: list
745 @return: a list of all running instances on the current node
746 - instance1.example.com
747 - instance2.example.com
748
749 """
750 results = []
751 for hname in hypervisor_list:
752 try:
753 names = hypervisor.GetHypervisor(hname).ListInstances()
754 results.extend(names)
755 except errors.HypervisorError, err:
756 _Fail("Error enumerating instances (hypervisor %s): %s",
757 hname, err, exc=True)
758
759 return results
760
def GetInstanceInfo(instance, hname):
763 """Gives back the information about an instance as a dictionary.
764
765 @type instance: string
766 @param instance: the instance name
767 @type hname: string
768 @param hname: the hypervisor type of the instance
769
770 @rtype: dict
771 @return: dictionary with the following keys:
772 - memory: memory size of instance (int)
773 - state: xen state of instance (string)
774 - time: cpu time of instance (float)
775
776 """
777 output = {}
778
779 iinfo = hypervisor.GetHypervisor(hname).GetInstanceInfo(instance)
780 if iinfo is not None:
781 output['memory'] = iinfo[2]
782 output['state'] = iinfo[4]
783 output['time'] = iinfo[5]
784
785 return output
786
def GetInstanceMigratable(instance):
789 """Gives whether an instance can be migrated.
790
791 @type instance: L{objects.Instance}
792 @param instance: object representing the instance to be checked.
793
794 @rtype: tuple
795 @return: tuple of (result, description) where:
796 - result: whether the instance can be migrated or not
797 - description: a description of the issue, if relevant
798
799 """
800 hyper = hypervisor.GetHypervisor(instance.hypervisor)
801 iname = instance.name
802 if iname not in hyper.ListInstances():
803 _Fail("Instance %s is not running", iname)
804
805 for idx in range(len(instance.disks)):
806 link_name = _GetBlockDevSymlinkPath(iname, idx)
807 if not os.path.islink(link_name):
808 logging.warning("Instance %s is missing symlink %s for disk %d",
809 iname, link_name, idx)
810
def GetAllInstancesInfo(hypervisor_list):
813 """Gather data about all instances.
814
815 This is the equivalent of L{GetInstanceInfo}, except that it
816 computes data for all instances at once, thus being faster if one
817 needs data about more than one instance.
818
819 @type hypervisor_list: list
820 @param hypervisor_list: list of hypervisors to query for instance data
821
822 @rtype: dict
823 @return: dictionary of instance: data, with data having the following keys:
824 - memory: memory size of instance (int)
825 - state: xen state of instance (string)
826 - time: cpu time of instance (float)
827 - vcpus: the number of vcpus
828
829 """
830 output = {}
831
832 for hname in hypervisor_list:
833 iinfo = hypervisor.GetHypervisor(hname).GetAllInstancesInfo()
834 if iinfo:
835 for name, _, memory, vcpus, state, times in iinfo:
836 value = {
837 'memory': memory,
838 'vcpus': vcpus,
839 'state': state,
840 'time': times,
841 }
842 if name in output:
843
844
845
846 for key in 'memory', 'vcpus':
847 if value[key] != output[name][key]:
848 _Fail("Instance %s is running twice"
849 " with different parameters", name)
850 output[name] = value
851
852 return output
853
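# Illustrative sketch, not part of the original module: an example of the
# mapping returned above (all values are made up).
#
#   {
#     "instance1.example.com": {"memory": 512, "vcpus": 1,
#                               "state": "-b----", "time": 142.6},
#   }
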
def _InstanceLogName(kind, os_name, instance):
856 """Compute the OS log filename for a given instance and operation.
857
858 The instance name and os name are passed in as strings since not all
859 operations have these as part of an instance object.
860
861 @type kind: string
862 @param kind: the operation type (e.g. add, import, etc.)
863 @type os_name: string
864 @param os_name: the os name
865 @type instance: string
866 @param instance: the name of the instance being imported/added/etc.
867
868 """
869
870 base = ("%s-%s-%s-%s.log" %
871 (kind, os_name, instance, utils.TimestampForFilename()))
872 return utils.PathJoin(constants.LOG_OS_DIR, base)
873
def InstanceOsAdd(instance, reinstall, debug):
876 """Add an OS to an instance.
877
878 @type instance: L{objects.Instance}
879 @param instance: Instance whose OS is to be installed
880 @type reinstall: boolean
881 @param reinstall: whether this is an instance reinstall
882 @type debug: integer
883 @param debug: debug level, passed to the OS scripts
884 @rtype: None
885
886 """
887 inst_os = OSFromDisk(instance.os)
888
889 create_env = OSEnvironment(instance, inst_os, debug)
890 if reinstall:
891 create_env['INSTANCE_REINSTALL'] = "1"
892
893 logfile = _InstanceLogName("add", instance.os, instance.name)
894
895 result = utils.RunCmd([inst_os.create_script], env=create_env,
896 cwd=inst_os.path, output=logfile,)
897 if result.failed:
898 logging.error("os create command '%s' returned error: %s, logfile: %s,"
899 " output: %s", result.cmd, result.fail_reason, logfile,
900 result.output)
901 lines = [utils.SafeEncode(val)
902 for val in utils.TailFile(logfile, lines=20)]
903 _Fail("OS create script failed (%s), last lines in the"
904 " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)
905
def RunRenameInstance(instance, old_name, debug):
908 """Run the OS rename script for an instance.
909
910 @type instance: L{objects.Instance}
911 @param instance: Instance whose OS is to be installed
912 @type old_name: string
913 @param old_name: previous instance name
914 @type debug: integer
915 @param debug: debug level, passed to the OS scripts
916 @rtype: boolean
917 @return: the success of the operation
918
919 """
920 inst_os = OSFromDisk(instance.os)
921
922 rename_env = OSEnvironment(instance, inst_os, debug)
923 rename_env['OLD_INSTANCE_NAME'] = old_name
924
925 logfile = _InstanceLogName("rename", instance.os,
926 "%s-%s" % (old_name, instance.name))
927
928 result = utils.RunCmd([inst_os.rename_script], env=rename_env,
929 cwd=inst_os.path, output=logfile)
930
931 if result.failed:
932 logging.error("os rename command '%s' returned error: %s output: %s",
933 result.cmd, result.fail_reason, result.output)
934 lines = [utils.SafeEncode(val)
935 for val in utils.TailFile(logfile, lines=20)]
936 _Fail("OS rename script failed (%s), last lines in the"
937 " log file:\n%s", result.fail_reason, "\n".join(lines), log=False)
938
def _GetVGInfo(vg_name):
941 """Get information about the volume group.
942
943 @type vg_name: str
944 @param vg_name: the volume group which we query
945 @rtype: dict
946 @return:
947 A dictionary with the following keys:
948 - C{vg_size} is the total size of the volume group in MiB
949 - C{vg_free} is the free size of the volume group in MiB
950 - C{pv_count} is the number of physical disks in that VG
951
952 If an error occurs during gathering of data, we return the same dict
953 with keys all set to None.
954
955 """
956 retdic = dict.fromkeys(["vg_size", "vg_free", "pv_count"])
957
958 retval = utils.RunCmd(["vgs", "-ovg_size,vg_free,pv_count", "--noheadings",
959 "--nosuffix", "--units=m", "--separator=:", vg_name])
960
961 if retval.failed:
962 logging.error("volume group %s not present", vg_name)
963 return retdic
964 valarr = retval.stdout.strip().rstrip(':').split(':')
965 if len(valarr) == 3:
966 try:
967 retdic = {
968 "vg_size": int(round(float(valarr[0]), 0)),
969 "vg_free": int(round(float(valarr[1]), 0)),
970 "pv_count": int(valarr[2]),
971 }
972 except (TypeError, ValueError), err:
973 logging.exception("Failed to parse vgs output: %s", err)
974 else:
975 logging.error("vgs output has the wrong number of fields (expected"
976 " three): %s", str(valarr))
977 return retdic
978
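# Illustrative sketch, not part of the original module: a sample 'vgs'
# output line for the command above and the dictionary built from it
# (numbers are made up).
#
#   "  102388.00:81920.00:1"  ->  {"vg_size": 102388, "vg_free": 81920,
#                                  "pv_count": 1}
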
983
def _SymlinkBlockDev(instance_name, device_path, idx):
986 """Set up symlinks to an instance's block device.
987
988 This is an auxiliary function run when an instance is started (on the primary
989 node) or when an instance is migrated (on the target node).
990
991
992 @param instance_name: the name of the target instance
993 @param device_path: path of the physical block device, on the node
994 @param idx: the disk index
995 @return: absolute path to the disk's symlink
996
997 """
998 link_name = _GetBlockDevSymlinkPath(instance_name, idx)
999 try:
1000 os.symlink(device_path, link_name)
1001 except OSError, err:
1002 if err.errno == errno.EEXIST:
1003 if (not os.path.islink(link_name) or
1004 os.readlink(link_name) != device_path):
1005 os.remove(link_name)
1006 os.symlink(device_path, link_name)
1007 else:
1008 raise
1009
1010 return link_name
1011
def _RemoveBlockDevLinks(instance_name, disks):
1014 """Remove the block device symlinks belonging to the given instance.
1015
1016 """
1017 for idx, _ in enumerate(disks):
1018 link_name = _GetBlockDevSymlinkPath(instance_name, idx)
1019 if os.path.islink(link_name):
1020 try:
1021 os.remove(link_name)
1022 except OSError:
1023 logging.exception("Can't remove symlink '%s'", link_name)
1024
def _GatherAndLinkBlockDevs(instance):
1027 """Set up an instance's block device(s).
1028
1029 This is run on the primary node at instance startup. The block
1030 devices must be already assembled.
1031
1032 @type instance: L{objects.Instance}
1033 @param instance: the instance whose disks we should assemble
1034 @rtype: list
1035 @return: list of (disk_object, device_path)
1036
1037 """
1038 block_devices = []
1039 for idx, disk in enumerate(instance.disks):
1040 device = _RecursiveFindBD(disk)
1041 if device is None:
1042 raise errors.BlockDeviceError("Block device '%s' is not set up." %
1043 str(disk))
1044 device.Open()
1045 try:
1046 link_name = _SymlinkBlockDev(instance.name, device.dev_path, idx)
1047 except OSError, e:
1048 raise errors.BlockDeviceError("Cannot create block device symlink: %s" %
1049 e.strerror)
1050
1051 block_devices.append((disk, link_name))
1052
1053 return block_devices
1054
def StartInstance(instance):
1057 """Start an instance.
1058
1059 @type instance: L{objects.Instance}
1060 @param instance: the instance object
1061 @rtype: None
1062
1063 """
1064 running_instances = GetInstanceList([instance.hypervisor])
1065
1066 if instance.name in running_instances:
1067 logging.info("Instance %s already running, not starting", instance.name)
1068 return
1069
1070 try:
1071 block_devices = _GatherAndLinkBlockDevs(instance)
1072 hyper = hypervisor.GetHypervisor(instance.hypervisor)
1073 hyper.StartInstance(instance, block_devices)
1074 except errors.BlockDeviceError, err:
1075 _Fail("Block device error: %s", err, exc=True)
1076 except errors.HypervisorError, err:
1077 _RemoveBlockDevLinks(instance.name, instance.disks)
1078 _Fail("Hypervisor error: %s", err, exc=True)
1079
def InstanceShutdown(instance, timeout):
1082 """Shut an instance down.
1083
1084 @note: this function uses polling with a hardcoded retry interval.
1085
1086 @type instance: L{objects.Instance}
1087 @param instance: the instance object
1088 @type timeout: integer
1089 @param timeout: maximum timeout for soft shutdown
1090 @rtype: None
1091
1092 """
1093 hv_name = instance.hypervisor
1094 hyper = hypervisor.GetHypervisor(hv_name)
1095 iname = instance.name
1096
1097 if instance.name not in hyper.ListInstances():
1098 logging.info("Instance %s not running, doing nothing", iname)
1099 return
1100
1101 class _TryShutdown:
1102 def __init__(self):
1103 self.tried_once = False
1104
1105 def __call__(self):
1106 if iname not in hyper.ListInstances():
1107 return
1108
1109 try:
1110 hyper.StopInstance(instance, retry=self.tried_once)
1111 except errors.HypervisorError, err:
1112 if iname not in hyper.ListInstances():
1113
1114
1115 return
1116
1117 _Fail("Failed to stop instance %s: %s", iname, err)
1118
1119 self.tried_once = True
1120
1121 raise utils.RetryAgain()
1122
1123 try:
1124 utils.Retry(_TryShutdown(), 5, timeout)
1125 except utils.RetryTimeout:
1126
1127 logging.error("Shutdown of '%s' unsuccessful, forcing", iname)
1128
1129 try:
1130 hyper.StopInstance(instance, force=True)
1131 except errors.HypervisorError, err:
1132 if iname in hyper.ListInstances():
1133
1134
1135 _Fail("Failed to force stop instance %s: %s", iname, err)
1136
1137 time.sleep(1)
1138
1139 if iname in hyper.ListInstances():
1140 _Fail("Could not shutdown instance %s even by destroy", iname)
1141
1142 try:
1143 hyper.CleanupInstance(instance.name)
1144 except errors.HypervisorError, err:
1145 logging.warning("Failed to execute post-shutdown cleanup step: %s", err)
1146
1147 _RemoveBlockDevLinks(iname, instance.disks)
1148
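# Illustrative sketch, not part of the original module: the polling pattern
# used by the shutdown code above.  utils.Retry() keeps calling the callable
# (here every 5 seconds, up to 'timeout' seconds overall) until it returns
# instead of raising utils.RetryAgain.  The helper name is hypothetical.
def _ExampleWaitForCondition(check_fn, timeout):
  """Poll check_fn until it returns True or the timeout expires."""
  def _Check():
    if not check_fn():
      raise utils.RetryAgain()
  try:
    utils.Retry(_Check, 5, timeout)
    return True
  except utils.RetryTimeout:
    return False
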
def InstanceReboot(instance, reboot_type, shutdown_timeout):
1151 """Reboot an instance.
1152
1153 @type instance: L{objects.Instance}
1154 @param instance: the instance object to reboot
1155 @type reboot_type: str
1156 @param reboot_type: the type of reboot, one of the following
1157 constants:
1158 - L{constants.INSTANCE_REBOOT_SOFT}: only reboot the
1159 instance OS, do not recreate the VM
1160 - L{constants.INSTANCE_REBOOT_HARD}: tear down and
1161 restart the VM (at the hypervisor level)
1162 - the other reboot type (L{constants.INSTANCE_REBOOT_FULL}) is
1163 not accepted here, since that mode is handled differently, in
1164 cmdlib, and translates into full stop and start of the
1165 instance (instead of a call_instance_reboot RPC)
1166 @type shutdown_timeout: integer
1167 @param shutdown_timeout: maximum timeout for soft shutdown
1168 @rtype: None
1169
1170 """
1171 running_instances = GetInstanceList([instance.hypervisor])
1172
1173 if instance.name not in running_instances:
1174 _Fail("Cannot reboot instance %s that is not running", instance.name)
1175
1176 hyper = hypervisor.GetHypervisor(instance.hypervisor)
1177 if reboot_type == constants.INSTANCE_REBOOT_SOFT:
1178 try:
1179 hyper.RebootInstance(instance)
1180 except errors.HypervisorError, err:
1181 _Fail("Failed to soft reboot instance %s: %s", instance.name, err)
1182 elif reboot_type == constants.INSTANCE_REBOOT_HARD:
1183 try:
1184 InstanceShutdown(instance, shutdown_timeout)
1185 return StartInstance(instance)
1186 except errors.HypervisorError, err:
1187 _Fail("Failed to hard reboot instance %s: %s", instance.name, err)
1188 else:
1189 _Fail("Invalid reboot_type received: %s", reboot_type)
1190
1205
def AcceptInstance(instance, info, target):
1208 """Prepare the node to accept an instance.
1209
1210 @type instance: L{objects.Instance}
1211 @param instance: the instance definition
1212 @type info: string/data (opaque)
1213 @param info: migration information, from the source node
1214 @type target: string
1215 @param target: target host (usually ip), on this node
1216
1217 """
1218 hyper = hypervisor.GetHypervisor(instance.hypervisor)
1219 try:
1220 hyper.AcceptInstance(instance, info, target)
1221 except errors.HypervisorError, err:
1222 _Fail("Failed to accept instance: %s", err, exc=True)
1223
def FinalizeMigration(instance, info, success):
1226 """Finalize any preparation to accept an instance.
1227
1228 @type instance: L{objects.Instance}
1229 @param instance: the instance definition
1230 @type info: string/data (opaque)
1231 @param info: migration information, from the source node
1232 @type success: boolean
1233 @param success: whether the migration was a success or a failure
1234
1235 """
1236 hyper = hypervisor.GetHypervisor(instance.hypervisor)
1237 try:
1238 hyper.FinalizeMigration(instance, info, success)
1239 except errors.HypervisorError, err:
1240 _Fail("Failed to finalize migration: %s", err, exc=True)
1241
def MigrateInstance(instance, target, live):
1244 """Migrates an instance to another node.
1245
1246 @type instance: L{objects.Instance}
1247 @param instance: the instance definition
1248 @type target: string
1249 @param target: the target node name
1250 @type live: boolean
1251 @param live: whether the migration should be done live or not (the
1252 interpretation of this parameter is left to the hypervisor)
1253 @rtype: tuple
1254 @return: a tuple of (success, msg) where:
1255 - success is a boolean denoting the success/failure of the operation
1256 - msg is a string with details in case of failure
1257
1258 """
1259 hyper = hypervisor.GetHypervisor(instance.hypervisor)
1260
1261 try:
1262 hyper.MigrateInstance(instance, target, live)
1263 except errors.HypervisorError, err:
1264 _Fail("Failed to migrate instance: %s", err, exc=True)
1265
def BlockdevCreate(disk, size, owner, on_primary, info):
1268 """Creates a block device for an instance.
1269
1270 @type disk: L{objects.Disk}
1271 @param disk: the object describing the disk we should create
1272 @type size: int
1273 @param size: the size of the physical underlying device, in MiB
1274 @type owner: str
1275 @param owner: the name of the instance for which disk is created,
1276 used for device cache data
1277 @type on_primary: boolean
1278 @param on_primary: indicates if it is the primary node or not
1279 @type info: string
1280 @param info: string that will be sent to the physical device
1281 creation, used for example to set (LVM) tags on LVs
1282
1283 @return: the new unique_id of the device (this can sometime be
1284 computed only after creation), or None. On secondary nodes,
1285 it's not required to return anything.
1286
1287 """
1288
1289
1290 clist = []
1291 if disk.children:
1292 for child in disk.children:
1293 try:
1294 crdev = _RecursiveAssembleBD(child, owner, on_primary)
1295 except errors.BlockDeviceError, err:
1296 _Fail("Can't assemble device %s: %s", child, err)
1297 if on_primary or disk.AssembleOnSecondary():
1298
1299
1300 try:
1301
1302 crdev.Open()
1303 except errors.BlockDeviceError, err:
1304 _Fail("Can't make child '%s' read-write: %s", child, err)
1305 clist.append(crdev)
1306
1307 try:
1308 device = bdev.Create(disk.dev_type, disk.physical_id, clist, disk.size)
1309 except errors.BlockDeviceError, err:
1310 _Fail("Can't create block device: %s", err)
1311
1312 if on_primary or disk.AssembleOnSecondary():
1313 try:
1314 device.Assemble()
1315 except errors.BlockDeviceError, err:
1316 _Fail("Can't assemble device after creation, unusual event: %s", err)
1317 device.SetSyncSpeed(constants.SYNC_SPEED)
1318 if on_primary or disk.OpenOnSecondary():
1319 try:
1320 device.Open(force=True)
1321 except errors.BlockDeviceError, err:
1322 _Fail("Can't make device r/w after creation, unusual event: %s", err)
1323 DevCacheManager.UpdateCache(device.dev_path, owner,
1324 on_primary, disk.iv_name)
1325
1326 device.SetInfo(info)
1327
1328 return device.unique_id
1329
def BlockdevRemove(disk):
1332 """Remove a block device.
1333
1334 @note: This is intended to be called recursively.
1335
1336 @type disk: L{objects.Disk}
1337 @param disk: the disk object we should remove
1338 @rtype: boolean
1339 @return: the success of the operation
1340
1341 """
1342 msgs = []
1343 try:
1344 rdev = _RecursiveFindBD(disk)
1345 except errors.BlockDeviceError, err:
1346
1347 logging.info("Can't attach to device %s in remove", disk)
1348 rdev = None
1349 if rdev is not None:
1350 r_path = rdev.dev_path
1351 try:
1352 rdev.Remove()
1353 except errors.BlockDeviceError, err:
1354 msgs.append(str(err))
1355 if not msgs:
1356 DevCacheManager.RemoveCache(r_path)
1357
1358 if disk.children:
1359 for child in disk.children:
1360 try:
1361 BlockdevRemove(child)
1362 except RPCFail, err:
1363 msgs.append(str(err))
1364
1365 if msgs:
1366 _Fail("; ".join(msgs))
1367
def _RecursiveAssembleBD(disk, owner, as_primary):
1370 """Activate a block device for an instance.
1371
1372 This is run on the primary and secondary nodes for an instance.
1373
1374 @note: this function is called recursively.
1375
1376 @type disk: L{objects.Disk}
1377 @param disk: the disk we try to assemble
1378 @type owner: str
1379 @param owner: the name of the instance which owns the disk
1380 @type as_primary: boolean
1381 @param as_primary: if we should make the block device
1382 read/write
1383
1384 @return: the assembled device or None (in case no device
1385 was assembled)
1386 @raise errors.BlockDeviceError: in case there is an error
1387 during the activation of the children or the device
1388 itself
1389
1390 """
1391 children = []
1392 if disk.children:
1393 mcn = disk.ChildrenNeeded()
1394 if mcn == -1:
1395 mcn = 0
1396 else:
1397 mcn = len(disk.children) - mcn
1398 for chld_disk in disk.children:
1399 try:
1400 cdev = _RecursiveAssembleBD(chld_disk, owner, as_primary)
1401 except errors.BlockDeviceError, err:
1402 if children.count(None) >= mcn:
1403 raise
1404 cdev = None
1405 logging.error("Error in child activation (but continuing): %s",
1406 str(err))
1407 children.append(cdev)
1408
1409 if as_primary or disk.AssembleOnSecondary():
1410 r_dev = bdev.Assemble(disk.dev_type, disk.physical_id, children, disk.size)
1411 r_dev.SetSyncSpeed(constants.SYNC_SPEED)
1412 result = r_dev
1413 if as_primary or disk.OpenOnSecondary():
1414 r_dev.Open()
1415 DevCacheManager.UpdateCache(r_dev.dev_path, owner,
1416 as_primary, disk.iv_name)
1417
1418 else:
1419 result = True
1420 return result
1421
def BlockdevAssemble(disk, owner, as_primary):
1424 """Activate a block device for an instance.
1425
1426 This is a wrapper over _RecursiveAssembleBD.
1427
1428 @rtype: str or boolean
1429 @return: a C{/dev/...} path for primary nodes, and
1430 C{True} for secondary nodes
1431
1432 """
1433 try:
1434 result = _RecursiveAssembleBD(disk, owner, as_primary)
1435 if isinstance(result, bdev.BlockDev):
1436
1437 result = result.dev_path
1438 except errors.BlockDeviceError, err:
1439 _Fail("Error while assembling disk: %s", err, exc=True)
1440
1441 return result
1442
def BlockdevShutdown(disk):
1445 """Shut down a block device.
1446
1447 First, if the device is assembled (Attach() is successful), then
1448 the device is shutdown. Then the children of the device are
1449 shutdown.
1450
1451 This function is called recursively. Note that we don't cache the
1452 children or such; as opposed to assemble, shutdown of different
1453 devices doesn't require that the upper device was active.
1454
1455 @type disk: L{objects.Disk}
1456 @param disk: the description of the disk we should
1457 shutdown
1458 @rtype: None
1459
1460 """
1461 msgs = []
1462 r_dev = _RecursiveFindBD(disk)
1463 if r_dev is not None:
1464 r_path = r_dev.dev_path
1465 try:
1466 r_dev.Shutdown()
1467 DevCacheManager.RemoveCache(r_path)
1468 except errors.BlockDeviceError, err:
1469 msgs.append(str(err))
1470
1471 if disk.children:
1472 for child in disk.children:
1473 try:
1474 BlockdevShutdown(child)
1475 except RPCFail, err:
1476 msgs.append(str(err))
1477
1478 if msgs:
1479 _Fail("; ".join(msgs))
1480
def BlockdevAddchildren(parent_cdev, new_cdevs):
1483 """Extend a mirrored block device.
1484
1485 @type parent_cdev: L{objects.Disk}
1486 @param parent_cdev: the disk to which we should add children
1487 @type new_cdevs: list of L{objects.Disk}
1488 @param new_cdevs: the list of children which we should add
1489 @rtype: None
1490
1491 """
1492 parent_bdev = _RecursiveFindBD(parent_cdev)
1493 if parent_bdev is None:
1494 _Fail("Can't find parent device '%s' in add children", parent_cdev)
1495 new_bdevs = [_RecursiveFindBD(disk) for disk in new_cdevs]
1496 if new_bdevs.count(None) > 0:
1497 _Fail("Can't find new device(s) to add: %s:%s", new_bdevs, new_cdevs)
1498 parent_bdev.AddChildren(new_bdevs)
1499
def BlockdevRemovechildren(parent_cdev, new_cdevs):
1502 """Shrink a mirrored block device.
1503
1504 @type parent_cdev: L{objects.Disk}
1505 @param parent_cdev: the disk from which we should remove children
1506 @type new_cdevs: list of L{objects.Disk}
1507 @param new_cdevs: the list of children which we should remove
1508 @rtype: None
1509
1510 """
1511 parent_bdev = _RecursiveFindBD(parent_cdev)
1512 if parent_bdev is None:
1513 _Fail("Can't find parent device '%s' in remove children", parent_cdev)
1514 devs = []
1515 for disk in new_cdevs:
1516 rpath = disk.StaticDevPath()
1517 if rpath is None:
1518 bd = _RecursiveFindBD(disk)
1519 if bd is None:
1520 _Fail("Can't find device %s while removing children", disk)
1521 else:
1522 devs.append(bd.dev_path)
1523 else:
1524 if not utils.IsNormAbsPath(rpath):
1525 _Fail("Strange path returned from StaticDevPath: '%s'", rpath)
1526 devs.append(rpath)
1527 parent_bdev.RemoveChildren(devs)
1528
def BlockdevGetmirrorstatus(disks):
1531 """Get the mirroring status of a list of devices.
1532
1533 @type disks: list of L{objects.Disk}
1534 @param disks: the list of disks which we should query
1535 @rtype: list
1536 @return:
1537 a list of (mirror_done, estimated_time) tuples, which
1538 are the result of L{bdev.BlockDev.CombinedSyncStatus}
1539 @raise errors.BlockDeviceError: if any of the disks cannot be
1540 found
1541
1542 """
1543 stats = []
1544 for dsk in disks:
1545 rbd = _RecursiveFindBD(dsk)
1546 if rbd is None:
1547 _Fail("Can't find device %s", dsk)
1548
1549 stats.append(rbd.CombinedSyncStatus())
1550
1551 return stats
1552
def _RecursiveFindBD(disk):
1555 """Check if a device is activated.
1556
1557 If so, return information about the real device.
1558
1559 @type disk: L{objects.Disk}
1560 @param disk: the disk object we need to find
1561
1562 @return: None if the device can't be found,
1563 otherwise the device instance
1564
1565 """
1566 children = []
1567 if disk.children:
1568 for chdisk in disk.children:
1569 children.append(_RecursiveFindBD(chdisk))
1570
1571 return bdev.FindDevice(disk.dev_type, disk.physical_id, children, disk.size)
1572
def _OpenRealBD(disk):
1575 """Opens the underlying block device of a disk.
1576
1577 @type disk: L{objects.Disk}
1578 @param disk: the disk object we want to open
1579
1580 """
1581 real_disk = _RecursiveFindBD(disk)
1582 if real_disk is None:
1583 _Fail("Block device '%s' is not set up", disk)
1584
1585 real_disk.Open()
1586
1587 return real_disk
1588
def BlockdevFind(disk):
1591 """Check if a device is activated.
1592
1593 If it is, return information about the real device.
1594
1595 @type disk: L{objects.Disk}
1596 @param disk: the disk to find
1597 @rtype: None or objects.BlockDevStatus
1598 @return: None if the disk cannot be found, otherwise the current
1599 information
1600
1601 """
1602 try:
1603 rbd = _RecursiveFindBD(disk)
1604 except errors.BlockDeviceError, err:
1605 _Fail("Failed to find device: %s", err, exc=True)
1606
1607 if rbd is None:
1608 return None
1609
1610 return rbd.GetSyncStatus()
1611
def BlockdevGetsize(disks):
1614 """Computes the size of the given disks.
1615
1616 If a disk is not found, returns None instead.
1617
1618 @type disks: list of L{objects.Disk}
1619 @param disks: the list of disks to compute the size for
1620 @rtype: list
1621 @return: list with elements None if the disk cannot be found,
1622 otherwise the size
1623
1624 """
1625 result = []
1626 for cf in disks:
1627 try:
1628 rbd = _RecursiveFindBD(cf)
1629 except errors.BlockDeviceError:
1630 result.append(None)
1631 continue
1632 if rbd is None:
1633 result.append(None)
1634 else:
1635 result.append(rbd.GetActualSize())
1636 return result
1637
def BlockdevExport(disk, dest_node, dest_path, cluster_name):
1640 """Export a block device to a remote node.
1641
1642 @type disk: L{objects.Disk}
1643 @param disk: the description of the disk to export
1644 @type dest_node: str
1645 @param dest_node: the destination node to export to
1646 @type dest_path: str
1647 @param dest_path: the destination path on the target node
1648 @type cluster_name: str
1649 @param cluster_name: the cluster name, needed for SSH hostalias
1650 @rtype: None
1651
1652 """
1653 real_disk = _OpenRealBD(disk)
1654
1655
1656 expcmd = utils.BuildShellCmd("set -e; set -o pipefail; "
1657 "dd if=%s bs=1048576 count=%s",
1658 real_disk.dev_path, str(disk.size))
1659
1660
1661
1662
1663
1664
1665
1666 destcmd = utils.BuildShellCmd("dd of=%s conv=nocreat,notrunc bs=65536"
1667 " oflag=dsync", dest_path)
1668
1669 remotecmd = _GetSshRunner(cluster_name).BuildCmd(dest_node,
1670 constants.GANETI_RUNAS,
1671 destcmd)
1672
1673
1674 command = '|'.join([expcmd, utils.ShellQuoteArgs(remotecmd)])
1675
1676 result = utils.RunCmd(["bash", "-c", command])
1677
1678 if result.failed:
1679 _Fail("Disk copy command '%s' returned error: %s"
1680 " output: %s", command, result.fail_reason, result.output)
1681
1682
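# Illustrative sketch, not part of the original module: roughly the pipeline
# assembled above (device path, size and host are made up):
#
#   set -e; set -o pipefail; dd if=/dev/xenvg/disk0 bs=1048576 count=1024 \
#     | ssh node2.example.com "dd of=/tmp/target conv=nocreat,notrunc bs=65536 oflag=dsync"
#
# i.e. a local dd reads the block device and streams it over SSH into a dd
# running on the destination node.
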
1683 def UploadFile(file_name, data, mode, uid, gid, atime, mtime):
1684 """Write a file to the filesystem.
1685
1686 This allows the master to overwrite(!) a file. It will only perform
1687 the operation if the file belongs to a list of configuration files.
1688
1689 @type file_name: str
1690 @param file_name: the target file name
1691 @type data: str
1692 @param data: the new contents of the file
1693 @type mode: int
1694 @param mode: the mode to give the file (can be None)
1695 @type uid: int
1696 @param uid: the owner of the file (can be -1 for default)
1697 @type gid: int
1698 @param gid: the group of the file (can be -1 for default)
1699 @type atime: float
1700 @param atime: the atime to set on the file (can be None)
1701 @type mtime: float
1702 @param mtime: the mtime to set on the file (can be None)
1703 @rtype: None
1704
1705 """
1706 if not os.path.isabs(file_name):
1707 _Fail("Filename passed to UploadFile is not absolute: '%s'", file_name)
1708
1709 if file_name not in _ALLOWED_UPLOAD_FILES:
1710 _Fail("Filename passed to UploadFile not in allowed upload targets: '%s'",
1711 file_name)
1712
1713 raw_data = _Decompress(data)
1714
1715 utils.WriteFile(file_name, data=raw_data, mode=mode, uid=uid, gid=gid,
1716 atime=atime, mtime=mtime)
1717
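# Illustrative sketch, not part of the original module: how the master side
# could build the arguments for UploadFile() above.  The helper name is
# hypothetical and the compression choice is an assumption; it simply
# produces the tuple format that _Decompress() understands.
def _ExampleUploadArgs(file_name):
  """Return (data, mode, uid, gid, atime, mtime) for UploadFile."""
  st = os.stat(file_name)
  data = (constants.RPC_ENCODING_ZLIB_BASE64,
          base64.b64encode(zlib.compress(utils.ReadFile(file_name))))
  return (data, stat.S_IMODE(st.st_mode), st.st_uid, st.st_gid,
          st.st_atime, st.st_mtime)

# usage: UploadFile(file_name, *_ExampleUploadArgs(file_name))
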
def WriteSsconfFiles(values):
1720 """Update all ssconf files.
1721
1722 Wrapper around the SimpleStore.WriteFiles.
1723
1724 """
1725 ssconf.SimpleStore().WriteFiles(values)
1726
def _ErrnoOrStr(err):
1729 """Format an EnvironmentError exception.
1730
1731 If the L{err} argument has an errno attribute, it will be looked up
1732 and converted into a textual C{E...} description. Otherwise the
1733 string representation of the error will be returned.
1734
1735 @type err: L{EnvironmentError}
1736 @param err: the exception to format
1737
1738 """
1739 if hasattr(err, 'errno'):
1740 detail = errno.errorcode[err.errno]
1741 else:
1742 detail = str(err)
1743 return detail
1744
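# Illustrative sketch, not part of the original module: an example of the
# conversion performed by _ErrnoOrStr().
#
#   _ErrnoOrStr(IOError(errno.ENOENT, "No such file or directory"))
#     -> "ENOENT"
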
def _OSOndiskAPIVersion(os_dir):
1747 """Compute and return the API version of a given OS.
1748
1749 This function will try to read the API version of the OS residing in
1750 the 'os_dir' directory.
1751
1752 @type os_dir: str
1753 @param os_dir: the directory in which we should look for the OS
1754 @rtype: tuple
1755 @return: tuple (status, data) with status denoting the validity and
1756 data holding either the valid versions or an error message
1757
1758 """
1759 api_file = utils.PathJoin(os_dir, constants.OS_API_FILE)
1760
1761 try:
1762 st = os.stat(api_file)
1763 except EnvironmentError, err:
1764 return False, ("Required file '%s' not found under path %s: %s" %
1765 (constants.OS_API_FILE, os_dir, _ErrnoOrStr(err)))
1766
1767 if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
1768 return False, ("File '%s' in %s is not a regular file" %
1769 (constants.OS_API_FILE, os_dir))
1770
1771 try:
1772 api_versions = utils.ReadFile(api_file).splitlines()
1773 except EnvironmentError, err:
1774 return False, ("Error while reading the API version file at %s: %s" %
1775 (api_file, _ErrnoOrStr(err)))
1776
1777 try:
1778 api_versions = [int(version.strip()) for version in api_versions]
1779 except (TypeError, ValueError), err:
1780 return False, ("API version(s) can't be converted to integer: %s" %
1781 str(err))
1782
1783 return True, api_versions
1784
def DiagnoseOS(top_dirs=None):
1787 """Compute the validity for all OSes.
1788
1789 @type top_dirs: list
1790 @param top_dirs: the list of directories in which to
1791 search (if not given defaults to
1792 L{constants.OS_SEARCH_PATH})
1793 @rtype: list of L{objects.OS}
1794 @return: a list of tuples (name, path, status, diagnose, variants,
1795 parameters, api_version) for all (potential) OSes under all
1796 search paths, where:
1797 - name is the (potential) OS name
1798 - path is the full path to the OS
1799 - status True/False is the validity of the OS
1800 - diagnose is the error message for an invalid OS, otherwise empty
1801 - variants is a list of supported OS variants, if any
1802 - parameters is a list of (name, help) parameters, if any
1803 - api_version is a list of supported OS API versions
1804
1805 """
1806 if top_dirs is None:
1807 top_dirs = constants.OS_SEARCH_PATH
1808
1809 result = []
1810 for dir_name in top_dirs:
1811 if os.path.isdir(dir_name):
1812 try:
1813 f_names = utils.ListVisibleFiles(dir_name)
1814 except EnvironmentError, err:
1815 logging.exception("Can't list the OS directory %s: %s", dir_name, err)
1816 break
1817 for name in f_names:
1818 os_path = utils.PathJoin(dir_name, name)
1819 status, os_inst = _TryOSFromDisk(name, base_dir=dir_name)
1820 if status:
1821 diagnose = ""
1822 variants = os_inst.supported_variants
1823 parameters = os_inst.supported_parameters
1824 api_versions = os_inst.api_versions
1825 else:
1826 diagnose = os_inst
1827 variants = parameters = api_versions = []
1828 result.append((name, os_path, status, diagnose, variants,
1829 parameters, api_versions))
1830
1831 return result
1832
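# Illustrative sketch, not part of the original module: one element of the
# list returned above for a healthy OS definition (values are made up).
#
#   ("debootstrap", "/srv/ganeti/os/debootstrap", True, "",
#    ["default", "minimal"], [["dhcp", "Whether to enable DHCP"]], [20])
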
def _TryOSFromDisk(name, base_dir=None):
1835 """Create an OS instance from disk.
1836
1837 This function will return an OS instance if the given name is a
1838 valid OS name.
1839
1840 @type base_dir: string
1841 @keyword base_dir: Base directory containing OS installations.
1842 Defaults to a search in all the OS_SEARCH_PATH dirs.
1843 @rtype: tuple
1844 @return: success and either the OS instance if we find a valid one,
1845 or error message
1846
1847 """
1848 if base_dir is None:
1849 os_dir = utils.FindFile(name, constants.OS_SEARCH_PATH, os.path.isdir)
1850 else:
1851 os_dir = utils.FindFile(name, [base_dir], os.path.isdir)
1852
1853 if os_dir is None:
1854 return False, "Directory for OS %s not found in search path" % name
1855
1856 status, api_versions = _OSOndiskAPIVersion(os_dir)
1857 if not status:
1858
1859 return status, api_versions
1860
1861 if not constants.OS_API_VERSIONS.intersection(api_versions):
1862 return False, ("API version mismatch for path '%s': found %s, want %s." %
1863 (os_dir, api_versions, constants.OS_API_VERSIONS))
1864
1865
1866 os_files = dict.fromkeys(constants.OS_SCRIPTS)
1867
1868 if max(api_versions) >= constants.OS_API_V15:
1869 os_files[constants.OS_VARIANTS_FILE] = ''
1870
1871 if max(api_versions) >= constants.OS_API_V20:
1872 os_files[constants.OS_PARAMETERS_FILE] = ''
1873 else:
1874 del os_files[constants.OS_SCRIPT_VERIFY]
1875
1876 for filename in os_files:
1877 os_files[filename] = utils.PathJoin(os_dir, filename)
1878
1879 try:
1880 st = os.stat(os_files[filename])
1881 except EnvironmentError, err:
1882 return False, ("File '%s' under path '%s' is missing (%s)" %
1883 (filename, os_dir, _ErrnoOrStr(err)))
1884
1885 if not stat.S_ISREG(stat.S_IFMT(st.st_mode)):
1886 return False, ("File '%s' under path '%s' is not a regular file" %
1887 (filename, os_dir))
1888
1889 if filename in constants.OS_SCRIPTS:
1890 if stat.S_IMODE(st.st_mode) & stat.S_IXUSR != stat.S_IXUSR:
1891 return False, ("File '%s' under path '%s' is not executable" %
1892 (filename, os_dir))
1893
1894 variants = []
1895 if constants.OS_VARIANTS_FILE in os_files:
1896 variants_file = os_files[constants.OS_VARIANTS_FILE]
1897 try:
1898 variants = utils.ReadFile(variants_file).splitlines()
1899 except EnvironmentError, err:
1900 return False, ("Error while reading the OS variants file at %s: %s" %
1901 (variants_file, _ErrnoOrStr(err)))
1902 if not variants:
1903 return False, ("No supported os variant found")
1904
1905 parameters = []
1906 if constants.OS_PARAMETERS_FILE in os_files:
1907 parameters_file = os_files[constants.OS_PARAMETERS_FILE]
1908 try:
1909 parameters = utils.ReadFile(parameters_file).splitlines()
1910 except EnvironmentError, err:
1911 return False, ("Error while reading the OS parameters file at %s: %s" %
1912 (parameters_file, _ErrnoOrStr(err)))
1913 parameters = [v.split(None, 1) for v in parameters]
1914
1915 os_obj = objects.OS(name=name, path=os_dir,
1916 create_script=os_files[constants.OS_SCRIPT_CREATE],
1917 export_script=os_files[constants.OS_SCRIPT_EXPORT],
1918 import_script=os_files[constants.OS_SCRIPT_IMPORT],
1919 rename_script=os_files[constants.OS_SCRIPT_RENAME],
1920 verify_script=os_files.get(constants.OS_SCRIPT_VERIFY,
1921 None),
1922 supported_variants=variants,
1923 supported_parameters=parameters,
1924 api_versions=api_versions)
1925 return True, os_obj
1926
def OSFromDisk(name, base_dir=None):
1929 """Create an OS instance from disk.
1930
1931 This function will return an OS instance if the given name is a
1932 valid OS name. Otherwise, it will raise an appropriate
1933 L{RPCFail} exception, detailing why this is not a valid OS.
1934
1935 This is just a wrapper over L{_TryOSFromDisk}, which doesn't raise
1936 an exception but returns true/false status data.
1937
1938 @type base_dir: string
1939 @keyword base_dir: Base directory containing OS installations.
1940 Defaults to a search in all the OS_SEARCH_PATH dirs.
1941 @rtype: L{objects.OS}
1942 @return: the OS instance if we find a valid one
1943 @raise RPCFail: if we don't find a valid OS
1944
1945 """
1946 name_only = objects.OS.GetName(name)
1947 status, payload = _TryOSFromDisk(name_only, base_dir)
1948
1949 if not status:
1950 _Fail(payload)
1951
1952 return payload
1953
1954
1955 def OSCoreEnv(os_name, inst_os, os_params, debug=0):
1956 """Calculate the basic environment for an os script.
1957
1958 @type os_name: str
1959 @param os_name: full operating system name (including variant)
1960 @type inst_os: L{objects.OS}
1961 @param inst_os: operating system for which the environment is being built
1962 @type os_params: dict
1963 @param os_params: the OS parameters
1964 @type debug: integer
1965 @param debug: debug level (0 or 1, for OS Api 10)
1966 @rtype: dict
1967 @return: dict of environment variables
1968 @raise errors.BlockDeviceError: if the block device
1969 cannot be found
1970
1971 """
1972 result = {}
1973 api_version = \
1974 max(constants.OS_API_VERSIONS.intersection(inst_os.api_versions))
1975 result['OS_API_VERSION'] = '%d' % api_version
1976 result['OS_NAME'] = inst_os.name
1977 result['DEBUG_LEVEL'] = '%d' % debug
1978
1979
1980 if api_version >= constants.OS_API_V15:
1981 variant = objects.OS.GetVariant(os_name)
1982 if not variant:
1983 variant = inst_os.supported_variants[0]
1984 result['OS_VARIANT'] = variant
1985
1986
1987 for pname, pvalue in os_params.items():
1988 result['OSP_%s' % pname.upper()] = pvalue
1989
1990 return result
1991
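# Illustrative sketch, not part of the original module: the kind of
# environment OSCoreEnv() builds for an OS named "debootstrap+default" with
# a single OS parameter (all values are made up).
#
#   {
#     "OS_API_VERSION": "20",
#     "OS_NAME": "debootstrap",
#     "OS_VARIANT": "default",
#     "DEBUG_LEVEL": "0",
#     "OSP_DHCP": "yes",
#   }
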
def OSEnvironment(instance, inst_os, debug=0):
1994 """Calculate the environment for an os script.
1995
1996 @type instance: L{objects.Instance}
1997 @param instance: target instance for the os script run
1998 @type inst_os: L{objects.OS}
1999 @param inst_os: operating system for which the environment is being built
2000 @type debug: integer
2001 @param debug: debug level (0 or 1, for OS Api 10)
2002 @rtype: dict
2003 @return: dict of environment variables
2004 @raise errors.BlockDeviceError: if the block device
2005 cannot be found
2006
2007 """
2008 result = OSCoreEnv(instance.os, inst_os, instance.osparams, debug=debug)
2009
2010 result['INSTANCE_NAME'] = instance.name
2011 result['INSTANCE_OS'] = instance.os
2012 result['HYPERVISOR'] = instance.hypervisor
2013 result['DISK_COUNT'] = '%d' % len(instance.disks)
2014 result['NIC_COUNT'] = '%d' % len(instance.nics)
2015
2016
2017 for idx, disk in enumerate(instance.disks):
2018 real_disk = _OpenRealBD(disk)
2019 result['DISK_%d_PATH' % idx] = real_disk.dev_path
2020 result['DISK_%d_ACCESS' % idx] = disk.mode
2021 if constants.HV_DISK_TYPE in instance.hvparams:
2022 result['DISK_%d_FRONTEND_TYPE' % idx] = \
2023 instance.hvparams[constants.HV_DISK_TYPE]
2024 if disk.dev_type in constants.LDS_BLOCK:
2025 result['DISK_%d_BACKEND_TYPE' % idx] = 'block'
2026 elif disk.dev_type == constants.LD_FILE:
2027 result['DISK_%d_BACKEND_TYPE' % idx] = \
2028 'file:%s' % disk.physical_id[0]
2029
2030
2031 for idx, nic in enumerate(instance.nics):
2032 result['NIC_%d_MAC' % idx] = nic.mac
2033 if nic.ip:
2034 result['NIC_%d_IP' % idx] = nic.ip
2035 result['NIC_%d_MODE' % idx] = nic.nicparams[constants.NIC_MODE]
2036 if nic.nicparams[constants.NIC_MODE] == constants.NIC_MODE_BRIDGED:
2037 result['NIC_%d_BRIDGE' % idx] = nic.nicparams[constants.NIC_LINK]
2038 if nic.nicparams[constants.NIC_LINK]:
2039 result['NIC_%d_LINK' % idx] = nic.nicparams[constants.NIC_LINK]
2040 if constants.HV_NIC_TYPE in instance.hvparams:
2041 result['NIC_%d_FRONTEND_TYPE' % idx] = \
2042 instance.hvparams[constants.HV_NIC_TYPE]
2043
2044
2045 for source, kind in [(instance.beparams, "BE"), (instance.hvparams, "HV")]:
2046 for key, value in source.items():
2047 result["INSTANCE_%s_%s" % (kind, key)] = str(value)
2048
2049 return result
2050
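# Standalone sketch of the per-device naming added above: for an instance
# with one disk and one NIC the extra keys follow the DISK_<idx>_* and
# NIC_<idx>_* pattern; the device path and MAC address are invented
# examples, not values produced by a real node.
def _sketch_device_env(disk_paths, nic_macs):
  env = {"DISK_COUNT": "%d" % len(disk_paths),
         "NIC_COUNT": "%d" % len(nic_macs)}
  for idx, path in enumerate(disk_paths):
    env["DISK_%d_PATH" % idx] = path
  for idx, mac in enumerate(nic_macs):
    env["NIC_%d_MAC" % idx] = mac
  return env

assert _sketch_device_env(["/dev/xenvg/disk0"], ["aa:00:00:11:22:33"]) == {
  "DISK_COUNT": "1",
  "NIC_COUNT": "1",
  "DISK_0_PATH": "/dev/xenvg/disk0",
  "NIC_0_MAC": "aa:00:00:11:22:33",
}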
2053 """Grow a stack of block devices.
2054
2055 This function is called recursively, with the children being the
2056 first ones to resize.
2057
2058 @type disk: L{objects.Disk}
2059 @param disk: the disk to be grown
2060 @rtype: (status, result)
2061 @return: a tuple with the status of the operation
2062 (True/False), and the error message if status
2063 is False
2064
2065 """
2066 r_dev = _RecursiveFindBD(disk)
2067 if r_dev is None:
2068 _Fail("Cannot find block device %s", disk)
2069
2070 try:
2071 r_dev.Grow(amount)
2072 except errors.BlockDeviceError, err:
2073 _Fail("Failed to grow block device: %s", err, exc=True)
2074
2077 """Create a snapshot copy of a block device.
2078
2079 This function is called recursively, and the snapshot is actually created
2080 just for the leaf lvm backend device.
2081
2082 @type disk: L{objects.Disk}
2083 @param disk: the disk to be snapshotted
2084 @rtype: string
2085 @return: snapshot disk path
2086
2087 """
2088 if disk.dev_type == constants.LD_DRBD8:
2089 if not disk.children:
2090 _Fail("DRBD device '%s' without backing storage cannot be snapshotted",
2091 disk.unique_id)
2092 return BlockdevSnapshot(disk.children[0])
2093 elif disk.dev_type == constants.LD_LV:
2094 r_dev = _RecursiveFindBD(disk)
2095 if r_dev is not None:
2096
2097
2098 return r_dev.Snapshot(disk.size)
2099 else:
2100 _Fail("Cannot find block device %s", disk)
2101 else:
2102 _Fail("Cannot snapshot non-lvm block device '%s' of type '%s'",
2103 disk.unique_id, disk.dev_type)
2104
2107 """Write out the export configuration information.
2108
2109 @type instance: L{objects.Instance}
2110 @param instance: the instance which we export, used for
2111 saving configuration
2112 @type snap_disks: list of L{objects.Disk}
2113 @param snap_disks: list of snapshot block devices, which
2114 will be used to get the actual name of the dump file
2115
2116 @rtype: None
2117
2118 """
2119 destdir = utils.PathJoin(constants.EXPORT_DIR, instance.name + ".new")
2120 finaldestdir = utils.PathJoin(constants.EXPORT_DIR, instance.name)
2121
2122 config = objects.SerializableConfigParser()
2123
2124 config.add_section(constants.INISECT_EXP)
2125 config.set(constants.INISECT_EXP, 'version', '0')
2126 config.set(constants.INISECT_EXP, 'timestamp', '%d' % int(time.time()))
2127 config.set(constants.INISECT_EXP, 'source', instance.primary_node)
2128 config.set(constants.INISECT_EXP, 'os', instance.os)
2129 config.set(constants.INISECT_EXP, 'compression', 'gzip')
2130
2131 config.add_section(constants.INISECT_INS)
2132 config.set(constants.INISECT_INS, 'name', instance.name)
2133 config.set(constants.INISECT_INS, 'memory', '%d' %
2134 instance.beparams[constants.BE_MEMORY])
2135 config.set(constants.INISECT_INS, 'vcpus', '%d' %
2136 instance.beparams[constants.BE_VCPUS])
2137 config.set(constants.INISECT_INS, 'disk_template', instance.disk_template)
2138 config.set(constants.INISECT_INS, 'hypervisor', instance.hypervisor)
2139
2140 nic_total = 0
2141 for nic_count, nic in enumerate(instance.nics):
2142 nic_total += 1
2143 config.set(constants.INISECT_INS, 'nic%d_mac' %
2144 nic_count, '%s' % nic.mac)
2145 config.set(constants.INISECT_INS, 'nic%d_ip' % nic_count, '%s' % nic.ip)
2146 for param in constants.NICS_PARAMETER_TYPES:
2147 config.set(constants.INISECT_INS, 'nic%d_%s' % (nic_count, param),
2148 '%s' % nic.nicparams.get(param, None))
2149
2150 config.set(constants.INISECT_INS, 'nic_count', '%d' % nic_total)
2151
2152 disk_total = 0
2153 for disk_count, disk in enumerate(snap_disks):
2154 if disk:
2155 disk_total += 1
2156 config.set(constants.INISECT_INS, 'disk%d_ivname' % disk_count,
2157 ('%s' % disk.iv_name))
2158 config.set(constants.INISECT_INS, 'disk%d_dump' % disk_count,
2159 ('%s' % disk.physical_id[1]))
2160 config.set(constants.INISECT_INS, 'disk%d_size' % disk_count,
2161 ('%d' % disk.size))
2162
2163 config.set(constants.INISECT_INS, 'disk_count', '%d' % disk_total)
2164
2165
2166
2167 config.add_section(constants.INISECT_HYP)
2168 for name, value in instance.hvparams.items():
2169 if name not in constants.HVC_GLOBALS:
2170 config.set(constants.INISECT_HYP, name, str(value))
2171
2172 config.add_section(constants.INISECT_BEP)
2173 for name, value in instance.beparams.items():
2174 config.set(constants.INISECT_BEP, name, str(value))
2175
2176 config.add_section(constants.INISECT_OSP)
2177 for name, value in instance.osparams.items():
2178 config.set(constants.INISECT_OSP, name, str(value))
2179
2180 utils.WriteFile(utils.PathJoin(destdir, constants.EXPORT_CONF_FILE),
2181 data=config.Dumps())
2182 shutil.rmtree(finaldestdir, ignore_errors=True)
2183 shutil.move(destdir, finaldestdir)
2184
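# Hedged sketch of how a consumer could read the file written above.  The
# section and option names mirror the constants.INISECT_*/config.set()
# calls in the function, but the literal strings "export"/"instance" and
# the example path are assumptions, not values taken from constants.py.
import ConfigParser

def _read_export_info(filename):
  cfg = ConfigParser.SafeConfigParser()
  cfg.read([filename])
  return {
    "os": cfg.get("export", "os"),
    "name": cfg.get("instance", "name"),
    "disk_count": cfg.getint("instance", "disk_count"),
  }

# e.g. _read_export_info("/var/lib/ganeti/export/inst1.example.com/config.ini")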
2207
2220
2223 """Remove an existing export from the node.
2224
2225 @type export: str
2226 @param export: the name of the export to remove
2227 @rtype: None
2228
2229 """
2230 target = utils.PathJoin(constants.EXPORT_DIR, export)
2231
2232 try:
2233 shutil.rmtree(target)
2234 except EnvironmentError, err:
2235 _Fail("Error while removing the export: %s", err, exc=True)
2236
2239 """Rename a list of block devices.
2240
2241 @type devlist: list of tuples
2242 @param devlist: list of tuples of the form (disk,
2243 new_unique_id); disk is an
2244 L{objects.Disk} object describing the current disk,
2245 and new_unique_id is the logical id we
2246 rename it to
2248 @rtype: None
2249 @raise RPCFail: if any of the renames fail
2249
2250 """
2251 msgs = []
2252 result = True
2253 for disk, unique_id in devlist:
2254 dev = _RecursiveFindBD(disk)
2255 if dev is None:
2256 msgs.append("Can't find device %s in rename" % str(disk))
2257 result = False
2258 continue
2259 try:
2260 old_rpath = dev.dev_path
2261 dev.Rename(unique_id)
2262 new_rpath = dev.dev_path
2263 if old_rpath != new_rpath:
2264 DevCacheManager.RemoveCache(old_rpath)
2265
2266
2267
2268
2269
2270 except errors.BlockDeviceError, err:
2271 msgs.append("Can't rename device '%s' to '%s': %s" %
2272 (dev, unique_id, err))
2273 logging.exception("Can't rename device '%s' to '%s'", dev, unique_id)
2274 result = False
2275 if not result:
2276 _Fail("; ".join(msgs))
2277
2302
2305 """Create file storage directory.
2306
2307 @type file_storage_dir: str
2308 @param file_storage_dir: directory to create
2309
2310 @rtype: tuple
2311 @return: tuple with first element a boolean indicating whether dir
2312 creation was successful or not
2313
2314 """
2315 file_storage_dir = _TransformFileStorageDir(file_storage_dir)
2316 if os.path.exists(file_storage_dir):
2317 if not os.path.isdir(file_storage_dir):
2318 _Fail("Specified storage dir '%s' is not a directory",
2319 file_storage_dir)
2320 else:
2321 try:
2322 os.makedirs(file_storage_dir, 0750)
2323 except OSError, err:
2324 _Fail("Cannot create file storage directory '%s': %s",
2325 file_storage_dir, err, exc=True)
2326
2329 """Remove file storage directory.
2330
2331 Remove it only if it's empty. If not, log an error and return.
2332
2333 @type file_storage_dir: str
2334 @param file_storage_dir: the directory we should cleanup
2335 @rtype: tuple (success,)
2336 @return: tuple of one element, C{success}, denoting
2337 whether the operation was successful
2338
2339 """
2340 file_storage_dir = _TransformFileStorageDir(file_storage_dir)
2341 if os.path.exists(file_storage_dir):
2342 if not os.path.isdir(file_storage_dir):
2343 _Fail("Specified Storage directory '%s' is not a directory",
2344 file_storage_dir)
2345
2346 try:
2347 os.rmdir(file_storage_dir)
2348 except OSError, err:
2349 _Fail("Cannot remove file storage directory '%s': %s",
2350 file_storage_dir, err)
2351
2354 """Rename the file storage directory.
2355
2356 @type old_file_storage_dir: str
2357 @param old_file_storage_dir: the current path
2358 @type new_file_storage_dir: str
2359 @param new_file_storage_dir: the name we should rename to
2360 @rtype: tuple (success,)
2361 @return: tuple of one element, C{success}, denoting
2362 whether the operation was successful
2363
2364 """
2365 old_file_storage_dir = _TransformFileStorageDir(old_file_storage_dir)
2366 new_file_storage_dir = _TransformFileStorageDir(new_file_storage_dir)
2367 if not os.path.exists(new_file_storage_dir):
2368 if os.path.isdir(old_file_storage_dir):
2369 try:
2370 os.rename(old_file_storage_dir, new_file_storage_dir)
2371 except OSError, err:
2372 _Fail("Cannot rename '%s' to '%s': %s",
2373 old_file_storage_dir, new_file_storage_dir, err)
2374 else:
2375 _Fail("Specified storage dir '%s' is not a directory",
2376 old_file_storage_dir)
2377 else:
2378 if os.path.exists(old_file_storage_dir):
2379 _Fail("Cannot rename '%s' to '%s': both locations exist",
2380 old_file_storage_dir, new_file_storage_dir)
2381
2384 """Checks whether the given filename is in the queue directory.
2385
2386 @type file_name: str
2387 @param file_name: the file name we should check
2388 @rtype: None
2389 @raises RPCFail: if the file is not valid
2390
2391 """
2392 queue_dir = os.path.normpath(constants.QUEUE_DIR)
2393 result = (os.path.commonprefix([queue_dir, file_name]) == queue_dir)
2394
2395 if not result:
2396 _Fail("Passed job queue file '%s' does not belong to"
2397 " the queue directory '%s'", file_name, queue_dir)
2398
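# Minimal standalone sketch of the containment test above, with a
# hypothetical queue directory standing in for constants.QUEUE_DIR;
# os.path.commonprefix() compares strings, so callers are expected to
# pass normalized absolute paths.
import os.path

def _in_dir(directory, file_name):
  directory = os.path.normpath(directory)
  return os.path.commonprefix([directory, file_name]) == directory

assert _in_dir("/var/lib/ganeti/queue", "/var/lib/ganeti/queue/job-1")
assert not _in_dir("/var/lib/ganeti/queue", "/etc/passwd")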
2401 """Updates a file in the queue directory.
2402
2403 This is just a wrapper over L{utils.WriteFile}, with proper
2404 checking.
2405
2406 @type file_name: str
2407 @param file_name: the job file name
2408 @type content: str
2409 @param content: the new job contents
2410 @rtype: boolean
2411 @return: the success of the operation
2412
2413 """
2414 _EnsureJobQueueFile(file_name)
2415
2416
2417 utils.WriteFile(file_name, data=_Decompress(content))
2418
2421 """Renames a job queue file.
2422
2423 This is just a wrapper over os.rename with proper checking.
2424
2425 @type old: str
2426 @param old: the old (actual) file name
2427 @type new: str
2428 @param new: the desired file name
2429 @rtype: tuple
2430 @return: the success of the operation and payload
2431
2432 """
2433 _EnsureJobQueueFile(old)
2434 _EnsureJobQueueFile(new)
2435
2436 utils.RenameFile(old, new, mkdir=True)
2437
2440 """Closes the given block devices.
2441
2442 This means they will be switched to secondary mode (in case of
2443 DRBD).
2444
2445 @param instance_name: if the argument is not empty, the symlinks
2446 of this instance will be removed
2447 @type disks: list of L{objects.Disk}
2448 @param disks: the list of disks to be closed
2449 @rtype: tuple (success, message)
2450 @return: a tuple of success and message, where success
2451 indicates the success of the operation, and message
2452 which will contain the error details in case we
2453 failed
2454
2455 """
2456 bdevs = []
2457 for cf in disks:
2458 rd = _RecursiveFindBD(cf)
2459 if rd is None:
2460 _Fail("Can't find device %s", cf)
2461 bdevs.append(rd)
2462
2463 msg = []
2464 for rd in bdevs:
2465 try:
2466 rd.Close()
2467 except errors.BlockDeviceError, err:
2468 msg.append(str(err))
2469 if msg:
2470 _Fail("Can't make devices secondary: %s", ",".join(msg))
2471 else:
2472 if instance_name:
2473 _RemoveBlockDevLinks(instance_name, disks)
2474
2477 """Validates the given hypervisor parameters.
2478
2479 @type hvname: string
2480 @param hvname: the hypervisor name
2481 @type hvparams: dict
2482 @param hvparams: the hypervisor parameters to be validated
2483 @rtype: None
2484
2485 """
2486 try:
2487 hv_type = hypervisor.GetHypervisor(hvname)
2488 hv_type.ValidateParameters(hvparams)
2489 except errors.HypervisorError, err:
2490 _Fail(str(err), log=False)
2491
2494 """Check whether a list of parameters is supported by the OS.
2495
2496 @type os_obj: L{objects.OS}
2497 @param os_obj: OS object to check
2498 @type parameters: list
2499 @param parameters: the list of parameters to check
2500
2501 """
2502 supported = [v[0] for v in os_obj.supported_parameters]
2503 delta = frozenset(parameters).difference(supported)
2504 if delta:
2505 _Fail("The following parameters are not supported"
2506 " by the OS %s: %s" % (os_obj.name, utils.CommaJoin(delta)))
2507
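# Standalone sketch of the check above: supported_parameters holds
# (name, documentation) pairs, so only the first element of each pair is
# compared; the parameter names used here are invented examples.
supported = [("dhcp", "whether to use DHCP"), ("mirror_count", "...")]
wanted = ["dhcp", "filesystem"]
unsupported = frozenset(wanted).difference(v[0] for v in supported)
assert unsupported == frozenset(["filesystem"])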
2508
2509 -def ValidateOS(required, osname, checks, osparams):
2510 """Validate the given OS' parameters.
2511
2512 @type required: boolean
2513 @param required: whether absence of the OS should translate into
2514 failure or not
2515 @type osname: string
2516 @param osname: the OS to be validated
2517 @type checks: list
2518 @param checks: list of the checks to run (currently only 'parameters')
2519 @type osparams: dict
2520 @param osparams: dictionary with OS parameters
2521 @rtype: boolean
2522 @return: True if the validation passed, or False if the OS was not
2523 found and L{required} was false
2524
2525 """
2526 if not constants.OS_VALIDATE_CALLS.issuperset(checks):
2527 _Fail("Unknown checks required for OS %s: %s", osname,
2528 set(checks).difference(constants.OS_VALIDATE_CALLS))
2529
2530 name_only = objects.OS.GetName(osname)
2531 status, tbv = _TryOSFromDisk(name_only, None)
2532
2533 if not status:
2534 if required:
2535 _Fail(tbv)
2536 else:
2537 return False
2538
2539 if max(tbv.api_versions) < constants.OS_API_V20:
2540 return True
2541
2542 if constants.OS_VALIDATE_PARAMETERS in checks:
2543 _CheckOSPList(tbv, osparams.keys())
2544
2545 validate_env = OSCoreEnv(osname, tbv, osparams)
2546 result = utils.RunCmd([tbv.verify_script] + checks, env=validate_env,
2547 cwd=tbv.path)
2548 if result.failed:
2549 logging.error("os validate command '%s' returned error: %s output: %s",
2550 result.cmd, result.fail_reason, result.output)
2551 _Fail("OS validation script failed (%s), output: %s",
2552 result.fail_reason, result.output, log=False)
2553
2554 return True
2555
2578
2587
2590 """Creates a new X509 certificate for SSL/TLS.
2591
2592 @type validity: int
2593 @param validity: Validity in seconds
2594 @rtype: tuple; (string, string)
2595 @return: Certificate name and public part
2596
2597 """
2598 (key_pem, cert_pem) = \
2599 utils.GenerateSelfSignedX509Cert(netutils.HostInfo.SysName(),
2600 min(validity, _MAX_SSL_CERT_VALIDITY))
2601
2602 cert_dir = tempfile.mkdtemp(dir=cryptodir,
2603 prefix="x509-%s-" % utils.TimestampForFilename())
2604 try:
2605 name = os.path.basename(cert_dir)
2606 assert len(name) > 5
2607
2608 (_, key_file, cert_file) = _GetX509Filenames(cryptodir, name)
2609
2610 utils.WriteFile(key_file, mode=0400, data=key_pem)
2611 utils.WriteFile(cert_file, mode=0400, data=cert_pem)
2612
2613
2614 return (name, cert_pem)
2615 except Exception:
2616 shutil.rmtree(cert_dir, ignore_errors=True)
2617 raise
2618
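# Hedged usage sketch: the returned name is the handle later passed to
# RemoveX509Certificate().  Running this requires a node with the ganeti
# package installed and a writable crypto directory; the "ganeti.backend"
# import path and the one-hour validity are assumptions for illustration.
def _example_with_temp_cert(use_cert_pem):
  from ganeti import backend
  (cert_name, cert_pem) = backend.CreateX509Certificate(3600)
  try:
    return use_cert_pem(cert_pem)
  finally:
    backend.RemoveX509Certificate(cert_name)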
2621 """Removes a X509 certificate.
2622
2623 @type name: string
2624 @param name: Certificate name
2625
2626 """
2627 (cert_dir, key_file, cert_file) = _GetX509Filenames(cryptodir, name)
2628
2629 utils.RemoveFile(key_file)
2630 utils.RemoveFile(cert_file)
2631
2632 try:
2633 os.rmdir(cert_dir)
2634 except EnvironmentError, err:
2635 _Fail("Cannot remove certificate directory '%s': %s",
2636 cert_dir, err)
2637
2640 """Returns the command for the requested input/output.
2641
2642 @type instance: L{objects.Instance}
2643 @param instance: The instance object
2644 @param mode: Import/export mode
2645 @param ieio: Input/output type
2646 @param ieargs: Input/output arguments
2647
2648 """
2649 assert mode in (constants.IEM_IMPORT, constants.IEM_EXPORT)
2650
2651 env = None
2652 prefix = None
2653 suffix = None
2654 exp_size = None
2655
2656 if ieio == constants.IEIO_FILE:
2657 (filename, ) = ieargs
2658
2659 if not utils.IsNormAbsPath(filename):
2660 _Fail("Path '%s' is not normalized or absolute", filename)
2661
2662 directory = os.path.normpath(os.path.dirname(filename))
2663
2664 if (os.path.commonprefix([constants.EXPORT_DIR, directory]) !=
2665 constants.EXPORT_DIR):
2666 _Fail("File '%s' is not under exports directory '%s'",
2667 filename, constants.EXPORT_DIR)
2668
2669
2670 utils.Makedirs(directory, mode=0750)
2671
2672 quoted_filename = utils.ShellQuote(filename)
2673
2674 if mode == constants.IEM_IMPORT:
2675 suffix = "> %s" % quoted_filename
2676 elif mode == constants.IEM_EXPORT:
2677 suffix = "< %s" % quoted_filename
2678
2679
2680 try:
2681 st = os.stat(filename)
2682 except EnvironmentError, err:
2683 logging.error("Can't stat(2) %s: %s", filename, err)
2684 else:
2685 exp_size = utils.BytesToMebibyte(st.st_size)
2686
2687 elif ieio == constants.IEIO_RAW_DISK:
2688 (disk, ) = ieargs
2689
2690 real_disk = _OpenRealBD(disk)
2691
2692 if mode == constants.IEM_IMPORT:
2693
2694
2695
2696
2697
2698
2699 suffix = utils.BuildShellCmd(("| dd of=%s conv=nocreat,notrunc"
2700 " bs=%s oflag=dsync"),
2701 real_disk.dev_path,
2702 str(64 * 1024))
2703
2704 elif mode == constants.IEM_EXPORT:
2705
2706 prefix = utils.BuildShellCmd("dd if=%s bs=%s count=%s |",
2707 real_disk.dev_path,
2708 str(1024 * 1024),
2709 str(disk.size))
2710 exp_size = disk.size
2711
2712 elif ieio == constants.IEIO_SCRIPT:
2713 (disk, disk_index, ) = ieargs
2714
2715 assert isinstance(disk_index, (int, long))
2716
2717 real_disk = _OpenRealBD(disk)
2718
2719 inst_os = OSFromDisk(instance.os)
2720 env = OSEnvironment(instance, inst_os)
2721
2722 if mode == constants.IEM_IMPORT:
2723 env["IMPORT_DEVICE"] = env["DISK_%d_PATH" % disk_index]
2724 env["IMPORT_INDEX"] = str(disk_index)
2725 script = inst_os.import_script
2726
2727 elif mode == constants.IEM_EXPORT:
2728 env["EXPORT_DEVICE"] = real_disk.dev_path
2729 env["EXPORT_INDEX"] = str(disk_index)
2730 script = inst_os.export_script
2731
2732
2733 script_cmd = utils.BuildShellCmd("( cd %s && %s; )", inst_os.path, script)
2734
2735 if mode == constants.IEM_IMPORT:
2736 suffix = "| %s" % script_cmd
2737
2738 elif mode == constants.IEM_EXPORT:
2739 prefix = "%s |" % script_cmd
2740
2741
2742 exp_size = constants.IE_CUSTOM_SIZE
2743
2744 else:
2745 _Fail("Invalid %s I/O mode %r", mode, ieio)
2746
2747 return (env, prefix, suffix, exp_size)
2748
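# Standalone sketch of how the returned prefix/suffix are expected to be
# spliced around the daemon's transport command into one shell pipeline.
# Both the transport command and the dd arguments below are made-up
# examples; only the prefix/suffix composition mirrors the code above.
def _compose(cmd_prefix, transport_cmd, cmd_suffix):
  parts = [transport_cmd]
  if cmd_prefix:
    parts.insert(0, cmd_prefix)
  if cmd_suffix:
    parts.append(cmd_suffix)
  return " ".join(parts)

assert (_compose("dd if=/dev/xenvg/disk0 bs=1048576 count=1024 |",
                 "socat - TCP:198.51.100.1:1234",
                 None) ==
        "dd if=/dev/xenvg/disk0 bs=1048576 count=1024 |"
        " socat - TCP:198.51.100.1:1234")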
2757
2760 """Starts an import or export daemon.
2761
2762 @param mode: Import/export mode
2763 @type opts: L{objects.ImportExportOptions}
2764 @param opts: Daemon options
2765 @type host: string
2766 @param host: Remote host for export (None for import)
2767 @type port: int
2768 @param port: Remote port for export (None for import)
2769 @type instance: L{objects.Instance}
2770 @param instance: Instance object
2771 @param ieio: Input/output type
2772 @param ieioargs: Input/output arguments
2773
2774 """
2775 if mode == constants.IEM_IMPORT:
2776 prefix = "import"
2777
2778 if not (host is None and port is None):
2779 _Fail("Can not specify host or port on import")
2780
2781 elif mode == constants.IEM_EXPORT:
2782 prefix = "export"
2783
2784 if host is None or port is None:
2785 _Fail("Host and port must be specified for an export")
2786
2787 else:
2788 _Fail("Invalid mode %r", mode)
2789
2790 if (opts.key_name is None) ^ (opts.ca_pem is None):
2791 _Fail("Cluster certificate can only be used for both key and CA")
2792
2793 (cmd_env, cmd_prefix, cmd_suffix, exp_size) = \
2794 _GetImportExportIoCommand(instance, mode, ieio, ieioargs)
2795
2796 if opts.key_name is None:
2797
2798 key_path = constants.NODED_CERT_FILE
2799 cert_path = constants.NODED_CERT_FILE
2800 assert opts.ca_pem is None
2801 else:
2802 (_, key_path, cert_path) = _GetX509Filenames(constants.CRYPTO_KEYS_DIR,
2803 opts.key_name)
2804 assert opts.ca_pem is not None
2805
2806 for i in [key_path, cert_path]:
2807 if not os.path.exists(i):
2808 _Fail("File '%s' does not exist" % i)
2809
2810 status_dir = _CreateImportExportStatusDir(prefix)
2811 try:
2812 status_file = utils.PathJoin(status_dir, _IES_STATUS_FILE)
2813 pid_file = utils.PathJoin(status_dir, _IES_PID_FILE)
2814 ca_file = utils.PathJoin(status_dir, _IES_CA_FILE)
2815
2816 if opts.ca_pem is None:
2817
2818 ca = utils.ReadFile(constants.NODED_CERT_FILE)
2819 else:
2820 ca = opts.ca_pem
2821
2822
2823 utils.WriteFile(ca_file, data=ca, mode=0400)
2824
2825 cmd = [
2826 constants.IMPORT_EXPORT_DAEMON,
2827 status_file, mode,
2828 "--key=%s" % key_path,
2829 "--cert=%s" % cert_path,
2830 "--ca=%s" % ca_file,
2831 ]
2832
2833 if host:
2834 cmd.append("--host=%s" % host)
2835
2836 if port:
2837 cmd.append("--port=%s" % port)
2838
2839 if opts.compress:
2840 cmd.append("--compress=%s" % opts.compress)
2841
2842 if opts.magic:
2843 cmd.append("--magic=%s" % opts.magic)
2844
2845 if exp_size is not None:
2846 cmd.append("--expected-size=%s" % exp_size)
2847
2848 if cmd_prefix:
2849 cmd.append("--cmd-prefix=%s" % cmd_prefix)
2850
2851 if cmd_suffix:
2852 cmd.append("--cmd-suffix=%s" % cmd_suffix)
2853
2854 logfile = _InstanceLogName(prefix, instance.os, instance.name)
2855
2856
2857
2858 utils.StartDaemon(cmd, env=cmd_env, pidfile=pid_file,
2859 output=logfile)
2860
2861
2862 return os.path.basename(status_dir)
2863
2864 except Exception:
2865 shutil.rmtree(status_dir, ignore_errors=True)
2866 raise
2867
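# Hedged illustration of the kind of argument vector assembled above for
# an export: only the option names come from the code; the daemon name,
# paths, host, port and sizes are invented examples.
_EXAMPLE_EXPORT_CMD = [
  "import-export",                      # stands in for IMPORT_EXPORT_DAEMON
  "/var/run/ganeti/import-export/export-xyz/status", "export",
  "--key=/path/to/key", "--cert=/path/to/cert",
  "--ca=/var/run/ganeti/import-export/export-xyz/ca",
  "--host=198.51.100.1", "--port=1234",
  "--cmd-prefix=dd if=/dev/xenvg/disk0 bs=1048576 count=1024 |",
  "--expected-size=1024",
]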
2870 """Returns import/export daemon status.
2871
2872 @type names: sequence
2873 @param names: List of names
2874 @rtype: List of dicts
2875 @return: a list with the state of each named import/export; an entry is
2876 None if its status couldn't be read
2877
2878 """
2879 result = []
2880
2881 for name in names:
2882 status_file = utils.PathJoin(constants.IMPORT_EXPORT_DIR, name,
2883 _IES_STATUS_FILE)
2884
2885 try:
2886 data = utils.ReadFile(status_file)
2887 except EnvironmentError, err:
2888 if err.errno != errno.ENOENT:
2889 raise
2890 data = None
2891
2892 if not data:
2893 result.append(None)
2894 continue
2895
2896 result.append(serializer.LoadJson(data))
2897
2898 return result
2899
2914
2917 """Cleanup after an import or export.
2918
2919 If the import/export daemon is still running, it's killed. Afterwards the
2920 whole status directory is removed.
2921
2922 """
2923 logging.info("Finalizing import/export %s", name)
2924
2925 status_dir = utils.PathJoin(constants.IMPORT_EXPORT_DIR, name)
2926
2927 pid = utils.ReadLockedPidFile(utils.PathJoin(status_dir, _IES_PID_FILE))
2928
2929 if pid:
2930 logging.info("Import/export %s is still running with PID %s",
2931 name, pid)
2932 utils.KillProcess(pid, waitpid=False)
2933
2934 shutil.rmtree(status_dir, ignore_errors=True)
2935
2938 """Sets the physical ID on disks and returns the block devices.
2939
2940 """
2941
2942 my_name = netutils.HostInfo().name
2943 for cf in disks:
2944 cf.SetPhysicalID(my_name, nodes_ip)
2945
2946 bdevs = []
2947
2948 for cf in disks:
2949 rd = _RecursiveFindBD(cf)
2950 if rd is None:
2951 _Fail("Can't find device %s", cf)
2952 bdevs.append(rd)
2953 return bdevs
2954
2957 """Disconnects the network on a list of drbd devices.
2958
2959 """
2960 bdevs = _FindDisks(nodes_ip, disks)
2961
2962
2963 for rd in bdevs:
2964 try:
2965 rd.DisconnectNet()
2966 except errors.BlockDeviceError, err:
2967 _Fail("Can't change network configuration to standalone mode: %s",
2968 err, exc=True)
2969
2970
2971 -def DrbdAttachNet(nodes_ip, disks, instance_name, multimaster):
2972 """Attaches the network on a list of drbd devices.
2973
2974 """
2975 bdevs = _FindDisks(nodes_ip, disks)
2976
2977 if multimaster:
2978 for idx, rd in enumerate(bdevs):
2979 try:
2980 _SymlinkBlockDev(instance_name, rd.dev_path, idx)
2981 except EnvironmentError, err:
2982 _Fail("Can't create symlink: %s", err)
2983
2984
2985 for rd in bdevs:
2986 try:
2987 rd.AttachNet(multimaster)
2988 except errors.BlockDeviceError, err:
2989 _Fail("Can't change network configuration: %s", err)
2990
2991
2992
2993
2994
2995
2996
2997 def _Attach():
2998 all_connected = True
2999
3000 for rd in bdevs:
3001 stats = rd.GetProcStatus()
3002
3003 all_connected = (all_connected and
3004 (stats.is_connected or stats.is_in_resync))
3005
3006 if stats.is_standalone:
3007
3008
3009
3010 try:
3011 rd.AttachNet(multimaster)
3012 except errors.BlockDeviceError, err:
3013 _Fail("Can't change network configuration: %s", err)
3014
3015 if not all_connected:
3016 raise utils.RetryAgain()
3017
3018 try:
3019
3020 utils.Retry(_Attach, (0.1, 1.5, 5.0), 2 * 60)
3021 except utils.RetryTimeout:
3022 _Fail("Timeout in disk reconnecting")
3023
3024 if multimaster:
3025
3026 for rd in bdevs:
3027 try:
3028 rd.Open()
3029 except errors.BlockDeviceError, err:
3030 _Fail("Can't change to primary mode: %s", err)
3031
3034 """Wait until DRBDs have synchronized.
3035
3036 """
3037 def _helper(rd):
3038 stats = rd.GetProcStatus()
3039 if not (stats.is_connected or stats.is_in_resync):
3040 raise utils.RetryAgain()
3041 return stats
3042
3043 bdevs = _FindDisks(nodes_ip, disks)
3044
3045 min_resync = 100
3046 alldone = True
3047 for rd in bdevs:
3048 try:
3049
3050 stats = utils.Retry(_helper, 1, 15, args=[rd])
3051 except utils.RetryTimeout:
3052 stats = rd.GetProcStatus()
3053
3054 if not (stats.is_connected or stats.is_in_resync):
3055 _Fail("DRBD device %s is not in sync: stats=%s", rd, stats)
3056 alldone = alldone and (not stats.is_in_resync)
3057 if stats.sync_percent is not None:
3058 min_resync = min(min_resync, stats.sync_percent)
3059
3060 return (alldone, min_resync)
3061
3071
3074 """Hard-powercycle the node.
3075
3076 Because we need to return first, and schedule the powercycle in the
3077 background, we won't be able to report failures nicely.
3078
3079 """
3080 hyper = hypervisor.GetHypervisor(hypervisor_type)
3081 try:
3082 pid = os.fork()
3083 except OSError:
3084
3085 pid = 0
3086 if pid > 0:
3087 return "Reboot scheduled in 5 seconds"
3088
3089 try:
3090 utils.Mlockall()
3091 except Exception:
3092 pass
3093 time.sleep(5)
3094 hyper.PowercycleNode()
3095
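# Standalone sketch of the fork-and-return pattern used above: the parent
# process answers immediately while the child sleeps and then performs
# the disruptive action; the 5 second delay mirrors the code above and
# the helper name is purely illustrative.
import os
import time

def _run_detached(action, delay=5):
  pid = os.fork()
  if pid > 0:
    # parent: report right away so the RPC can return
    return "scheduled in %d seconds" % delay
  # child: wait, act, and never return to the caller's code path
  time.sleep(delay)
  action()
  os._exit(0)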
3098 """Hook runner.
3099
3100 This class is instantiated on the node side (ganeti-noded) and not
3101 on the master side.
3102
3103 """
3104 - def __init__(self, hooks_base_dir=None):
3105 """Constructor for hooks runner.
3106
3107 @type hooks_base_dir: str or None
3108 @param hooks_base_dir: if not None, this overrides the
3109 L{constants.HOOKS_BASE_DIR} (useful for unittests)
3110
3111 """
3112 if hooks_base_dir is None:
3113 hooks_base_dir = constants.HOOKS_BASE_DIR
3114
3115
3116 self._BASE_DIR = hooks_base_dir
3117
3118 - def RunHooks(self, hpath, phase, env):
3119 """Run the scripts in the hooks directory.
3120
3121 @type hpath: str
3122 @param hpath: the path to the hooks directory which
3123 holds the scripts
3124 @type phase: str
3125 @param phase: either L{constants.HOOKS_PHASE_PRE} or
3126 L{constants.HOOKS_PHASE_POST}
3127 @type env: dict
3128 @param env: dictionary with the environment for the hook
3129 @rtype: list
3130 @return: list of 3-element tuples:
3131 - script path
3132 - script result, either L{constants.HKR_SUCCESS} or
3133 L{constants.HKR_FAIL}
3134 - output of the script
3135
3136 @raise errors.ProgrammerError: for invalid input
3137 parameters
3138
3139 """
3140 if phase == constants.HOOKS_PHASE_PRE:
3141 suffix = "pre"
3142 elif phase == constants.HOOKS_PHASE_POST:
3143 suffix = "post"
3144 else:
3145 _Fail("Unknown hooks phase '%s'", phase)
3146
3147
3148 subdir = "%s-%s.d" % (hpath, suffix)
3149 dir_name = utils.PathJoin(self._BASE_DIR, subdir)
3150
3151 results = []
3152
3153 if not os.path.isdir(dir_name):
3154
3155
3156 return results
3157
3158 runparts_results = utils.RunParts(dir_name, env=env, reset_env=True)
3159
3160 for (relname, relstatus, runresult) in runparts_results:
3161 if relstatus == constants.RUNPARTS_SKIP:
3162 rrval = constants.HKR_SKIP
3163 output = ""
3164 elif relstatus == constants.RUNPARTS_ERR:
3165 rrval = constants.HKR_FAIL
3166 output = "Hook script execution error: %s" % runresult
3167 elif relstatus == constants.RUNPARTS_RUN:
3168 if runresult.failed:
3169 rrval = constants.HKR_FAIL
3170 else:
3171 rrval = constants.HKR_SUCCESS
3172 output = utils.SafeEncode(runresult.output.strip())
3173 results.append(("%s/%s" % (subdir, relname), rrval, output))
3174
3175 return results
3176
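# Standalone sketch of the directory naming used above: the hooks path
# and the phase suffix are joined into "<hpath>-<phase>.d"; the
# "instance-start" hooks path is a hypothetical example.
def _hooks_subdir(hpath, suffix):
  return "%s-%s.d" % (hpath, suffix)

assert _hooks_subdir("instance-start", "pre") == "instance-start-pre.d"
assert _hooks_subdir("instance-start", "post") == "instance-start-post.d"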
3179 """IAllocator runner.
3180
3181 This class is instantiated on the node side (ganeti-noded) and not on
3182 the master side.
3183
3184 """
3185 @staticmethod
3186 - def Run(name, idata):
3187 """Run an iallocator script.
3188
3189 @type name: str
3190 @param name: the iallocator script name
3191 @type idata: str
3192 @param idata: the allocator input data
3193
3194 @rtype: tuple
3195 @return: two element tuple of:
3196 - status
3197 - either error message or stdout of allocator (for success)
3198
3199 """
3200 alloc_script = utils.FindFile(name, constants.IALLOCATOR_SEARCH_PATH,
3201 os.path.isfile)
3202 if alloc_script is None:
3203 _Fail("iallocator module '%s' not found in the search path", name)
3204
3205 fd, fin_name = tempfile.mkstemp(prefix="ganeti-iallocator.")
3206 try:
3207 os.write(fd, idata)
3208 os.close(fd)
3209 result = utils.RunCmd([alloc_script, fin_name])
3210 if result.failed:
3211 _Fail("iallocator module '%s' failed: %s, output '%s'",
3212 name, result.fail_reason, result.output)
3213 finally:
3214 os.unlink(fin_name)
3215
3216 return result.stdout
3217
3220 """Simple class for managing a cache of block device information.
3221
3222 """
3223 _DEV_PREFIX = "/dev/"
3224 _ROOT_DIR = constants.BDEV_CACHE_DIR
3225
3226 @classmethod
3228 """Converts a /dev/name path to the cache file name.
3229
3230 This replaces slashes with underscores and strips the /dev
3231 prefix. It then returns the full path to the cache file.
3232
3233 @type dev_path: str
3234 @param dev_path: the C{/dev/} path name
3235 @rtype: str
3236 @return: the converted path name
3237
3238 """
3239 if dev_path.startswith(cls._DEV_PREFIX):
3240 dev_path = dev_path[len(cls._DEV_PREFIX):]
3241 dev_path = dev_path.replace("/", "_")
3242 fpath = utils.PathJoin(cls._ROOT_DIR, "bdev_%s" % dev_path)
3243 return fpath
3244
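# Standalone sketch of the conversion above, with a hypothetical cache
# directory standing in for constants.BDEV_CACHE_DIR; the device path is
# an invented example.
def _sketch_convert(dev_path, root="/var/run/ganeti/bdev-cache"):
  if dev_path.startswith("/dev/"):
    dev_path = dev_path[len("/dev/"):]
  return "%s/bdev_%s" % (root, dev_path.replace("/", "_"))

assert (_sketch_convert("/dev/xenvg/disk0") ==
        "/var/run/ganeti/bdev-cache/bdev_xenvg_disk0")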
3245 @classmethod
3246 - def UpdateCache(cls, dev_path, owner, on_primary, iv_name):
3247 """Updates the cache information for a given device.
3248
3249 @type dev_path: str
3250 @param dev_path: the pathname of the device
3251 @type owner: str
3252 @param owner: the owner (instance name) of the device
3253 @type on_primary: bool
3254 @param on_primary: whether this is the primary
3255 node or not
3256 @type iv_name: str
3257 @param iv_name: the instance-visible name of the
3258 device, as in objects.Disk.iv_name
3259
3260 @rtype: None
3261
3262 """
3263 if dev_path is None:
3264 logging.error("DevCacheManager.UpdateCache got a None dev_path")
3265 return
3266 fpath = cls._ConvertPath(dev_path)
3267 if on_primary:
3268 state = "primary"
3269 else:
3270 state = "secondary"
3271 if iv_name is None:
3272 iv_name = "not_visible"
3273 fdata = "%s %s %s\n" % (str(owner), state, iv_name)
3274 try:
3275 utils.WriteFile(fpath, data=fdata)
3276 except EnvironmentError, err:
3277 logging.exception("Can't update bdev cache for %s: %s", dev_path, err)
3278
3279 @classmethod
3281 """Remove data for a dev_path.
3282
3283 This is just a wrapper over L{utils.RemoveFile} with a converted
3284 path name and logging.
3285
3286 @type dev_path: str
3287 @param dev_path: the pathname of the device
3288
3289 @rtype: None
3290
3291 """
3292 if dev_path is None:
3293 logging.error("DevCacheManager.RemoveCache got a None dev_path")
3294 return
3295 fpath = cls._ConvertPath(dev_path)
3296 try:
3297 utils.RemoveFile(fpath)
3298 except EnvironmentError, err:
3299 logging.exception("Can't update bdev cache for %s: %s", dev_path, err)
3300