31 """Ganeti node daemon"""
32
33
34
35
36
37
38
39
40
41 import os
42 import sys
43 import logging
44 import signal
45 import codecs
46
47 from optparse import OptionParser
48
49 from ganeti import backend
50 from ganeti import constants
51 from ganeti import objects
52 from ganeti import errors
53 from ganeti import jstore
54 from ganeti import daemon
55 from ganeti import http
56 from ganeti import utils
57 from ganeti.storage import container
58 from ganeti import serializer
59 from ganeti import netutils
60 from ganeti import pathutils
61 from ganeti import ssconf
62
63 import ganeti.http.server
64
65
66 queue_lock = None
70 """Extend the reason trail with noded information
71
72 The trail is extended by appending the name of the noded functionality
73 """
74 assert trail is not None
75 trail_source = "%s:%s" % (constants.OPCODE_REASON_SRC_NODED, source)
76 trail.append((trail_source, reason, utils.EpochNano()))
77
80 """Try to prepare the queue lock.
81
82 @return: None for success, otherwise an exception object
83
84 """
85 global queue_lock
86
87 if queue_lock is not None:
88 return None
89
90
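  # Initialize and verify the job queue; this can legitimately fail, in
  # which case the error is returned to the caller instead of being raised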
  try:
    queue_lock = jstore.InitAndVerifyQueue(must_lock=False)
    return None
  except EnvironmentError, err:
    return err

99 """Decorator for job queue manipulating functions.
100
101 """
102 QUEUE_LOCK_TIMEOUT = 10
103
  def wrapper(*args, **kwargs):
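    # The queue must be initialized before it can be locked; the lock is then
    # taken in exclusive, blocking mode, waiting up to QUEUE_LOCK_TIMEOUT
    # seconds, since several processes may try to update the queue at once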
    if _PrepareQueueLock() is not None:
      raise errors.JobQueueError("Job queue failed initialization,"
                                 " cannot update jobs")
    queue_lock.Exclusive(blocking=True, timeout=QUEUE_LOCK_TIMEOUT)
    try:
      return fn(*args, **kwargs)
    finally:
      queue_lock.Unlock()

  return wrapper


def _DefaultAlternative(value, default):
  """Returns value or, if evaluating to False, a default value.

  Returns the given value, unless it evaluates to False. In the latter case
  the default value is returned.

  @param value: Value to return if it doesn't evaluate to False
  @param default: Default value
  @return: Given value or the default

  """
  if value:
    return value

  return default

152 """Subclass ensuring request handlers are locked in RAM.
153
154 """
159
162 """The server implementation.
163
164 This class holds all methods exposed over the RPC interface.
165
166 """
167
168
169
173
175 """Handle a request.
176
177 """
178
179 if req.request_method.upper() != http.HTTP_POST:
180 raise http.HttpBadRequest("Only the POST method is supported")
181
182 path = req.request_path
183 if path.startswith("/"):
184 path = path[1:]
185
    method = getattr(self, "perspective_%s" % path, None)
    if method is None:
      raise http.HttpNotFound()

    try:
      result = (True, method(serializer.LoadJson(req.request_body)))

    except backend.RPCFail, err:
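      # RPCFail is the controlled way for backend functions to report a
      # failure; return it as a plain (False, message) result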
      result = (False, str(err))
    except errors.QuitGanetiException, err:
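      # The backend requested a daemon shutdown; log it and signal our own
      # process to terminate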
      logging.info("Shutting down the node daemon, arguments: %s",
                   str(err.args))
      os.kill(self.noded_pid, signal.SIGTERM)

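      # The exception's arguments are already in the (status, payload) tuple
      # format expected by the caller, so return them unchanged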
      result = err.args
    except Exception, err:
      logging.exception("Error in RPC call")
      result = (False, "Error while executing backend function: %s" % str(err))

    return serializer.DumpJson(result)

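  # Block device methods
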
  @staticmethod
  def perspective_blockdev_create(params):
    """Create a block device.

    """
    (bdev_s, size, owner, on_primary, info, excl_stor) = params
    bdev = objects.Disk.FromDict(bdev_s)
    if bdev is None:
      raise ValueError("can't unserialize data!")
    return backend.BlockdevCreate(bdev, size, owner, on_primary, info,
                                  excl_stor)

  @staticmethod
  def perspective_blockdev_addchildren(params):
    """Add a child to a mirror device.

    Note: this is only valid for mirror devices. It's the caller's duty
    to send a correct disk, otherwise we raise an error.

    """
    bdev_s, ndev_s = params
    bdev = objects.Disk.FromDict(bdev_s)
    ndevs = [objects.Disk.FromDict(disk_s) for disk_s in ndev_s]
    if bdev is None or ndevs.count(None) > 0:
      raise ValueError("can't unserialize data!")
    return backend.BlockdevAddchildren(bdev, ndevs)

  @staticmethod
  def perspective_blockdev_removechildren(params):
    """Remove a child from a mirror device.

    This is only valid for mirror devices, of course. It's the caller's
    duty to send a correct disk, otherwise we raise an error.

    """
    bdev_s, ndev_s = params
    bdev = objects.Disk.FromDict(bdev_s)
    ndevs = [objects.Disk.FromDict(disk_s) for disk_s in ndev_s]
    if bdev is None or ndevs.count(None) > 0:
      raise ValueError("can't unserialize data!")
    return backend.BlockdevRemovechildren(bdev, ndevs)

  @staticmethod
  def perspective_blockdev_getmirrorstatus_multi(params):
    """Return the mirror status for a list of disks.

    """
    (node_disks, ) = params

    disks = [objects.Disk.FromDict(dsk_s) for dsk_s in node_disks]

    result = []

    for (success, status) in backend.BlockdevGetmirrorstatusMulti(disks):
      if success:
        result.append((success, status.ToDict()))
      else:
        result.append((success, status))

    return result

  @staticmethod
  def perspective_blockdev_find(params):
    """Expose the FindBlockDevice functionality for a disk.

    This will try to find but not activate a disk.

    """
    disk = objects.Disk.FromDict(params[0])

    result = backend.BlockdevFind(disk)
    if result is None:
      return None

    return result.ToDict()

  @staticmethod
  def perspective_blockdev_snapshot(params):
    """Create a snapshot device.

    Note that this is only valid for LVM and ExtStorage disks, if we get
    passed something else we raise an exception. The snapshot device can be
    removed by calling the generic block device remove call.

    """
    (disk, snap_name, snap_size) = params
    cfbd = objects.Disk.FromDict(disk)
    return backend.BlockdevSnapshot(cfbd, snap_name, snap_size)

  @staticmethod
  def perspective_blockdev_grow(params):
    """Grow a stack of devices.

    """
    if len(params) < 5:
      raise ValueError("Received only %s parameters in blockdev_grow,"
                       " old master?" % len(params))
    cfbd = objects.Disk.FromDict(params[0])
    amount = params[1]
    dryrun = params[2]
    backingstore = params[3]
    excl_stor = params[4]
    return backend.BlockdevGrow(cfbd, amount, dryrun, backingstore, excl_stor)

  @staticmethod
  def perspective_blockdev_open(params):
    """Opens the given block devices.

    """
    disks = [objects.Disk.FromDict(cf) for cf in params[1]]
    exclusive = params[2]
    return backend.BlockdevOpen(params[0], disks, exclusive)

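  # DRBD-specific methods
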
  @staticmethod
  def perspective_drbd_disconnect_net(params):
    """Disconnects the network connection of drbd disks.

    Note that this is only valid for drbd disks, so the members of the
    disk list must all be drbd devices.

    """
    (disks,) = params
    disks = [objects.Disk.FromDict(disk) for disk in disks]
    return backend.DrbdDisconnectNet(disks)

  @staticmethod
  def perspective_drbd_attach_net(params):
    """Attaches the network connection of drbd disks.

    Note that this is only valid for drbd disks, so the members of the
    disk list must all be drbd devices.

    """
    disks, multimaster = params
    disks = [objects.Disk.FromDict(disk) for disk in disks]
    return backend.DrbdAttachNet(disks, multimaster)

  @staticmethod
  def perspective_drbd_wait_sync(params):
    """Wait until DRBD disks are synched.

    Note that this is only valid for drbd disks, so the members of the
    disk list must all be drbd devices.

    """
    (disks,) = params
    disks = [objects.Disk.FromDict(disk) for disk in disks]
    return backend.DrbdWaitSync(disks)

  @staticmethod
  def perspective_drbd_needs_activation(params):
    """Checks if the drbd devices need activation.

    Note that this is only valid for drbd disks, so the members of the
    disk list must all be drbd devices.

    """
    (disks,) = params
    disks = [objects.Disk.FromDict(disk) for disk in disks]
    return backend.DrbdNeedsActivation(disks)

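  # Export/import methods
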
  @staticmethod
  def perspective_export_info(params):
    """Query information about an existing export on this node.

    The given path may not contain an export, in which case we return
    None.

    """
    path = params[0]
    return backend.ExportInfo(path)

  @staticmethod
  def perspective_export_list(params):
    """List the available exports on this node.

    Note that as opposed to export_info, which may query data about an
    export in any path, this only queries the standard Ganeti path
    (pathutils.EXPORT_DIR).

    """
    return backend.ListExports()

  @staticmethod
  def perspective_export_remove(params):
    """Remove an export.

    """
    export = params[0]
    return backend.RemoveExport(export)

  @staticmethod
  def perspective_blockdev_getdimensions(params):
    """Query the sizes of the given block devices.

    """
    devices = params[0]
    return backend.GetBlockDevSizes(devices)

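  # Volume and storage methods
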
  @staticmethod
  def perspective_lv_list(params):
    """Query the list of logical volumes in a given volume group.

    """
    vgname = params[0]
    return backend.GetVolumeList(vgname)

  @staticmethod
  def perspective_storage_list(params):
    """Get list of storage units.

    """
    (su_name, su_args, name, fields) = params
    return container.GetStorage(su_name, *su_args).List(name, fields)

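  # Bridge checks
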
  @staticmethod
  def perspective_bridges_exist(params):
    """Check if all bridges given exist on this node.

    """
    bridges_list = params[0]
    return backend.BridgesExist(bridges_list)

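  # Instance methods
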
  @staticmethod
  def perspective_instance_os_add(params):
    """Install an OS on a given instance.

    """
    inst_s = params[0]
    inst = objects.Instance.FromDict(inst_s)
    reinstall = params[1]
    debug = params[2]
    return backend.InstanceOsAdd(inst, reinstall, debug)

  @staticmethod
  def perspective_hotplug_device(params):
    """Hotplugs device to a running instance.

    """
    (idict, action, dev_type, ddict, extra, seq) = params
    instance = objects.Instance.FromDict(idict)
    if dev_type == constants.HOTPLUG_TARGET_DISK:
      device = objects.Disk.FromDict(ddict)
    elif dev_type == constants.HOTPLUG_TARGET_NIC:
      device = objects.NIC.FromDict(ddict)
    else:
      assert dev_type in constants.HOTPLUG_ALL_TARGETS
    return backend.HotplugDevice(instance, action, dev_type, device, extra, seq)

  @staticmethod
  def perspective_instance_info(params):
    """Query instance information.

    """
    (instance_name, hypervisor_name, hvparams) = params
    return backend.GetInstanceInfo(instance_name, hypervisor_name, hvparams)

  @staticmethod
  def perspective_all_instances_info(params):
    """Query information about all instances.

    """
    (hypervisor_list, all_hvparams) = params
    return backend.GetAllInstancesInfo(hypervisor_list, all_hvparams)

  @staticmethod
  def perspective_instance_list(params):
    """Query the list of running instances.

    """
    (hypervisor_list, hvparams) = params
    return backend.GetInstanceList(hypervisor_list, hvparams)

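  # Node methods
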
  @staticmethod
  def perspective_node_has_ip_address(params):
    """Checks if the node has the given IP address.

    """
    return netutils.IPAddress.Own(params[0])

  @staticmethod
  def perspective_node_info(params):
    """Query node information.

    """
    (storage_units, hv_specs) = params
    return backend.GetNodeInfo(storage_units, hv_specs)

  @staticmethod
  def perspective_etc_hosts_modify(params):
    """Modify a node entry in /etc/hosts.

    """
    backend.EtcHostsModify(params[0], params[1], params[2])

    return True

  @staticmethod
  def perspective_node_verify(params):
    """Run a verify sequence on this node.

    """
    (what, cluster_name, hvparams, node_groups, groups_cfg) = params
    return backend.VerifyNode(what, cluster_name, hvparams,
                              node_groups, groups_cfg)

  @classmethod
  def perspective_node_verify_light(cls, params):
    """Run a light verify sequence on this node.

    This call is meant to perform a less strict verification of the node in
    certain situations. Right now, it is invoked only when a node is just
    about to be added to a cluster, and even then, it performs the same
    checks as L{perspective_node_verify}.
    """
    return cls.perspective_node_verify(params)

  @staticmethod
  def perspective_node_change_master_netmask(params):
    """Change the master IP netmask.

    """
    return backend.ChangeMasterNetmask(params[0], params[1], params[2],
                                       params[3])

  @staticmethod
  def perspective_node_volumes(params):
    """Query the list of all logical volumes on this node.

    """
    return backend.NodeVolumes()

  @staticmethod
  def perspective_node_demote_from_mc(params):
    """Demote a node from the master candidate role.

    """
    return backend.DemoteFromMC()

  @staticmethod
  def perspective_node_powercycle(params):
    """Tries to powercycle the node.

    """
    (hypervisor_type, hvparams) = params
    return backend.PowercycleNode(hypervisor_type, hvparams)

  @staticmethod
  def perspective_node_crypto_tokens(params):
    """Gets the node's public crypto tokens.

    """
    token_requests = params[0]
    return backend.GetCryptoTokens(token_requests)

  @staticmethod
  def perspective_node_ensure_daemon(params):
    """Ensure a daemon is running or stopped, depending on the 'run' flag.

    """
    (daemon_name, run) = params
    return backend.EnsureDaemon(daemon_name, run)

  @staticmethod
  def perspective_node_ssh_key_add(params):
    """Distributes a new node's SSH key if authorized.

    """
    (node_uuid, node_name, potential_master_candidates,
     to_authorized_keys, to_public_keys, get_public_keys) = params
    return backend.AddNodeSshKey(node_uuid, node_name,
                                 potential_master_candidates,
                                 to_authorized_keys=to_authorized_keys,
                                 to_public_keys=to_public_keys,
                                 get_public_keys=get_public_keys)

  @staticmethod
  def perspective_node_ssh_keys_renew(params):
    """Generates a new root SSH key pair on the node.

    """
    (node_uuids, node_names, master_candidate_uuids,
     potential_master_candidates) = params
    return backend.RenewSshKeys(node_uuids, node_names,
                                master_candidate_uuids,
                                potential_master_candidates)

  @staticmethod
  def perspective_node_ssh_key_remove(params):
    """Removes a node's SSH key from the other nodes' SSH files.

    """
    (node_uuid, node_name,
     master_candidate_uuids, potential_master_candidates,
     from_authorized_keys, from_public_keys, clear_authorized_keys,
     clear_public_keys, readd) = params
    return backend.RemoveNodeSshKey(node_uuid, node_name,
                                    master_candidate_uuids,
                                    potential_master_candidates,
                                    from_authorized_keys=from_authorized_keys,
                                    from_public_keys=from_public_keys,
                                    clear_authorized_keys=clear_authorized_keys,
                                    clear_public_keys=clear_public_keys,
                                    readd=readd)

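  # Cluster and file distribution methods
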
  @staticmethod
  def perspective_upload_file(params):
    """Upload a file.

    Note that the backend implementation imposes strict rules on which
    files are accepted.

    """
    return backend.UploadFile(*(params[0]))

  @staticmethod
  def perspective_upload_file_single(params):
    """Upload a file.

    Note that the backend implementation imposes strict rules on which
    files are accepted.

    """
    return backend.UploadFile(*params)

  @staticmethod
  def perspective_restricted_command(params):
    """Runs a restricted command.

    """
    (cmd, ) = params

    return backend.RunRestrictedCmd(cmd)

  @staticmethod
  def perspective_get_file_info(params):
    """Get info on whether a file exists and its properties.

    """
    (path, ) = params
    return backend.GetFileInfo(path)

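  # OS-related methods
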
  @staticmethod
  def perspective_os_diagnose(params):
    """Query detailed information about existing OSes.

    """
    return backend.DiagnoseOS()

  @staticmethod
  def perspective_os_validate(params):
    """Run a given OS' validation routine.

    """
    required, name, checks, params, force_variant = params
    return backend.ValidateOS(required, name, checks, params, force_variant)

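  # ExtStorage-related methods
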
  @staticmethod
  def perspective_extstorage_diagnose(params):
    """Query detailed information about existing extstorage providers.

    """
    return backend.DiagnoseExtStorage()

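  # Hooks runner
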
  @staticmethod
  def perspective_hooks_runner(params):
    """Run hook scripts.

    """
    hpath, phase, env = params
    hr = backend.HooksRunner()
    return hr.RunHooks(hpath, phase, env)

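  # IAllocator runner
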
  @staticmethod
  def perspective_iallocator_runner(params):
    """Run an iallocator script.

    """
    name, idata, ial_params_dict = params
    ial_params = []
    for ial_param in ial_params_dict.items():
      if ial_param[1] is not None:
        ial_params.append("--" + ial_param[0] + "=" + ial_param[1])
      else:
        ial_params.append("--" + ial_param[0])
    iar = backend.IAllocatorRunner()
    return iar.Run(name, idata, ial_params)

  @staticmethod
  def perspective_test_delay(params):
    """Run test delay.

    """
    duration = params[0]
    status, rval = utils.TestDelay(duration)
    if not status:
      raise backend.RPCFail(rval)
    return rval

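  # File storage directory methods
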
  @staticmethod
  def perspective_file_storage_dir_create(params):
    """Create the file storage directory.

    """
    file_storage_dir = params[0]
    return backend.CreateFileStorageDir(file_storage_dir)

  @staticmethod
  def perspective_file_storage_dir_remove(params):
    """Remove the file storage directory.

    """
    file_storage_dir = params[0]
    return backend.RemoveFileStorageDir(file_storage_dir)

  @staticmethod
  def perspective_file_storage_dir_rename(params):
    """Rename the file storage directory.

    """
    old_file_storage_dir = params[0]
    new_file_storage_dir = params[1]
    return backend.RenameFileStorageDir(old_file_storage_dir,
                                        new_file_storage_dir)

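  # Job queue methods
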
  @staticmethod
  @_RequireJobQueueLock
  def perspective_jobqueue_update(params):
    """Update job queue.

    """
    (file_name, content) = params
    return backend.JobQueueUpdate(file_name, content)

  @staticmethod
  @_RequireJobQueueLock
  def perspective_jobqueue_rename(params):
    """Rename a job queue file.

    """
    return [backend.JobQueueRename(old, new) for old, new in params[0]]

  @staticmethod
  @_RequireJobQueueLock
  def perspective_jobqueue_set_drain_flag(params):
    """Set job queue's drain flag.

    """
    (flag, ) = params

    return jstore.SetDrainFlag(flag)

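  # Hypervisor parameter validation
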
  @staticmethod
  def perspective_hypervisor_validate_params(params):
    """Validate the hypervisor parameters.

    """
    (hvname, hvparams) = params
    return backend.ValidateHVParams(hvname, hvparams)

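  # X509 certificates
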
  @staticmethod
  def perspective_x509_cert_create(params):
    """Creates a new X509 certificate for SSL/TLS.

    """
    (validity, ) = params
    return backend.CreateX509Certificate(validity)

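  # Import and export daemons
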
  @staticmethod
  def perspective_import_start(params):
    """Starts an import daemon.

    """
    (opts_s, instance, component, (dest, dest_args)) = params

    opts = objects.ImportExportOptions.FromDict(opts_s)

    return backend.StartImportExportDaemon(constants.IEM_IMPORT, opts,
                                           None, None,
                                           objects.Instance.FromDict(instance),
                                           component, dest,
                                           _DecodeImportExportIO(dest,
                                                                 dest_args))

  @staticmethod
  def perspective_export_start(params):
    """Starts an export daemon.

    """
    (opts_s, host, port, instance, component, (source, source_args)) = params

    opts = objects.ImportExportOptions.FromDict(opts_s)

    return backend.StartImportExportDaemon(constants.IEM_EXPORT, opts,
                                           host, port,
                                           objects.Instance.FromDict(instance),
                                           component, source,
                                           _DecodeImportExportIO(source,
                                                                 source_args))

1287 """Initial checks whether to run or exit with a failure.
1288
1289 """
1290 if args:
1291 print >> sys.stderr, ("Usage: %s [-f] [-d] [-p port] [-b ADDRESS]" %
1292 sys.argv[0])
1293 sys.exit(constants.EXIT_FAILURE)
1294 try:
1295 codecs.lookup("string-escape")
1296 except LookupError:
1297 print >> sys.stderr, ("Can't load the string-escape code which is part"
1298 " of the Python installation. Is your installation"
1299 " complete/correct? Aborting.")
1300 sys.exit(constants.EXIT_FAILURE)
1301
1304 """Callback function to verify a peer against the candidate cert map.
1305
1306 Note that we have a chicken-and-egg problem during cluster init and upgrade.
1307 This method checks whether the incoming connection comes from a master
1308 candidate by comparing it to the master certificate map in the cluster
1309 configuration. However, during cluster init and cluster upgrade there
1310 are various RPC calls done to the master node itself, before the candidate
1311 certificate list is established and the cluster configuration is written.
1312 In this case, we cannot check against the master candidate map.
1313
1314 This problem is solved by checking whether the candidate map is empty. An
1315 initialized 2.11 or higher cluster has at least one entry for the master
1316 node in the candidate map. If the map is empty, we know that we are still
1317 in the bootstrap/upgrade phase. In this case, we read the server certificate
1318 digest and compare it to the incoming request.
1319
1320 This means that after an upgrade of Ganeti, the system continues to operate
1321 like before, using server certificates only. After the client certificates
1322 are generated with ``gnt-cluster renew-crypto --new-node-certificates``,
1323 RPC communication is switched to using client certificates and the trick of
1324 using server certificates does not work anymore.
1325
1326 @type conn: C{OpenSSL.SSL.Connection}
1327 @param conn: the OpenSSL connection object
1328 @type cert: C{OpenSSL.X509}
1329 @param cert: the peer's SSL certificate
1330 @type errdepth: integer
1331 @param errdepth: number of the step in the certificate chain starting at 0
1332 for the actual client certificate.
1333
1334 """
1335
1336
1337
1338
1339
1340
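  # A non-zero error depth means the certificate belongs to the chain above
  # the peer's own certificate; such certificates are accepted only if they
  # match this node's server certificate.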
  if errdepth > 0:
    server_digest = utils.GetCertificateDigest(
      cert_filename=pathutils.NODED_CERT_FILE)
    match = cert.digest("sha1") == server_digest
    if not match:
      logging.debug("Received certificate from the certificate chain, which"
                    " does not match the server certificate. Digest of the"
                    " received certificate: %s. Digest of the server"
                    " certificate: %s.", cert.digest("sha1"), server_digest)
    return match
  elif errdepth == 0:
    sstore = ssconf.SimpleStore()
    try:
      candidate_certs = sstore.GetMasterCandidatesCertMap()
    except errors.ConfigurationError:
      logging.info("No candidate certificates found. Switching to "
                   "bootstrap/update mode.")
      candidate_certs = None
    if not candidate_certs:
      candidate_certs = {
        constants.CRYPTO_BOOTSTRAP: utils.GetCertificateDigest(
          cert_filename=pathutils.NODED_CERT_FILE)}
    match = cert.digest("sha1") in candidate_certs.values()
    if not match:
      logging.debug("Received certificate which is not a certificate of a"
                    " master candidate. Certificate digest: %s. List of master"
                    " candidate certificate digests: %s.", cert.digest("sha1"),
                    str(candidate_certs))
    return match
  else:
    logging.error("Invalid errdepth value: %s.", errdepth)
    return False

1377 """Preparation node daemon function, executed with the PID file held.
1378
1379 """
1380 if options.mlock:
1381 request_executor_class = MlockallRequestExecutor
1382 try:
1383 utils.Mlockall()
1384 except errors.NoCtypesError:
1385 logging.warning("Cannot set memory lock, ctypes module not found")
1386 request_executor_class = http.server.HttpServerRequestExecutor
1387 else:
1388 request_executor_class = http.server.HttpServerRequestExecutor
1389
1390
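  # Set up the SSL parameters only when SSL is enabled in the options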
  if options.ssl:
    ssl_params = http.HttpSslParams(ssl_key_path=options.ssl_key,
                                    ssl_cert_path=options.ssl_cert)
  else:
    ssl_params = None

  err = _PrepareQueueLock()
  if err is not None:
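    # A failing job queue (e.g. a permission or file-system problem) only
    # breaks the queue-related RPC calls, so log the error and start the
    # daemon anyway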
    logging.critical("Can't init/verify the queue, proceeding anyway: %s", err)

  handler = NodeRequestHandler()

  mainloop = daemon.Mainloop()
  server = \
    http.server.HttpServer(mainloop, options.bind_address, options.port,
                           handler, ssl_params=ssl_params, ssl_verify_peer=True,
                           request_executor_class=request_executor_class,
                           ssl_verify_callback=SSLVerifyPeer)
  server.Start()

  return (mainloop, server)

def ExecNoded(options, args, prep_data):
  """Main node daemon function, executed with the PID file held.

  """
  (mainloop, server) = prep_data
  try:
    mainloop.Run()
  finally:
    server.Stop()

1429 """Main function for the node daemon.
1430
1431 """
1432 parser = OptionParser(description="Ganeti node daemon",
1433 usage=("%prog [-f] [-d] [-p port] [-b ADDRESS]"
1434 " [-i INTERFACE]"),
1435 version="%%prog (ganeti) %s" %
1436 constants.RELEASE_VERSION)
1437 parser.add_option("--no-mlock", dest="mlock",
1438 help="Do not mlock the node memory in ram",
1439 default=True, action="store_false")
1440
1441 daemon.GenericMain(constants.NODED, parser, CheckNoded, PrepNoded, ExecNoded,
1442 default_ssl_cert=pathutils.NODED_CERT_FILE,
1443 default_ssl_key=pathutils.NODED_CERT_FILE,
1444 console_logging=True,
1445 warn_breach=True)
1446