1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31 """Logical units dealing with instance operations (start/stop/...).
32
33 Those operations have in common that they affect the operating system in a
34 running instance directly.
35
36 """
37
38 import logging
39
40 from ganeti import constants
41 from ganeti import errors
42 from ganeti import hypervisor
43 from ganeti import locking
44 from ganeti import objects
45 from ganeti import utils
46 from ganeti.cmdlib.base import LogicalUnit, NoHooksLU
47 from ganeti.cmdlib.common import INSTANCE_ONLINE, INSTANCE_DOWN, \
48 CheckHVParams, CheckInstanceState, CheckNodeOnline, GetUpdatedParams, \
49 CheckOSParams, CheckOSImage, ShareAll
50 from ganeti.cmdlib.instance_storage import StartInstanceDisks, \
51 ShutdownInstanceDisks, ImageDisks
52 from ganeti.cmdlib.instance_utils import BuildInstanceHookEnvByObject, \
53 CheckInstanceBridgesExist, CheckNodeFreeMemory, UpdateMetadata
54 from ganeti.hypervisor import hv_base
55
56
64
65
67 """Starts an instance.
68
69 """
70 HPATH = "instance-start"
71 HTYPE = constants.HTYPE_INSTANCE
72 REQ_BGL = False
73
80
84
88
90 """Build hooks env.
91
92 This runs on master, primary and secondary nodes of the instance.
93
94 """
95 env = {
96 "FORCE": self.op.force,
97 }
98
99 env.update(BuildInstanceHookEnvByObject(self, self.instance))
100
101 return env
102
110
112 """Check prerequisites.
113
114 This checks that the instance is in the cluster.
115
116 """
117 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
118 assert self.instance is not None, \
119 "Cannot retrieve locked instance %s" % self.op.instance_name
120
121 cluster = self.cfg.GetClusterInfo()
122
123 if self.op.hvparams:
124
125 utils.ForceDictType(self.op.hvparams, constants.HVS_PARAMETER_TYPES)
126 filled_hvp = cluster.FillHV(self.instance)
127 filled_hvp.update(self.op.hvparams)
128 hv_type = hypervisor.GetHypervisorClass(self.instance.hypervisor)
129 hv_type.CheckParameterSyntax(filled_hvp)
130 CheckHVParams(self, self.cfg.GetInstanceNodes(self.instance.uuid),
131 self.instance.hypervisor, filled_hvp)
132
133 CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
134
135 self.primary_offline = \
136 self.cfg.GetNodeInfo(self.instance.primary_node).offline
137
138 if self.primary_offline and self.op.ignore_offline_nodes:
139 self.LogWarning("Ignoring offline primary node")
140
141 if self.op.hvparams or self.op.beparams:
142 self.LogWarning("Overridden parameters are ignored")
143 else:
144 CheckNodeOnline(self, self.instance.primary_node)
145
146 bep = self.cfg.GetClusterInfo().FillBE(self.instance)
147 bep.update(self.op.beparams)
148
149
150 CheckInstanceBridgesExist(self, self.instance)
151
152 remote_info = self.rpc.call_instance_info(
153 self.instance.primary_node, self.instance.name,
154 self.instance.hypervisor, cluster.hvparams[self.instance.hypervisor])
155 remote_info.Raise("Error checking node %s" %
156 self.cfg.GetNodeName(self.instance.primary_node),
157 prereq=True, ecode=errors.ECODE_ENVIRON)
158
159 self.requires_cleanup = False
160
161 if remote_info.payload:
162 if _IsInstanceUserDown(self.cfg.GetClusterInfo(),
163 self.instance,
164 remote_info.payload):
165 self.requires_cleanup = True
166 else:
167 CheckNodeFreeMemory(
168 self, self.instance.primary_node,
169 "starting instance %s" % self.instance.name,
170 bep[constants.BE_MINMEM], self.instance.hypervisor,
171 self.cfg.GetClusterInfo().hvparams[self.instance.hypervisor])
172
173 - def Exec(self, feedback_fn):
174 """Start the instance.
175
176 """
177 if not self.op.no_remember:
178 self.instance = self.cfg.MarkInstanceUp(self.instance.uuid)
179
180 if self.primary_offline:
181 assert self.op.ignore_offline_nodes
182 self.LogInfo("Primary node offline, marked instance as started")
183 else:
184 if self.requires_cleanup:
185 result = self.rpc.call_instance_shutdown(
186 self.instance.primary_node,
187 self.instance,
188 self.op.shutdown_timeout, self.op.reason)
189 result.Raise("Could not shutdown instance '%s'" % self.instance.name)
190
191 ShutdownInstanceDisks(self, self.instance)
192
193 StartInstanceDisks(self, self.instance, self.op.force)
194 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
195
196 result = \
197 self.rpc.call_instance_start(self.instance.primary_node,
198 (self.instance, self.op.hvparams,
199 self.op.beparams),
200 self.op.startup_paused, self.op.reason)
201 if result.fail_msg:
202 ShutdownInstanceDisks(self, self.instance)
203 result.Raise("Could not start instance '%s'" % self.instance.name)
204
205
207 """Shutdown an instance.
208
209 """
210 HPATH = "instance-stop"
211 HTYPE = constants.HTYPE_INSTANCE
212 REQ_BGL = False
213
216
218 """Check arguments.
219
220 """
221 if self.op.no_remember and self.op.admin_state_source is not None:
222 self.LogWarning("Parameter 'admin_state_source' has no effect if used"
223 " with parameter 'no_remember'")
224
225 if self.op.admin_state_source is None:
226 self.op.admin_state_source = constants.ADMIN_SOURCE
227
229 """Build hooks env.
230
231 This runs on master, primary and secondary nodes of the instance.
232
233 """
234 env = BuildInstanceHookEnvByObject(self, self.instance)
235 env["TIMEOUT"] = self.op.timeout
236 return env
237
245
247 """Check prerequisites.
248
249 This checks that the instance is in the cluster.
250
251 """
252 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
253 assert self.instance is not None, \
254 "Cannot retrieve locked instance %s" % self.op.instance_name
255
256 if self.op.force:
257 self.LogWarning("Ignoring offline instance check")
258 else:
259 CheckInstanceState(self, self.instance, INSTANCE_ONLINE)
260
261 self.primary_offline = \
262 self.cfg.GetNodeInfo(self.instance.primary_node).offline
263
264 if self.primary_offline and self.op.ignore_offline_nodes:
265 self.LogWarning("Ignoring offline primary node")
266 else:
267 CheckNodeOnline(self, self.instance.primary_node)
268
269 if self.op.admin_state_source == constants.USER_SOURCE:
270 cluster = self.cfg.GetClusterInfo()
271
272 result = self.rpc.call_instance_info(
273 self.instance.primary_node,
274 self.instance.name,
275 self.instance.hypervisor,
276 cluster.hvparams[self.instance.hypervisor])
277 result.Raise("Error checking instance '%s'" % self.instance.name,
278 prereq=True)
279
280 if not _IsInstanceUserDown(cluster,
281 self.instance,
282 result.payload):
283 raise errors.OpPrereqError("Instance '%s' was not shutdown by the user"
284 % self.instance.name)
285
286 - def Exec(self, feedback_fn):
311
312
314 """Reinstall an instance.
315
316 """
317 HPATH = "instance-reinstall"
318 HTYPE = constants.HTYPE_INSTANCE
319 REQ_BGL = False
320
323
326
334
342
365
367 "Handle the OS parameter merging and validation for the target instance."
368 node_uuids = list(self.cfg.GetInstanceNodes(instance.uuid))
369
370 self.op.osparams = self.op.osparams or {}
371 self.op.osparams_private = self.op.osparams_private or {}
372 self.op.osparams_secret = self.op.osparams_secret or {}
373
374
375 params_public = GetUpdatedParams(instance.osparams, self.op.osparams)
376 params_private = GetUpdatedParams(instance.osparams_private,
377 self.op.osparams_private)
378 params_secret = self.op.osparams_secret
379
380
381 if self.op.os_type is not None:
382 instance_os = self.op.os_type
383 else:
384 instance_os = instance.os
385
386 cluster = self.cfg.GetClusterInfo()
387 self.osparams = cluster.SimpleFillOS(
388 instance_os,
389 params_public,
390 os_params_private=params_private,
391 os_params_secret=params_secret
392 )
393
394 self.osparams_private = params_private
395 self.osparams_secret = params_secret
396
397 CheckOSParams(self, True, node_uuids, instance_os, self.osparams,
398 self.op.force_variant)
399
401 """Reinstall OS scripts on an instance.
402
403 @type instance: L{objects.Instance}
404 @param instance: instance of which the OS scripts should run
405
406 @type osparams: L{dict}
407 @param osparams: OS parameters
408
409 @type debug_level: non-negative int
410 @param debug_level: debug level
411
412 @rtype: NoneType
413 @return: None
414 @raise errors.OpExecError: in case of failure
415
416 """
417 self.LogInfo("Running instance OS create scripts...")
418 result = self.rpc.call_instance_os_add(instance.primary_node,
419 (instance, osparams),
420 True,
421 debug_level)
422 result.Raise("Could not install OS for instance '%s' on node '%s'" %
423 (instance.name, self.cfg.GetNodeName(instance.primary_node)))
424
  def Exec(self, feedback_fn):
    """Reinstall the instance.

    Determines the OS image and/or OS scripts to use, updates the
    instance's OS type in the configuration if requested, then (with
    the disks temporarily activated) images the disks and/or runs the
    OS create scripts and refreshes the instance metadata.

    """
    # An OS image passed explicitly in the opcode takes precedence and
    # leaves the stored instance configuration untouched
    os_image = objects.GetOSImage(self.op.osparams)

    if os_image is not None:
      feedback_fn("Using OS image '%s', not changing instance"
                  " configuration" % os_image)
    else:
      # Fall back to whatever image the instance configuration declares
      os_image = objects.GetOSImage(self.instance.osparams)

    os_type = self.op.os_type

    if os_type is not None:
      # A new OS type was requested; persist it before reinstalling
      feedback_fn("Changing OS scripts to '%s'..." % os_type)
      self.instance.os = os_type
      self.cfg.Update(self.instance, feedback_fn)
    else:
      os_type = self.instance.os

    if not os_image and not os_type:
      # Nothing to do: neither an image nor OS scripts are available
      self.LogInfo("No OS scripts or OS image specified or found in the"
                   " instance's configuration, nothing to install")
    else:
      StartInstanceDisks(self, self.instance, None)
      # Re-read the instance, disk activation may have updated it
      self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
      try:
        if os_image:
          # Write the OS image onto the instance's disks
          ImageDisks(self, self.instance, os_image)

        if os_type:
          # Run the OS create scripts (validated in CheckPrereq)
          self._ReinstallOSScripts(self.instance, self.osparams,
                                   self.op.debug_level)

        UpdateMetadata(feedback_fn, self.rpc, self.instance,
                       osparams_public=self.osparams,
                       osparams_private=self.osparams_private,
                       osparams_secret=self.osparams_secret)
      finally:
        # Always deactivate the disks again, even if imaging or the
        # OS scripts failed
        ShutdownInstanceDisks(self, self.instance)
466
467
469 """Reboot an instance.
470
471 """
472 HPATH = "instance-reboot"
473 HTYPE = constants.HTYPE_INSTANCE
474 REQ_BGL = False
475
478
480 """Build hooks env.
481
482 This runs on master, primary and secondary nodes of the instance.
483
484 """
485 env = {
486 "IGNORE_SECONDARIES": self.op.ignore_secondaries,
487 "REBOOT_TYPE": self.op.reboot_type,
488 "SHUTDOWN_TIMEOUT": self.op.shutdown_timeout,
489 }
490
491 env.update(BuildInstanceHookEnvByObject(self, self.instance))
492
493 return env
494
502
517
518 - def Exec(self, feedback_fn):
519 """Reboot the instance.
520
521 """
522 cluster = self.cfg.GetClusterInfo()
523 remote_info = self.rpc.call_instance_info(
524 self.instance.primary_node, self.instance.name,
525 self.instance.hypervisor, cluster.hvparams[self.instance.hypervisor])
526 remote_info.Raise("Error checking node %s" %
527 self.cfg.GetNodeName(self.instance.primary_node))
528 instance_running = bool(remote_info.payload)
529
530 current_node_uuid = self.instance.primary_node
531
532 if instance_running and \
533 self.op.reboot_type in [constants.INSTANCE_REBOOT_SOFT,
534 constants.INSTANCE_REBOOT_HARD]:
535 result = self.rpc.call_instance_reboot(current_node_uuid, self.instance,
536 self.op.reboot_type,
537 self.op.shutdown_timeout,
538 self.op.reason)
539 result.Raise("Could not reboot instance")
540 else:
541 if instance_running:
542 result = self.rpc.call_instance_shutdown(current_node_uuid,
543 self.instance,
544 self.op.shutdown_timeout,
545 self.op.reason)
546 result.Raise("Could not shutdown instance for full reboot")
547 ShutdownInstanceDisks(self, self.instance)
548 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
549 else:
550 self.LogInfo("Instance %s was already stopped, starting now",
551 self.instance.name)
552 StartInstanceDisks(self, self.instance, self.op.ignore_secondaries)
553 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
554 result = self.rpc.call_instance_start(current_node_uuid,
555 (self.instance, None, None), False,
556 self.op.reason)
557 msg = result.fail_msg
558 if msg:
559 ShutdownInstanceDisks(self, self.instance)
560 self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)
561 raise errors.OpExecError("Could not start instance for"
562 " full reboot: %s" % msg)
563
564 self.cfg.MarkInstanceUp(self.instance.uuid)
565
566
589
590
592 """Connect to an instance's console.
593
594 This is somewhat special in that it returns the command line that
595 you need to run on the master node in order to connect to the
596 console.
597
598 """
599 REQ_BGL = False
600
604
606 """Check prerequisites.
607
608 This checks that the instance is in the cluster.
609
610 """
611 self.instance = self.cfg.GetInstanceInfo(self.op.instance_uuid)
612 assert self.instance is not None, \
613 "Cannot retrieve locked instance %s" % self.op.instance_name
614 CheckNodeOnline(self, self.instance.primary_node)
615
616 - def Exec(self, feedback_fn):
646