"""Logical units dealing with node groups."""

import itertools
import logging

from ganeti import constants
from ganeti import errors
from ganeti import locking
from ganeti import objects
from ganeti import qlang
from ganeti import query
from ganeti import utils
from ganeti.masterd import iallocator
from ganeti.cmdlib.base import LogicalUnit, NoHooksLU, QueryBase, \
  ResultWithJobs
from ganeti.cmdlib.common import MergeAndVerifyHvState, \
  MergeAndVerifyDiskState, GetWantedNodes, GetUpdatedParams, \
  CheckNodeGroupInstances, GetUpdatedIPolicy, \
  ComputeNewInstanceViolations, GetDefaultIAllocator, ShareAll, \
  CheckInstancesNodeGroups, LoadNodeEvacResult, MapInstanceLvsToNodes, \
  CheckIpolicyVsDiskTemplates

import ganeti.masterd.instance


class LUGroupAdd(LogicalUnit):
  """Logical unit for creating node groups.

  """
  HPATH = "group-add"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # Generate the new group's UUID here so that the corresponding lock can be
    # created and acquired; Exec() later adds the group with check_uuid=False
    # because the UUID was generated, not read, at this point.
    self.group_uuid = self.cfg.GenerateUniqueID(self.proc.GetECId())
    self.needed_locks = {}
    self.add_locks[locking.LEVEL_NODEGROUP] = self.group_uuid

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Add the node group to the cluster.

    """
    group_obj = objects.NodeGroup(name=self.op.group_name, members=[],
                                  uuid=self.group_uuid,
                                  alloc_policy=self.op.alloc_policy,
                                  ndparams=self.op.ndparams,
                                  diskparams=self.new_diskparams,
                                  ipolicy=self.op.ipolicy,
                                  hv_state_static=self.new_hv_state,
                                  disk_state_static=self.new_disk_state)

    self.cfg.AddNodeGroup(group_obj, self.proc.GetECId(), check_uuid=False)
    del self.remove_locks[locking.LEVEL_NODEGROUP]


class LUGroupAssignNodes(NoHooksLU):
  """Logical unit for assigning nodes to groups.

  """
  REQ_BGL = False

  def ExpandNames(self):
    # These raise errors.OpPrereqError on their own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    (self.op.node_uuids, self.op.nodes) = GetWantedNodes(self, self.op.nodes)

    # We want to lock all the affected nodes and groups.  We have readily
    # available the list of nodes and the *destination* group; the current
    # ("source") groups of the nodes are collected in DeclareLocks() below.
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: set([self.group_uuid]),
      locking.LEVEL_NODE: self.op.node_uuids,
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_NODEGROUP:
      assert len(self.needed_locks[locking.LEVEL_NODEGROUP]) == 1

      # Try to get all affected nodes' groups without having the group or node
      # lock yet; this optimistic result is verified later in CheckPrereq.
      groups = self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids)

      self.needed_locks[locking.LEVEL_NODEGROUP].update(groups)

  def CheckPrereq(self):
    """Check prerequisites.

    """
    assert self.needed_locks[locking.LEVEL_NODEGROUP]
    assert (frozenset(self.owned_locks(locking.LEVEL_NODE)) ==
            frozenset(self.op.node_uuids))

    expected_locks = (set([self.group_uuid]) |
                      self.cfg.GetNodeGroupsFromNodes(self.op.node_uuids))
    actual_locks = self.owned_locks(locking.LEVEL_NODEGROUP)
    if actual_locks != expected_locks:
      raise errors.OpExecError("Nodes changed groups since locks were acquired,"
                               " current groups are '%s', used to be '%s'" %
                               (utils.CommaJoin(expected_locks),
                                utils.CommaJoin(actual_locks)))

    self.node_data = self.cfg.GetAllNodesInfo()
    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    instance_data = self.cfg.GetAllInstancesInfo()

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    (new_splits, previous_splits) = \
      self.CheckAssignmentForSplitInstances([(uuid, self.group_uuid)
                                             for uuid in self.op.node_uuids],
                                            self.node_data, instance_data)

    if new_splits:
      fmt_new_splits = utils.CommaJoin(utils.NiceSort(
                         self.cfg.GetInstanceNames(new_splits)))

      if not self.op.force:
        raise errors.OpExecError("The following instances get split by this"
                                 " change and --force was not given: %s" %
                                 fmt_new_splits)
      else:
        self.LogWarning("This operation will split the following instances: %s",
                        fmt_new_splits)

    if previous_splits:
      self.LogWarning("In addition, these already-split instances continue"
                      " to be split across groups: %s",
                      utils.CommaJoin(utils.NiceSort(
                        self.cfg.GetInstanceNames(previous_splits))))

  def Exec(self, feedback_fn):
    """Assign nodes to a new group.

    """
    mods = [(node_uuid, self.group_uuid) for node_uuid in self.op.node_uuids]

    self.cfg.AssignGroupNodes(mods)

  @staticmethod
  def CheckAssignmentForSplitInstances(changes, node_data, instance_data):
    """Check for split instances after a node assignment.

    This method considers a series of node assignments as an atomic operation,
    and returns information about split instances after applying the set of
    changes.

    In particular, it returns information about newly split instances, and
    instances that were already split, and remain so after the change.

    Only instances whose disk template is listed in constants.DTS_INT_MIRROR
    are considered.

    @type changes: list of (node_uuid, new_group_uuid) pairs.
    @param changes: list of node assignments to consider.
    @param node_data: a dict with data for all nodes
    @param instance_data: a dict with all instances to consider
    @rtype: a two-tuple
    @return: a list of instances that were previously okay and result split as a
      consequence of this change, and a list of instances that were previously
      split and this change does not fix.

    """
    changed_nodes = dict((uuid, group) for uuid, group in changes
                         if node_data[uuid].group != group)

    all_split_instances = set()
    previously_split_instances = set()

    for inst in instance_data.values():
      if inst.disk_template not in constants.DTS_INT_MIRROR:
        continue

      if len(set(node_data[node_uuid].group
                 for node_uuid in inst.all_nodes)) > 1:
        previously_split_instances.add(inst.uuid)

      if len(set(changed_nodes.get(node_uuid, node_data[node_uuid].group)
                 for node_uuid in inst.all_nodes)) > 1:
        all_split_instances.add(inst.uuid)

    return (list(all_split_instances - previously_split_instances),
            list(previously_split_instances & all_split_instances))
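
  # Worked example, added for clarity and not present in the original source:
  # a hypothetical DRBD instance whose primary node is in group "g1" and whose
  # secondary node is in group "g2" is counted as previously split.  An
  # instance with both nodes in "g1" whose primary node is reassigned to "g2"
  # via `changes` shows up in the first element of the returned tuple instead
  # (newly split):
  #
  #   new_splits, old_splits = \
  #     LUGroupAssignNodes.CheckAssignmentForSplitInstances(
  #       [("node1-uuid", "g2-uuid")], node_data, instance_data)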


class _GroupQuery(QueryBase):
  FIELDS = query.GROUP_FIELDS

  def ExpandNames(self, lu):
    lu.needed_locks = {}

    self._all_groups = lu.cfg.GetAllNodeGroupsInfo()
    self._cluster = lu.cfg.GetClusterInfo()
    name_to_uuid = dict((g.name, g.uuid) for g in self._all_groups.values())

    if not self.names:
      self.wanted = [name_to_uuid[name]
                     for name in utils.NiceSort(name_to_uuid.keys())]
    else:
      # Accept names to be either group names or UUIDs.
      missing = []
      self.wanted = []
      all_uuid = frozenset(self._all_groups.keys())

      for name in self.names:
        if name in all_uuid:
          self.wanted.append(name)
        elif name in name_to_uuid:
          self.wanted.append(name_to_uuid[name])
        else:
          missing.append(name)

      if missing:
        raise errors.OpPrereqError("Some groups do not exist: %s" %
                                   utils.CommaJoin(missing),
                                   errors.ECODE_NOENT)

  def DeclareLocks(self, lu, level):
    pass

  def _GetQueryData(self, lu):
    """Computes the list of node groups and their attributes.

    """
    do_nodes = query.GQ_NODE in self.requested_data
    do_instances = query.GQ_INST in self.requested_data

    group_to_nodes = None
    group_to_instances = None

    # For GQ_NODE we need a mapping of group UUIDs to their member nodes; for
    # GQ_INST we additionally need a node-to-group mapping so that each
    # instance can be attributed to the group of its primary node.
    if do_nodes or do_instances:
      all_nodes = lu.cfg.GetAllNodesInfo()
      group_to_nodes = dict((uuid, []) for uuid in self.wanted)
      node_to_group = {}

      for node in all_nodes.values():
        if node.group in group_to_nodes:
          group_to_nodes[node.group].append(node.uuid)
          node_to_group[node.uuid] = node.group

      if do_instances:
        all_instances = lu.cfg.GetAllInstancesInfo()
        group_to_instances = dict((uuid, []) for uuid in self.wanted)

        for instance in all_instances.values():
          node = instance.primary_node
          if node in node_to_group:
            group_to_instances[node_to_group[node]].append(instance.uuid)

        if not do_nodes:
          # Node information was only gathered as a helper; do not return it
          # unless it was actually requested.
          group_to_nodes = None

    return query.GroupQueryData(self._cluster,
                                [self._all_groups[uuid]
                                 for uuid in self.wanted],
                                group_to_nodes, group_to_instances,
                                query.GQ_DISKPARAMS in self.requested_data)
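
  # Illustrative shapes of the intermediate mappings built above (hypothetical
  # UUIDs, added for clarity, not from the original source):
  #
  #   group_to_nodes     = {"g1-uuid": ["node1-uuid", "node2-uuid"]}
  #   group_to_instances = {"g1-uuid": ["inst1-uuid"]}
  #
  # Either mapping stays None when the corresponding fields were not
  # requested, and the query layer is expected to skip those columns then.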


class LUGroupQuery(NoHooksLU):
  """Logical unit for querying node groups.

  """
  REQ_BGL = False

  def CheckArguments(self):
    self.gq = _GroupQuery(qlang.MakeSimpleFilter("name", self.op.names),
                          self.op.output_fields, False)

  def ExpandNames(self):
    self.gq.ExpandNames(self)

  def DeclareLocks(self, level):
    self.gq.DeclareLocks(self, level)

  def Exec(self, feedback_fn):
    return self.gq.OldStyleQuery(self)


class LUGroupSetParams(LogicalUnit):
  """Modifies the parameters of a node group.

  """
  HPATH = "group-modify"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def CheckArguments(self):
    all_changes = [
      self.op.ndparams,
      self.op.diskparams,
      self.op.alloc_policy,
      self.op.hv_state,
      self.op.disk_state,
      self.op.ipolicy,
      ]

    if all_changes.count(None) == len(all_changes):
      raise errors.OpPrereqError("Please pass at least one modification",
                                 errors.ECODE_INVAL)

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

    self.share_locks[locking.LEVEL_INSTANCE] = 1

  @staticmethod
  def _UpdateAndVerifyDiskParams(old, new):
    """Updates and verifies disk parameters.

    """
    new_params = GetUpdatedParams(old, new)
    utils.ForceDictType(new_params, constants.DISK_DT_TYPES)
    return new_params

  def CheckPrereq(self):
    """Check prerequisites.

    """
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))

    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)

    self.group = self.cfg.GetNodeGroup(self.group_uuid)
    cluster = self.cfg.GetClusterInfo()

    if self.group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    if self.op.ndparams:
      new_ndparams = GetUpdatedParams(self.group.ndparams, self.op.ndparams)
      utils.ForceDictType(new_ndparams, constants.NDS_PARAMETER_TYPES)
      self.new_ndparams = new_ndparams

    if self.op.diskparams:
      diskparams = self.group.diskparams
      uavdp = self._UpdateAndVerifyDiskParams

      # Update and verify each disk template sub-dict that was passed in.
      new_diskparams = dict((dt,
                             uavdp(diskparams.get(dt, {}),
                                   self.op.diskparams[dt]))
                            for dt in constants.DISK_TEMPLATES
                            if dt in self.op.diskparams)

      # Merge the updated sub-dicts back over the group's full diskparams dict.
      self.new_diskparams = objects.FillDict(diskparams, new_diskparams)
      try:
        utils.VerifyDictOptions(self.new_diskparams, constants.DISK_DT_DEFAULTS)
      except errors.OpPrereqError, err:
        raise errors.OpPrereqError("While verifying diskparams options: %s" %
                                   err, errors.ECODE_INVAL)

    if self.op.hv_state:
      self.new_hv_state = MergeAndVerifyHvState(self.op.hv_state,
                                                self.group.hv_state_static)

    if self.op.disk_state:
      self.new_disk_state = \
        MergeAndVerifyDiskState(self.op.disk_state,
                                self.group.disk_state_static)

    self._CheckIpolicy(cluster, owned_instance_names)
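
  # Illustrative note on the diskparams merge in CheckPrereq (hypothetical
  # template/parameter values, not from the original source): GetUpdatedParams
  # layers the per-template overrides on top of the group's current values,
  # and objects.FillDict then merges the result back over the full diskparams
  # dict, e.g.
  #
  #   group.diskparams = {"drbd": {"resync-rate": 1024}}
  #   op.diskparams    = {"drbd": {"metavg": "myvg"}}
  #   new_diskparams   = {"drbd": {"resync-rate": 1024, "metavg": "myvg"}}
  #
  # before utils.VerifyDictOptions checks it against DISK_DT_DEFAULTS.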

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      "NEW_ALLOC_POLICY": self.op.alloc_policy,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Modifies the node group.

    """
    result = []

    if self.op.ndparams:
      self.group.ndparams = self.new_ndparams
      result.append(("ndparams", str(self.group.ndparams)))

    if self.op.diskparams:
      self.group.diskparams = self.new_diskparams
      result.append(("diskparams", str(self.group.diskparams)))

    if self.op.alloc_policy:
      self.group.alloc_policy = self.op.alloc_policy

    if self.op.hv_state:
      self.group.hv_state_static = self.new_hv_state

    if self.op.disk_state:
      self.group.disk_state_static = self.new_disk_state

    if self.op.ipolicy:
      self.group.ipolicy = self.new_ipolicy

    self.cfg.Update(self.group, feedback_fn)
    return result
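
  # Note added for clarity: the list returned by Exec() is a sequence of
  # ("parameter", "new value") pairs, e.g. [("ndparams", "{...}")], which
  # callers such as the CLI can use to report what actually changed.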


class LUGroupRemove(LogicalUnit):
  HPATH = "group-remove"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)
    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    This checks that the given group name exists as a node group, that it is
    empty (i.e., contains no nodes), and that it is not the last group of the
    cluster.

    """
    # Verify that the group is empty.
    group_nodes = [node.uuid
                   for node in self.cfg.GetAllNodesInfo().values()
                   if node.group == self.group_uuid]

    if group_nodes:
      raise errors.OpPrereqError("Group '%s' not empty, has the following"
                                 " nodes: %s" %
                                 (self.op.group_name,
                                  utils.CommaJoin(utils.NiceSort(group_nodes))),
                                 errors.ECODE_STATE)

    # Verify the cluster would not be left without node groups.
    if len(self.cfg.GetNodeGroupList()) == 1:
      raise errors.OpPrereqError("Group '%s' is the only group, cannot be"
                                 " removed" % self.op.group_name,
                                 errors.ECODE_STATE)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()
    return ([mn], [mn])

  def Exec(self, feedback_fn):
    """Remove the node group.

    """
    try:
      self.cfg.RemoveNodeGroup(self.group_uuid)
    except errors.ConfigurationError:
      raise errors.OpExecError("Group '%s' with UUID %s disappeared" %
                               (self.op.group_name, self.group_uuid))

    self.remove_locks[locking.LEVEL_NODEGROUP] = self.group_uuid


class LUGroupRename(LogicalUnit):
  HPATH = "group-rename"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    self.needed_locks = {
      locking.LEVEL_NODEGROUP: [self.group_uuid],
      }

  def CheckPrereq(self):
    """Check prerequisites.

    Ensures requested new name is not yet used.

    """
    try:
      new_name_uuid = self.cfg.LookupNodeGroup(self.op.new_name)
    except errors.OpPrereqError:
      pass
    else:
      raise errors.OpPrereqError("Desired new name '%s' clashes with existing"
                                 " node group (UUID: %s)" %
                                 (self.op.new_name, new_name_uuid),
                                 errors.ECODE_EXISTS)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "OLD_NAME": self.op.group_name,
      "NEW_NAME": self.op.new_name,
      }

  def Exec(self, feedback_fn):
    """Rename the node group.

    """
    group = self.cfg.GetNodeGroup(self.group_uuid)

    if group is None:
      raise errors.OpExecError("Could not retrieve group '%s' (UUID: %s)" %
                               (self.op.group_name, self.group_uuid))

    group.name = self.op.new_name
    self.cfg.Update(group, feedback_fn)

    return self.op.new_name


class LUGroupEvacuate(LogicalUnit):
  HPATH = "group-evacuate"
  HTYPE = constants.HTYPE_GROUP
  REQ_BGL = False

  def ExpandNames(self):
    # This raises errors.OpPrereqError on its own:
    self.group_uuid = self.cfg.LookupNodeGroup(self.op.group_name)

    if self.op.target_groups:
      self.req_target_uuids = map(self.cfg.LookupNodeGroup,
                                  self.op.target_groups)
    else:
      self.req_target_uuids = []

    if self.group_uuid in self.req_target_uuids:
      raise errors.OpPrereqError("Group to be evacuated (%s) can not be used"
                                 " as a target group (targets are %s)" %
                                 (self.group_uuid,
                                  utils.CommaJoin(self.req_target_uuids)),
                                 errors.ECODE_INVAL)

    self.op.iallocator = GetDefaultIAllocator(self.cfg, self.op.iallocator)

    self.share_locks = ShareAll()
    self.needed_locks = {
      locking.LEVEL_INSTANCE: [],
      locking.LEVEL_NODEGROUP: [],
      locking.LEVEL_NODE: [],
      }

  def DeclareLocks(self, level):
    if level == locking.LEVEL_INSTANCE:
      assert not self.needed_locks[locking.LEVEL_INSTANCE]

      # Lock instances optimistically; this requires them to be verified in
      # CheckPrereq.
      self.needed_locks[locking.LEVEL_INSTANCE] = \
        self.cfg.GetInstanceNames(
          self.cfg.GetNodeGroupInstances(self.group_uuid))

    elif level == locking.LEVEL_NODEGROUP:
      assert not self.needed_locks[locking.LEVEL_NODEGROUP]

      if self.req_target_uuids:
        lock_groups = set([self.group_uuid] + self.req_target_uuids)

        # Lock all groups used by the instances optimistically; this requires
        # going via the node before it's locked, so the result must be
        # verified later in CheckPrereq.
        lock_groups.update(group_uuid
                           for instance_name in
                             self.owned_locks(locking.LEVEL_INSTANCE)
                           for group_uuid in
                             self.cfg.GetInstanceNodeGroups(
                               self.cfg.GetInstanceInfoByName(instance_name)
                                 .uuid))
      else:
        # No target groups were given, so all of them need to be locked.
        lock_groups = locking.ALL_SET

      self.needed_locks[locking.LEVEL_NODEGROUP] = lock_groups

    elif level == locking.LEVEL_NODE:
      # This will only lock the nodes in the group to be evacuated which
      # contain actual instances.
      self.recalculate_locks[locking.LEVEL_NODE] = constants.LOCKS_APPEND
      self._LockInstancesNodes()

      # Lock all nodes in the group to be evacuated and in the target groups.
      owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
      assert self.group_uuid in owned_groups
      member_node_uuids = [node_uuid
                           for group in owned_groups
                           for node_uuid in
                             self.cfg.GetNodeGroup(group).members]
      self.needed_locks[locking.LEVEL_NODE].extend(member_node_uuids)

  def CheckPrereq(self):
    owned_instance_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert owned_groups.issuperset(self.req_target_uuids)
    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_instance_names)

    # Get instance information
    self.instances = \
      dict(self.cfg.GetMultiInstanceInfoByName(owned_instance_names))

    # Check if node groups for locked instances are still correct
    CheckInstancesNodeGroups(self.cfg, self.instances,
                             owned_groups, owned_node_uuids, self.group_uuid)

    if self.req_target_uuids:
      # User requested specific target groups
      self.target_uuids = self.req_target_uuids
    else:
      # All groups except the one to be evacuated are potential targets
      self.target_uuids = [group_uuid for group_uuid in owned_groups
                           if group_uuid != self.group_uuid]

      if not self.target_uuids:
        raise errors.OpPrereqError("There are no possible target groups",
                                   errors.ECODE_INVAL)

  def BuildHooksEnv(self):
    """Build hooks env.

    """
    return {
      "GROUP_NAME": self.op.group_name,
      "TARGET_GROUPS": " ".join(self.target_uuids),
      }

  def BuildHooksNodes(self):
    """Build hooks nodes.

    """
    mn = self.cfg.GetMasterNode()

    assert self.group_uuid in self.owned_locks(locking.LEVEL_NODEGROUP)

    run_nodes = [mn] + self.cfg.GetNodeGroup(self.group_uuid).members

    return (run_nodes, run_nodes)

  def Exec(self, feedback_fn):
    inst_names = list(self.owned_locks(locking.LEVEL_INSTANCE))

    assert self.group_uuid not in self.target_uuids

    req = iallocator.IAReqGroupChange(instances=inst_names,
                                      target_groups=self.target_uuids)
    ial = iallocator.IAllocator(self.cfg, self.rpc, req)

    ial.Run(self.op.iallocator)

    if not ial.success:
      raise errors.OpPrereqError("Can't compute group evacuation using"
                                 " iallocator '%s': %s" %
                                 (self.op.iallocator, ial.info),
                                 errors.ECODE_NORES)

    jobs = LoadNodeEvacResult(self, ial.result, self.op.early_release, False)

    self.LogInfo("Iallocator returned %s job(s) for evacuating node group %s",
                 len(jobs), self.op.group_name)

    return ResultWithJobs(jobs)
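
  # Sketch of the flow above, added for clarity: the IAReqGroupChange request
  # asks the configured iallocator for a group-change plan covering every
  # instance in the group; LoadNodeEvacResult turns that plan into
  # per-instance opcode lists, and ResultWithJobs hands them to the job queue
  # so each instance is moved by its own job.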


class LUGroupVerifyDisks(NoHooksLU):
  """Verifies the status of all disks in a node group.

  """
  REQ_BGL = False

  def CheckPrereq(self):
    owned_inst_names = frozenset(self.owned_locks(locking.LEVEL_INSTANCE))
    owned_groups = frozenset(self.owned_locks(locking.LEVEL_NODEGROUP))
    owned_node_uuids = frozenset(self.owned_locks(locking.LEVEL_NODE))

    assert self.group_uuid in owned_groups

    # Check if locked instances are still correct
    CheckNodeGroupInstances(self.cfg, self.group_uuid, owned_inst_names)

    # Get instance information
    self.instances = dict(self.cfg.GetMultiInstanceInfoByName(owned_inst_names))

    # Check if node groups for locked instances are still correct
    CheckInstancesNodeGroups(self.cfg, self.instances,
                             owned_groups, owned_node_uuids, self.group_uuid)

  def _VerifyInstanceLvs(self, node_errors, offline_disk_instance_names,
                         missing_disks):
    node_lv_to_inst = MapInstanceLvsToNodes(
      [inst for inst in self.instances.values() if inst.disks_active])
    if node_lv_to_inst:
      node_uuids = utils.NiceSort(set(self.owned_locks(locking.LEVEL_NODE)) &
                                  set(self.cfg.GetVmCapableNodeList()))

      node_lvs = self.rpc.call_lv_list(node_uuids, [])

      for (node_uuid, node_res) in node_lvs.items():
        if node_res.offline:
          continue

        msg = node_res.fail_msg
        if msg:
          logging.warning("Error enumerating LVs on node %s: %s",
                          self.cfg.GetNodeName(node_uuid), msg)
          node_errors[node_uuid] = msg
          continue

        for lv_name, (_, _, lv_online) in node_res.payload.items():
          inst = node_lv_to_inst.pop((node_uuid, lv_name), None)
          if not lv_online and inst is not None:
            offline_disk_instance_names.add(inst.name)

      # Any LVs still left in node_lv_to_inst were not reported by their node
      # at all, i.e. they are missing; record them per instance.
      for key, inst in node_lv_to_inst.iteritems():
        missing_disks.setdefault(inst.name, []).append(list(key))

  def _VerifyDrbdStates(self, node_errors, offline_disk_instance_names):
    node_to_inst = {}
    for inst in self.instances.values():
      if not inst.disks_active or inst.disk_template != constants.DT_DRBD8:
        continue

      for node_uuid in itertools.chain([inst.primary_node],
                                       inst.secondary_nodes):
        node_to_inst.setdefault(node_uuid, []).append(inst)

    nodes_ip = dict((uuid, node.secondary_ip) for (uuid, node)
                    in self.cfg.GetMultiNodeInfo(node_to_inst.keys()))
    for (node_uuid, insts) in node_to_inst.items():
      node_disks = [(inst.disks, inst) for inst in insts]
      node_res = self.rpc.call_drbd_needs_activation(node_uuid, nodes_ip,
                                                     node_disks)
      msg = node_res.fail_msg
      if msg:
        logging.warning("Error getting DRBD status on node %s: %s",
                        self.cfg.GetNodeName(node_uuid), msg)
        node_errors[node_uuid] = msg
        continue

      faulty_disk_uuids = set(node_res.payload)
      for inst in self.instances.values():
        inst_disk_uuids = set([disk.uuid for disk in inst.disks])
        if inst_disk_uuids.intersection(faulty_disk_uuids):
          offline_disk_instance_names.add(inst.name)

  def Exec(self, feedback_fn):
    """Verify integrity of cluster disks.

    @rtype: tuple of three items
    @return: a tuple of (dict of node-to-node_error, list of instances
        which need activate-disks, dict of instance: (node, volume) for
        missing volumes)

    """
    node_errors = {}
    offline_disk_instance_names = set()
    missing_disks = {}

    self._VerifyInstanceLvs(node_errors, offline_disk_instance_names,
                            missing_disks)
    self._VerifyDrbdStates(node_errors, offline_disk_instance_names)

    return (node_errors, list(offline_disk_instance_names), missing_disks)
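
  # Hypothetical example of the triple returned by Exec(), added for
  # illustration only:
  #
  #   ({"node1-uuid": "Error enumerating LVs ..."},
  #    ["inst1.example.com"],                      # need "activate-disks"
  #    {"inst2.example.com": [["node2-uuid", "xenvg/disk0"]]})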