Safe Haskell: None
Module describing a node.
All updates are functional (copy-based) and return a new node with updated value.
Synopsis
- type TagMap = Map String Int
- data Node = Node {
- name :: String
- alias :: String
- tMem :: Double
- nMem :: Int
- iMem :: Int
- fMem :: Int
- fMemForth :: Int
- xMem :: Int
- tDsk :: Double
- fDsk :: Int
- fDskForth :: Int
- tCpu :: Double
- tCpuSpeed :: Double
- nCpu :: Int
- uCpu :: Int
- uCpuForth :: Int
- tSpindles :: Int
- fSpindles :: Int
- fSpindlesForth :: Int
- pList :: [Idx]
- pListForth :: [Idx]
- sList :: [Idx]
- sListForth :: [Idx]
- idx :: Ndx
- peers :: PeerMap
- failN1 :: Bool
- failN1Forth :: Bool
- rMem :: Int
- rMemForth :: Int
- pMem :: Double
- pMemForth :: Double
- pDsk :: Double
- pDskForth :: Double
- pRem :: Double
- pRemForth :: Double
- pCpu :: Double
- pCpuForth :: Double
- mDsk :: Double
- loDsk :: Int
- hiCpu :: Int
- hiSpindles :: Double
- instSpindles :: Double
- instSpindlesForth :: Double
- offline :: Bool
- isMaster :: Bool
- nTags :: [String]
- utilPool :: DynUtil
- utilLoad :: DynUtil
- utilLoadForth :: DynUtil
- pTags :: TagMap
- group :: Gdx
- iPolicy :: IPolicy
- exclStorage :: Bool
- migTags :: Set String
- rmigTags :: Set String
- locationTags :: Set String
- locationScore :: Int
- instanceMap :: Map (String, String) Int
- hypervisor :: Maybe Hypervisor
- pCpuEff :: Node -> Double
- pCpuEffForth :: Node -> Double
- type AssocList = [(Ndx, Node)]
- type List = Container Node
- noSecondary :: Ndx
- addTag :: Ord k => Map k Int -> k -> Map k Int
- addTags :: Ord k => Map k Int -> [k] -> Map k Int
- delTag :: Ord k => Map k Int -> k -> Map k Int
- delTags :: Ord k => Map k Int -> [k] -> Map k Int
- rejectAddTags :: TagMap -> [String] -> Bool
- conflictingPrimaries :: Node -> Int
- incIf :: Num a => Bool -> a -> a -> a
- decIf :: Num a => Bool -> a -> a -> a
- haveExclStorage :: List -> Bool
- create :: String -> Double -> Int -> Int -> Double -> Int -> Double -> Int -> Bool -> Int -> Int -> Gdx -> Bool -> Node
- mDskToloDsk :: Double -> Double -> Int
- mCpuTohiCpu :: Double -> Double -> Int
- computeHiSpindles :: Double -> Int -> Double
- setIdx :: Node -> Ndx -> Node
- setAlias :: Node -> String -> Node
- setOffline :: Node -> Bool -> Node
- setMaster :: Node -> Bool -> Node
- setNodeTags :: Node -> [String] -> Node
- setMigrationTags :: Node -> Set String -> Node
- setRecvMigrationTags :: Node -> Set String -> Node
- setLocationTags :: Node -> Set String -> Node
- setHypervisor :: Node -> Hypervisor -> Node
- setMdsk :: Node -> Double -> Node
- setMcpu :: Node -> Double -> Node
- setPolicy :: IPolicy -> Node -> Node
- computeMaxRes :: PeerMap -> Elem
- buildPeers :: Node -> List -> Node
- calcSpindleUse :: Bool -> Node -> Instance -> Double
- calcSpindleUseForth :: Bool -> Node -> Instance -> Double
- calcNewFreeSpindles :: Bool -> Node -> Instance -> Int
- calcNewFreeSpindlesForth :: Bool -> Node -> Instance -> Int
- calcFmemOfflineOrForthcoming :: Node -> Container Instance -> Int
- getInstanceDsrdLocScore :: Node -> Instance -> Int
- getLocationExclusionPairs :: Node -> Instance -> [(String, String)]
- setPri :: Node -> Instance -> Node
- setSec :: Node -> Instance -> Node
- computePDsk :: Int -> Double -> Double
- computeNewPDsk :: Node -> Int -> Int -> Double
- getPolicyHealth :: Node -> OpResult ()
- setCpuSpeed :: Node -> Double -> Node
- removePri :: Node -> Instance -> Node
- removeSec :: Node -> Instance -> Node
- addPri :: Node -> Instance -> OpResult Node
- addPriEx :: Bool -> Node -> Instance -> OpResult Node
- addSec :: Node -> Instance -> Ndx -> OpResult Node
- addSecEx :: Bool -> Node -> Instance -> Ndx -> OpResult Node
- addSecExEx :: Bool -> Bool -> Node -> Instance -> Ndx -> OpResult Node
- checkMigration :: Node -> Node -> OpResult ()
- availDisk :: Node -> Int
- iDsk :: Node -> Int
- reportedFreeMem :: Node -> Int
- recordedFreeMem :: Node -> Int
- missingMem :: Node -> Int
- unallocatedMem :: Node -> Int
- availMem :: Node -> Int
- prospectiveMem :: Node -> Instance -> Bool -> (Int, Int)
- availCpu :: Node -> Int
- instanceToEdges :: Instance -> [Edge]
- instancesToEdges :: List -> [Edge]
- nodesToBounds :: List -> Maybe Bounds
- nodeToSharedSecondaryEdge :: List -> Node -> [Edge]
- filterValid :: List -> [Edge] -> [Edge]
- mkNodeGraph :: List -> List -> Maybe Graph
- mkRebootNodeGraph :: List -> List -> List -> Maybe Graph
- showField :: Node -> String -> String
- showHeader :: String -> (String, Bool)
- list :: [String] -> Node -> [String]
- genOpSetOffline :: MonadFail m => Node -> Bool -> m OpCode
- genOobCommand :: MonadFail m => [Node] -> OobCommand -> m OpCode
- genPowerOnOpCodes :: MonadFail m => [Node] -> m [OpCode]
- genPowerOffOpCodes :: MonadFail m => [Node] -> m [OpCode]
- genAddTagsOpCode :: Node -> [String] -> OpCode
- defaultFields :: [String]
- computeGroups :: [Node] -> [(Gdx, [Node])]
Type declarations
The node type.
Constructor: Node, with the record fields listed in the synopsis above.
pCpuEff :: Node -> Double Source #
Derived parameter: ratio of virtual to physical CPUs, weighted by CPU speed.
pCpuEffForth :: Node -> Double Source #
Derived parameter: ratio of virtual to physical CPUs, weighted by CPU speed and taking forthcoming instances into account.
noSecondary :: Ndx Source #
Constant node index for a non-moveable instance.
Helper functions
rejectAddTags :: TagMap -> [String] -> Bool Source #
Check if we can add a list of tags to a tagmap.
conflictingPrimaries :: Node -> Int Source #
Check how many primary instances have conflicting tags. The algorithm is to sum the counts of all tags and then subtract the size of the tag map (since each tag has at least one non-conflicting instance); this is equivalent to summing, for each tag, its count minus one.
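To make the counting rule concrete, here is a minimal standalone sketch over a plain tag map (conflictCount is a hypothetical name used for illustration; the module's conflictingPrimaries works on a Node instead):

    import qualified Data.Map as Map

    -- Each entry maps a tag to the number of primary instances carrying it;
    -- an entry with count n contributes n - 1 conflicts.
    conflictCount :: Map.Map String Int -> Int
    conflictCount tm = sum (Map.elems tm) - Map.size tm

    -- Example: two instances share "web" and one has "db", so exactly one conflict:
    -- conflictCount (Map.fromList [("web", 2), ("db", 1)]) == 1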
incIf :: Num a => Bool -> a -> a -> a Source #
Helper function to increment a base value depending on the passed boolean argument.
decIf :: Num a => Bool -> a -> a -> a Source #
Helper function to decrement a base value depending on the passed boolean argument.
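As a plausible reading of the two helpers above (illustrative definitions only, marked with primed names; not necessarily the module's exact code):

    -- Add (or subtract) the delta only when the flag is set; otherwise
    -- return the base value unchanged.
    incIf' :: Num a => Bool -> a -> a -> a
    incIf' True  base delta = base + delta
    incIf' False base _     = base

    decIf' :: Num a => Bool -> a -> a -> a
    decIf' True  base delta = base - delta
    decIf' False base _     = base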
haveExclStorage :: List -> Bool Source #
Is exclusive storage enabled on any node?
Initialization functions
create :: String -> Double -> Int -> Int -> Double -> Int -> Double -> Int -> Bool -> Int -> Int -> Gdx -> Bool -> Node Source #
Create a new node.
The index and the peers map are empty, and will need to be updated later via the setIdx and buildPeers functions.
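A construction sketch under the assumption that create's positional arguments follow the field order implied above (name; total, node-reserved and free memory; total and free disk; total CPU capacity and CPU count; the offline flag; total and free spindles; group index; exclusive-storage flag). The values and the hypothetical exampleNode name are placeholders:

    import qualified Ganeti.HTools.Node as Node

    exampleNode :: Node.Node
    exampleNode =
      let raw = Node.create "node1.example.com"
                  32768 1024 24576   -- memory: total / node-reserved / free (assumed order)
                  500000 400000      -- disk: total / free (assumed order)
                  16 16              -- CPUs: total capacity / count (assumed order)
                  False              -- offline flag
                  12 12              -- spindles: total / free (assumed order)
                  0                  -- node group index (Gdx)
                  False              -- exclusive storage disabled
      in Node.setIdx raw 0           -- the index is set afterwards; buildPeers would
                                     -- follow once the full node list exists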
mDskToloDsk :: Double -> Double -> Int Source #
Conversion formula from mDsk/tDsk to loDsk.
mCpuTohiCpu :: Double -> Double -> Int Source #
Conversion formula from mCpu/tCpu to hiCpu.
computeHiSpindles :: Double -> Int -> Double Source #
Conversion formula from spindles and spindle ratio to hiSpindles.
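Reading the three conversions off their signatures, plausible standalone definitions look like the following (the rounding directions and exact semantics are assumptions; the primed names mark these as illustrations, not the module's code):

    -- Minimum free disk as an absolute value, from a ratio of total disk.
    mDskToloDsk' :: Double -> Double -> Int
    mDskToloDsk' minDskRatio totalDsk = ceiling (minDskRatio * totalDsk)

    -- Maximum usable CPUs as an absolute value, from a ratio of total CPUs.
    mCpuTohiCpu' :: Double -> Double -> Int
    mCpuTohiCpu' maxCpuRatio totalCpu = floor (maxCpuRatio * totalCpu)

    -- Spindle limit from the spindle ratio and the physical spindle count.
    computeHiSpindles' :: Double -> Int -> Double
    computeHiSpindles' spindleRatio spindles = spindleRatio * fromIntegral spindles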
setIdx :: Node -> Ndx -> Node Source #
Changes the index.
This is used only during the building of the data structures.
setAlias :: Node -> String -> Node Source #
Changes the alias.
This is used only during the building of the data structures.
setOffline :: Node -> Bool -> Node Source #
Sets the offline attribute.
setNodeTags :: Node -> [String] -> Node Source #
Sets the node tags attribute.
setMigrationTags :: Node -> Set String -> Node Source #
Set the migration tags.
setRecvMigrationTags :: Node -> Set String -> Node Source #
Set the migration tags a node is able to receive.
setLocationTags :: Node -> Set String -> Node Source #
Set the location tags.
setHypervisor :: Node -> Hypervisor -> Node Source #
Sets the hypervisor attribute.
setMcpu :: Node -> Double -> Node Source #
Sets the max CPU usage ratio. This will update the node's ipolicy, losing sharing (but this should be a rarely performed operation).
computeMaxRes :: PeerMap -> Elem Source #
Computes the maximum reserved memory for peers from a peer map.
calcSpindleUse :: Bool -> Node -> Instance -> Double Source #
Calculate the new spindle usage.
calcSpindleUseForth :: Bool -> Node -> Instance -> Double Source #
Calculate the new spindle usage including forthcoming instances.
calcNewFreeSpindles :: Bool -> Node -> Instance -> Int Source #
Calculate the new number of free spindles.
calcNewFreeSpindlesForth :: Bool -> Node -> Instance -> Int Source #
Calculate the new number of free spindles, including forthcoming instances.
getInstanceDsrdLocScore Source #
:: Node | the primary node of the instance |
-> Instance | the original instance |
-> Int | the desired location score of the instance |
Calculates the desired location score of an instance, given its primary node.
getLocationExclusionPairs Source #
Returns list of all pairs of node location and instance exclusion tags.
setPri :: Node -> Instance -> Node Source #
Assigns an instance to a node as primary and updates the used VCPU count, utilisation data, tags map and desired location score.
setSec :: Node -> Instance -> Node Source #
Assigns an instance to a node as secondary and updates disk utilisation.
computePDsk :: Int -> Double -> Double Source #
Computes the new pDsk
value, handling nodes without local disk
storage (we consider all their disk unused).
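One plausible standalone reading of this description (the exact handling is an assumption; the primed name marks it as an illustration, not the module's code):

    -- Fraction of free disk, with disk-less nodes treated as fully unused.
    computePDsk' :: Int -> Double -> Double
    computePDsk' free tDisk
      | tDisk <= 0 = 1                         -- no local disk: consider it all unused
      | otherwise  = fromIntegral free / tDisk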
computeNewPDsk :: Node -> Int -> Int -> Double Source #
Computes the new pDsk
value, handling the exclusive storage state.
Diagnostic functions
getPolicyHealth :: Node -> OpResult () Source #
For a node, diagnose whether it conforms to all policies. The type is chosen to represent that of a no-op node operation.
Update functions
setCpuSpeed :: Node -> Double -> Node Source #
Set the CPU speed.
addPriEx Source #
:: Bool | Whether to override the N+1 and other soft checks, useful if we come from a worse status (e.g. offline). If this is True, forthcoming instances may exceed available Node resources. |
-> Node | The target node |
-> Instance | The instance to add |
-> OpResult Node | The result of the operation, either the new version of the node or a failure mode |
Adds a primary instance (extended version).
addSec :: Node -> Instance -> Ndx -> OpResult Node Source #
Adds a secondary instance (basic version).
addSecEx :: Bool -> Node -> Instance -> Ndx -> OpResult Node Source #
Adds a secondary instance (extended version).
addSecExEx :: Bool -> Bool -> Node -> Instance -> Ndx -> OpResult Node Source #
Adds a secondary instance (doubly extended version). The first parameter tells addSecExEx to ignore disks completely. There is only one legitimate use case for this, namely failing over a DRBD instance where the primary node is offline (and hence will become the secondary afterwards).
checkMigration :: Node -> Node -> OpResult () Source #
Predicate on whether migration is supported between two nodes.
Stats functions
reportedFreeMem :: Node -> Int Source #
Returns state-of-world free memory on the node.
NOTE: This value is valid only before placement simulations.
TODO: Redefine this for memory overcommitment.
recordedFreeMem :: Node -> Int Source #
Computes state-of-record free memory on the node.
TODO: Redefine this for memory overcommitment.
missingMem :: Node -> Int Source #
Computes the amount of missing memory on the node.
NOTE: This formula uses free memory for the calculation, as opposed to used_memory in the definition; that is why it is the inverse.
Explanations for missing memory, (+) positive and (-) negative:
- (+) instances are using more memory than state-of-record: on KVM this might be due to the per-qemu-process overhead, and on Xen due to manually upsized domains (xen mem-set)
- (+) on KVM, non-qemu processes might be using more memory than what is reserved for the node (no isolation)
- (-) on KVM, qemu processes allocate memory on demand, so an instance grows over its lifetime until it reaches state-of-record (+ overhead)
- (-) on KVM, KSM might be active
- (-) on Xen, manually downsized domains (xen mem-set)
unallocatedMem :: Node -> Int Source #
Computes the guaranteed free memory, that is, the minimum of what is reported by the node (available bytes) and our calculation based on instance sizes (our records), thus taking missing memory into account.
NOTE 1: During placement simulations the recorded memory changes as instances are added to or removed from the node, so we have to calculate the missingMem (correction) before altering state-of-record, and then use that correction to estimate state-of-world memory usage _after_ the placements are done, rather than doing min(record, world).
NOTE 2: This is still only an approximation on KVM. As we shuffle instances during the simulation we consider their state-of-record size, but in the real world the moves would shuffle parts of the missing memory as well. Unfortunately, as long as we don't have a more fine-grained model that can better explain missing memory (broken down by root cause), we can't do better.
NOTE 3: This is a hard limit based on available bytes and our bookkeeping. In case of memory overcommitment, both recordedFreeMem and reportedFreeMem would be extended by swap size on KVM or balloon size on Xen (their nominal and reported values).
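A rough, purely illustrative sketch of the pre-simulation relationship between the three memory figures (hypothetical name and formula, not module code):

    unallocatedSketch :: Int -> Int -> Int
    unallocatedSketch recordedFree reportedFree =
      let missing = recordedFree - reportedFree  -- positive when memory is unaccounted for
      in recordedFree - max 0 missing            -- equals min recordedFree reportedFree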
availMem :: Node -> Int Source #
Computes the amount of available memory on a given node. Compared to unallocatedMem, this also takes into account memory reserved for secondary instances.
NOTE: In case of memory overcommitment there would also be an additional soft limit based on the RAM size dedicated to instances and the sum of state-of-record instance sizes (iMem): (tMem - nMem) * overcommit_ratio - iMem.
prospectiveMem Source #
:: Node | |
-> Instance | |
-> Bool | Operation: True if add, False for remove. |
-> (Int, Int) | Tuple (used_by_instances, guaranteed_free_mem) |
Prospective memory stats after instance operation.
Node graph functions
Making of a Graph from a node/instance list
instanceToEdges :: Instance -> [Edge] Source #
Transform an instance into a list of edges on the node graph.
instancesToEdges :: List -> [Edge] Source #
Transform the list of instances into a list of destination edges.
nodesToBounds :: List -> Maybe Bounds Source #
Transform the list of nodes into vertex bounds. Returns Nothing if the list is empty.
nodeToSharedSecondaryEdge :: List -> Node -> [Edge] Source #
The clique of the primary nodes of the instances with a given secondary. Returns the full graph of those nodes that are the primary node of at least one instance that has the given node as secondary.
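To illustrate just the clique construction in isolation, here is a standalone sketch over plain node indices (a hypothetical helper, not the module's Edge-based code):

    import Data.List (nub)

    -- Given the primary-node indices of all instances that use one particular
    -- node as secondary, build every edge between distinct primaries.
    cliqueEdges :: [Int] -> [(Int, Int)]
    cliqueEdges prims = [ (a, b) | a <- ps, b <- ps, a < b ]
      where ps = nub prims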
filterValid :: List -> [Edge] -> [Edge] Source #
Predicate of an edge having both vertices in a set of nodes.
mkNodeGraph :: List -> List -> Maybe Graph Source #
Transform a Node + Instance list into a NodeGraph type. Returns Nothing if the node list is empty.
mkRebootNodeGraph :: List -> List -> List -> Maybe Graph Source #
Transform Nodes + Instances into a NodeGraph with all reboot exclusions. This includes edges between nodes that are the primary nodes of instances that have the same secondary node. Nodes not in the node list will not be part of the graph, but they are still considered for the edges arising from two instances having the same secondary node. Returns Nothing if the node list is empty.
Display functions
showField Source #
:: Node | Node which we're querying |
-> String | Field name |
-> String | Field value as string |
Return a field for a given node.
showHeader :: String -> (String, Bool) Source #
Returns the header and numeric property of a field.
genOpSetOffline :: MonadFail m => Node -> Bool -> m OpCode Source #
Generate an OpCode for setting a node's offline status.
genOobCommand :: MonadFail m => [Node] -> OobCommand -> m OpCode Source #
Generate an OpCode for applying an OobCommand to the given nodes.
genPowerOnOpCodes :: MonadFail m => [Node] -> m [OpCode] Source #
Generate OpCodes for powering on a list of nodes.
genPowerOffOpCodes :: MonadFail m => [Node] -> m [OpCode] Source #
Generate OpCodes for powering off a list of nodes.
genAddTagsOpCode :: Node -> [String] -> OpCode Source #
Generate an OpCode for adding tags to a node.
defaultFields :: [String] Source #
Constant holding the fields we're displaying by default.
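A hedged usage sketch of the display helpers above (renderNode is a hypothetical wrapper; it assumes a Node value built elsewhere, e.g. as sketched in the initialization section):

    import qualified Ganeti.HTools.Node as Node

    -- Render one node's default display fields as strings.
    renderNode :: Node.Node -> [String]
    renderNode n = Node.list Node.defaultFields n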