Merge pull request #10486 from gtmanfred/glusterfs
Add glusterfs state and module
Commit: 6692c242a9
salt/modules/glusterfs.py (new file, 192 lines)

@@ -0,0 +1,192 @@
# -*- coding: utf-8 -*-
'''
Manage a glusterfs pool.
'''

# Import python libs
from __future__ import generators
import logging
import os.path
import socket

# Import salt libs
import salt.utils
import salt.utils.cloud as suc

log = logging.getLogger(__name__)


def __virtual__():
    '''
    Only load this module if the gluster and dig commands exist
    '''
    if salt.utils.which('gluster') and salt.utils.which('dig'):
        return True
    return False


def list_peers():
    '''
    Return a list of gluster peers

    CLI Example:

    .. code-block:: bash

        salt '*' glusterfs.list_peers
    '''
    # awk prints the second field of each 'Hostname: <peer>' line, yielding
    # one peer hostname per line.
    get_peer_list = 'gluster peer status | awk \'/Hostname/ {print $2}\''
    return __salt__['cmd.run'](get_peer_list).splitlines()
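

# Illustration only, not part of the module's public interface: the awk
# pipeline above can be avoided by filtering the raw ``gluster peer status``
# output directly in Python. A sketch of that approach:
def _list_peers_no_awk():
    out = __salt__['cmd.run']('gluster peer status')
    return [line.split(':', 1)[1].strip()
            for line in out.splitlines()
            if line.startswith('Hostname')]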


def peer(name=None, **kwargs):
    '''
    Add another node into the peer pool.

    Note: the ability to probe peers by IP address still needs to be added.

    name
        The remote host with which to peer.

    CLI Example:

    .. code-block:: bash

        salt 'one.gluster.*' glusterfs.peer two
    '''
    if not suc.check_name(name, 'a-zA-Z0-9._-'):
        return 'Invalid characters in peer name'
    # Only probe hosts that resolve, either via DNS (dig) or /etc/hosts.
    hosts_file = __salt__['hosts.list_hosts']()
    hosts_list = []
    for ip, hosts in hosts_file.items():
        hosts_list.extend(hosts)
    dig_info = __salt__['dig.A'](name)
    if dig_info or name in hosts_list:
        cmd = 'gluster peer probe {0}'.format(name)
        return __salt__['cmd.run'](cmd)
    return 'Node does not resolve to an ip address'


def create(name,
           peers=None,
           brick='/srv/gluster/brick1',
           replica=False,
           count=2,
           **kwargs):
    '''
    Create a glusterfs volume.

    name
        name of the gluster volume

    brick
        filesystem path for the brick

    peers
        peers that will be part of the cluster

    replica
        replicated or distributed cluster

    count
        number of nodes per replica block

    short
        (optional) use short names for peering

    CLI Example:

    .. code-block:: bash

        salt 'one.gluster*' glusterfs.create mymount brick='/srv/' \
            peers='["one", "two"]'

        salt -G 'gluster:master' glusterfs.create mymount \
            brick='/srv/gluster/brick1' \
            peers='["one", "two", "three", "four"]' replica=True count=2 \
            short=True start=True
    '''
    check_peers = 'gluster peer status | awk \'/Hostname/ {print $2}\''
    active_peers = __salt__['cmd.run'](check_peers).splitlines()
    hostname = socket.gethostname()
    if 'short' in kwargs and kwargs['short']:
        hostname = hostname.split('.')[0]
    if not all([peer in active_peers for peer in peers if peer != hostname]):
        return 'Not all peers have been probed.'

    if not os.path.exists(brick):
        return 'Brick path doesn\'t exist.'

    if not suc.check_name(name, 'a-zA-Z0-9._-'):
        return 'Invalid characters in volume name'

    if not all([suc.check_name(peer, 'a-zA-Z0-9._-') for peer in peers]):
        return 'Invalid characters in a peer name.'

    # Assemble the create command; e.g. name='myvol', replica=True, count=2,
    # peers=['one', 'two'] yields:
    #   gluster volume create myvol replica 2 \
    #       one:/srv/gluster/brick1 two:/srv/gluster/brick1
    cmd = 'gluster volume create {0} '.format(name)
    if replica:
        cmd += 'replica {0} '.format(count)
    for peer in peers:
        cmd += '{0}:{1} '.format(peer, brick)

    log.debug('Clustering command:\n{0}'.format(cmd))
    ret = __salt__['cmd.run'](cmd)

    if 'start' in kwargs and kwargs['start']:
        ret = __salt__['cmd.run']('gluster volume start {0}'.format(name))

    return ret


def list_volumes():
    '''
    List configured volumes

    CLI Example:

    .. code-block:: bash

        salt '*' glusterfs.list_volumes
    '''
    return __salt__['cmd.run']('gluster volume list').splitlines()


def status(name):
    '''
    Check the status of a gluster volume.

    name
        Volume name

    CLI Example:

    .. code-block:: bash

        salt '*' glusterfs.status mycluster
    '''
    volumes = list_volumes()
    if name in volumes:
        cmd = 'gluster volume status {0}'.format(name)
        return __salt__['cmd.run'](cmd)
    return 'Volume {0} doesn\'t exist'.format(name)


def start(name):
    '''
    Start a gluster volume.

    name
        Volume name

    CLI Example:

    .. code-block:: bash

        salt '*' glusterfs.start mycluster
    '''
    volumes = list_volumes()
    if name in volumes:
        cmd = 'gluster volume start {0}'.format(name)
        return __salt__['cmd.run'](cmd)
    return False
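
The CLI examples above can also be driven from Python. A minimal sketch,
assuming a running Salt master and two minions with the hypothetical IDs
'one' and 'two':

    # Exercise the new execution module through Salt's Python client.
    import salt.client

    local = salt.client.LocalClient()

    # Probe a peer from minion 'one', then confirm it shows up.
    print(local.cmd('one', 'glusterfs.peer', ['two']))
    print(local.cmd('one', 'glusterfs.list_peers'))

    # Create and start a replicated volume spanning both peers.
    print(local.cmd('one', 'glusterfs.create', ['myvol'],
                    kwarg={'peers': ['one', 'two'], 'replica': True,
                           'count': 2, 'start': True}))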
salt/states/glusterfs.py (new file, 178 lines)

@@ -0,0 +1,178 @@
# -*- coding: utf-8 -*-
'''
Manage a glusterfs pool.
'''

# Import python libs
from __future__ import generators
import logging
import socket

# Import salt libs
import salt.utils.cloud as suc

log = logging.getLogger(__name__)


def __virtual__():
    '''
    Only load this state if the glusterfs execution module is available
    '''
    return 'glusterfs' if 'glusterfs.list_volumes' in __salt__ else False


def peered(name):
    '''
    Check if node is peered.

    name
        The remote host with which to peer.

    .. code-block:: yaml

        peer-cluster:
          glusterfs.peered:
            - name: two

        peer-clusters:
          glusterfs.peered:
            - names:
              - one
              - two
              - three
              - four
    '''
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}

    peers = __salt__['glusterfs.list_peers']()

    if name in peers:
        ret['result'] = True
        ret['comment'] = 'Host {0} already peered'.format(name)
        return ret
    elif __opts__['test']:
        ret['comment'] = 'Peer {0} will be added.'.format(name)
        ret['result'] = None
        return ret

    if not suc.check_name(name, 'a-zA-Z0-9._-'):
        ret['comment'] = 'Invalid characters in peer name.'
        ret['result'] = False
        return ret

    ret['comment'] = __salt__['glusterfs.peer'](name)

    newpeers = __salt__['glusterfs.list_peers']()
    if name in newpeers:
        ret['result'] = True
        ret['changes'] = {'new': newpeers, 'old': peers}
    elif name == socket.gethostname().split('.')[0]:
        # Probing the local host is a no-op, so count it as peered.
        ret['result'] = True
        return ret
    else:
        ret['result'] = False
    return ret


def created(name, peers=None, **kwargs):
    '''
    Check if volume already exists

    name
        name of the volume

    .. code-block:: yaml

        gluster-cluster:
          glusterfs.created:
            - name: mycluster
            - brick: /srv/gluster/drive1
            - replica: True
            - count: 2
            - short: True
            - start: True
            - peers:
              - one
              - two
              - three
              - four
    '''
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    volumes = __salt__['glusterfs.list_volumes']()
    if name in volumes:
        ret['result'] = True
        ret['comment'] = 'Volume {0} already exists.'.format(name)
        return ret
    elif __opts__['test']:
        ret['comment'] = 'Volume {0} will be created'.format(name)
        ret['result'] = None
        return ret

    if not suc.check_name(name, 'a-zA-Z0-9._-'):
        ret['comment'] = 'Invalid characters in volume name.'
        ret['result'] = False
        return ret

    if not all([suc.check_name(peer, 'a-zA-Z0-9._-') for peer in peers]):
        ret['comment'] = 'Invalid characters in a peer name.'
        ret['result'] = False
        return ret

    ret['comment'] = __salt__['glusterfs.create'](name, peers, **kwargs)

    if name in __salt__['glusterfs.list_volumes']():
        ret['changes'] = {'new': name, 'old': ''}
        ret['result'] = True

    return ret


def started(name, **kwargs):
    '''
    Check if volume has been started

    name
        name of the volume

    .. code-block:: yaml

        gluster-started:
          glusterfs.started:
            - name: mycluster
    '''
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': False}
    volumes = __salt__['glusterfs.list_volumes']()
    if name not in volumes:
        ret['result'] = False
        ret['comment'] = 'Volume {0} does not exist'.format(name)
        return ret

    if not suc.check_name(name, 'a-zA-Z0-9._-'):
        ret['comment'] = 'Invalid characters in volume name.'
        ret['result'] = False
        return ret

    status = __salt__['glusterfs.status'](name)

    if status != 'Volume {0} is not started'.format(name):
        # Any other output means the volume is already running.
        ret['comment'] = status
        ret['result'] = True
        return ret
    elif __opts__['test']:
        ret['comment'] = 'Volume {0} will be started'.format(name)
        ret['result'] = None
        return ret

    ret['comment'] = __salt__['glusterfs.start'](name)
    ret['result'] = True

    status = __salt__['glusterfs.status'](name)
    if status == 'Volume {0} is not started'.format(name):
        ret['comment'] = status
        ret['result'] = False
        return ret

    ret['changes'] = {'new': 'started', 'old': ''}
    return ret
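
Because the state functions touch the outside world only through __salt__
and __opts__, they can be exercised without a live gluster pool. A minimal
sketch, injecting the loader dunders by hand the way Salt's own unit tests
do (the mocked return values are hypothetical):

    # Drive glusterfs.peered in test mode with a stubbed execution module.
    import salt.states.glusterfs as glusterfs_state

    glusterfs_state.__salt__ = {'glusterfs.list_peers': lambda: ['one']}
    glusterfs_state.__opts__ = {'test': True}

    ret = glusterfs_state.peered('two')
    assert ret['result'] is None
    assert ret['comment'] == 'Peer two will be added.'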