Convert glusterfs module to use XML

The gluster CLI can produce XML output for almost every command, and it
avoids pausing for interactive questions when run in script mode. Make
use of both capabilities and avoid scraping text output that is often
subject to change between versions.
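
As a rough standalone sketch of that pattern (illustrative only, not code
from this commit: it uses subprocess and a placeholder volume name where
the module uses Salt's cmd.run):

    import subprocess
    import xml.etree.ElementTree as ET

    # Ask the CLI for machine-readable output and suppress interactive prompts.
    out = subprocess.check_output(
        ['gluster', '--xml', '--mode=script', 'volume', 'info', 'myvol1'])
    root = ET.fromstring(out)
    # Every cliOutput document carries opRet/opErrstr for error checking.
    if int(root.find('opRet').text) != 0:
        raise RuntimeError(root.find('opErrstr').text)
    print([v.find('name').text for v in root.iter('volume')])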

This should help future-proof this module, as any changes to the XML
output should be integrated automatically.

As part of this change, I added a glusterfs.info function and added the
force keyword argument to volume start and volume stop.
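
Assuming the usual Salt CLI kwarg syntax, the new entry points are invoked
like the docstring examples added below (myvolume is a placeholder name):

    salt '*' glusterfs.info myvolume
    salt '*' glusterfs.start_volume myvolume force=True
    salt '*' glusterfs.stop_volume myvolume force=True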

Fixes #30051
Joe Julian 2015-12-28 21:24:12 -08:00
parent 3c63527313
commit 01a8e7ee10
2 changed files with 460 additions and 234 deletions

salt/modules/glusterfs.py

@@ -6,16 +6,17 @@ from __future__ import absolute_import
 # Import python libs
 import logging
+import xml.etree.ElementTree as ET
 
 # Import 3rd-party libs
 # pylint: disable=import-error,redefined-builtin
 from salt.ext.six.moves import range
-from salt.ext.six.moves import shlex_quote as _cmd_quote
 # pylint: enable=import-error,redefined-builtin
 
 # Import salt libs
 import salt.utils
 import salt.utils.cloud as suc
+from salt.exceptions import CommandExecutionError, SaltInvocationError
 
 log = logging.getLogger(__name__)
@@ -41,6 +42,40 @@ def _get_minor_version():
     return version
 
 
+def _gluster(cmd):
+    '''
+    Perform a gluster command.
+    '''
+    # We will pass the command string as stdin to allow for much longer
+    # command strings. This is especially useful for creating large volumes
+    # where the list of bricks exceeds 128 characters.
+    return __salt__['cmd.run'](
+        'gluster --mode=script', stdin="{0}\n".format(cmd))
+
+
+def _gluster_xml(cmd):
+    '''
+    Perform a gluster --xml command and check for and raise errors.
+    '''
+    root = ET.fromstring(
+        __salt__['cmd.run'](
+            'gluster --xml --mode=script', stdin="{0}\n".format(cmd)
+        ).replace("\n", ""))
+    if int(root.find('opRet').text) != 0:
+        raise CommandExecutionError(root.find('opErrstr').text)
+    return root
+
+
+def _etree_to_dict(t):
+    if len(t.getchildren()) > 0:
+        d = {}
+        for child in t.getchildren():
+            d[child.tag] = _etree_to_dict(child)
+    else:
+        d = t.text
+    return d
+
+
 def list_peers():
     '''
     Return a list of gluster peers
@@ -70,12 +105,12 @@ def list_peers():
     '''
-    get_peer_list = 'gluster peer status | awk \'/Hostname/ {print $2}\''
-    result = __salt__['cmd.run'](get_peer_list, python_shell=True)
-    if 'No peers present' in result:
+    root = _gluster_xml('peer status')
+    result = [x.find('hostname').text for x in root.iter('peer')]
+    if len(result) == 0:
         return None
     else:
-        return result.splitlines()
+        return result
 
 
 def peer(name):
@@ -109,10 +144,11 @@ def peer(name):
     '''
     if suc.check_name(name, 'a-zA-Z0-9._-'):
-        return 'Invalid characters in peer name'
+        raise SaltInvocationError(
+            'Invalid characters in peer name "{0}"'.format(name))
 
-    cmd = 'gluster peer probe {0}'.format(name)
-    return __salt__['cmd.run'](cmd)
+    cmd = 'peer probe {0}'.format(name)
+    return _gluster_xml(cmd).find('opRet').text == '0'
 
 
 def create(name, bricks, stripe=False, replica=False, device_vg=False,
@@ -165,20 +201,22 @@ def create(name, bricks, stripe=False, replica=False, device_vg=False,
     # Error for block devices with multiple bricks
     if device_vg and len(bricks) > 1:
-        return 'Error: Block device backend volume does not support multipl' +\
-            'bricks'
+        raise SaltInvocationError('Block device backend volume does not ' +
+                                  'support multiple bricks')
 
     # Validate bricks syntax
     for brick in bricks:
         try:
             peer_name, path = brick.split(':')
             if not path.startswith('/'):
-                return 'Error: Brick paths must start with /'
+                raise SaltInvocationError(
+                    'Brick paths must start with / in {0}'.format(brick))
         except ValueError:
-            return 'Error: Brick syntax is <peer>:<path>'
+            raise SaltInvocationError(
+                'Brick syntax is <peer>:<path> got {0}'.format(brick))
 
     # Format creation call
-    cmd = 'gluster volume create {0} '.format(name)
+    cmd = 'volume create {0} '.format(name)
     if stripe:
         cmd += 'stripe {0} '.format(stripe)
     if replica:
@@ -192,16 +230,11 @@ def create(name, bricks, stripe=False, replica=False, device_vg=False,
         cmd += ' force'
 
     log.debug('Clustering command:\n{0}'.format(cmd))
-    ret = __salt__['cmd.run'](cmd)
-    if 'failed' in ret:
-        return ret
+    _gluster_xml(cmd)
 
     if start:
-        result = __salt__['cmd.run']('gluster volume start {0}'.format(name))
-        if result.endswith('success'):
-            return 'Volume {0} created and started'.format(name)
-        else:
-            return result
+        _gluster_xml('gluster volume start {0}'.format(name))
+        return 'Volume {0} created and started'.format(name)
     else:
         return 'Volume {0} created. Start volume to use'.format(name)
@@ -217,11 +250,10 @@ def list_volumes():
         salt '*' glusterfs.list_volumes
     '''
-    results = __salt__['cmd.run']('gluster volume list').splitlines()
-    if results[0] == 'No volumes present in cluster':
-        return []
-    else:
-        return results
+    get_volume_list = 'gluster --xml volume list'
+    root = _gluster_xml('volume list')
+    results = [x.text for x in root.iter('volume')]
+    return results
 
 
 def status(name):
@@ -237,126 +269,140 @@ def status(name):
         salt '*' glusterfs.status myvolume
     '''
-    # Get minor version
-    minor_version = _get_minor_version()
     # Get volume status
-    cmd = 'gluster volume status {0}'.format(name)
-    result = __salt__['cmd.run'](cmd).splitlines()
-    if 'does not exist' in result[0]:
-        return result[0]
-    if 'is not started' in result[0]:
-        return result[0]
+    root = _gluster_xml('volume status {0}'.format(name))
 
     ret = {'bricks': {}, 'nfs': {}, 'healers': {}}
-    # Iterate line by line, concatenating lines the gluster cli separated
-    for line_number in range(len(result)):
-        line = result[line_number]
-        if line.startswith('Brick'):
-            # See if this line is broken up into multiple lines
-            while len(line.split()) < 5:
-                line_number = line_number + 1
-                line = line.rstrip() + result[line_number]
-            # Parse Brick data
-            if minor_version >= 7:
-                brick, port, port_rdma, online, pid = line.split()[1:]
-            else:
-                brick, port, online, pid = line.split()[1:]
-            host, path = brick.split(':')
-            data = {'port': port, 'pid': pid, 'host': host, 'path': path}
-            if online == 'Y':
-                data['online'] = True
-            else:
-                data['online'] = False
-            # Store, keyed by <host>:<brick> string
-            ret['bricks'][brick] = data
-        elif line.startswith('NFS Server on'):
-            # See if this line is broken up into multiple lines
-            while len(line.split()) < 5:
-                line_number = line_number + 1
-                line = line.rstrip() + result[line_number]
-            # Parse NFS Server data
-            if minor_version >= 7:
-                host, port, port_rdma, online, pid = line.split()[3:]
-            else:
-                host, port, online, pid = line.split()[3:]
-            data = {'port': port, 'pid': pid}
-            if online == 'Y':
-                data['online'] = True
-            else:
-                data['online'] = False
-            # Store, keyed by hostname
-            ret['nfs'][host] = data
-        elif line.startswith('Self-heal Daemon on'):
-            # See if this line is broken up into multiple lines
-            while len(line.split()) < 5:
-                line_number = line_number + 1
-                line = line.rstrip() + result[line_number]
-            # Parse NFS Server data
-            if minor_version >= 7:
-                host, port, port_rdma, online, pid = line.split()[3:]
-            else:
-                host, port, online, pid = line.split()[3:]
-            data = {'port': port, 'pid': pid}
-            if online == 'Y':
-                data['online'] = True
-            else:
-                data['online'] = False
-            # Store, keyed by hostname
-            ret['healers'][host] = data
+
+    def etree_legacy_wrap(t):
+        ret = _etree_to_dict(t)
+        ret['online'] = (ret['status'] == '1')
+        ret['host'] = ret['hostname']
+        return ret
+
+    # Build a hash to map hostname to peerid
+    hostref = {}
+    for node in root.iter('node'):
+        peerid = node.find('peerid').text
+        hostname = node.find('hostname').text
+        if hostname not in ('NFS Server', 'Self-heal Daemon'):
+            hostref[peerid] = hostname
+
+    for node in root.iter('node'):
+        hostname = node.find('hostname').text
+        if hostname not in ('NFS Server', 'Self-heal Daemon'):
+            path = node.find('path').text
+            ret['bricks'][
+                '{0}:{1}'.format(hostname, path)] = etree_legacy_wrap(node)
+        elif hostname == 'NFS Server':
+            peerid = node.find('peerid').text
+            true_hostname = hostref[peerid]
+            ret['nfs'][true_hostname] = etree_legacy_wrap(node)
+        else:
+            peerid = node.find('peerid').text
+            true_hostname = hostref[peerid]
+            ret['healers'][true_hostname] = etree_legacy_wrap(node)
     return ret
 
 
-def start_volume(name):
+def info(name):
+    '''
+    .. versionadded:: 2015.8.4
+
+    Return the gluster volume info.
+
+    name
+        Volume name
+
+    CLI Example:
+
+    .. code-block:: bash
+
+        salt '*' glusterfs.info myvolume
+    '''
+    cmd = 'volume info {0}'.format(name)
+    root = _gluster_xml(cmd)
+
+    volume = [x for x in root.iter('volume')][0]
+
+    ret = {name: _etree_to_dict(volume)}
+
+    bricks = {}
+    for i, brick in enumerate(volume.iter('brick'), start=1):
+        brickkey = 'brick{0}'.format(i)
+        bricks[brickkey] = {'path': brick.text}
+        for child in brick.getchildren():
+            if not child.tag == 'name':
+                bricks[brickkey].update({child.tag: child.text})
+        for k, v in brick.items():
+            bricks[brickkey][k] = v
+    ret[name]['bricks'] = bricks
+
+    options = {}
+    for option in volume.iter('option'):
+        options[option.find('name').text] = option.find('value').text
+    ret[name]['options'] = options
+
+    return ret
+
+
+def start_volume(name, force=False):
     '''
     Start a gluster volume.
 
     name
         Volume name
 
+    force
+        Force the volume start even if the volume is started
+        .. versionadded:: 2015.8.4
+
     CLI Example:
 
     .. code-block:: bash
 
         salt '*' glusterfs.start mycluster
     '''
-    volumes = list_volumes()
-    if name in volumes:
-        if isinstance(status(name), dict):
-            return 'Volume already started'
-        cmd = 'gluster volume start {0}'.format(name)
-        result = __salt__['cmd.run'](cmd)
-        if result.endswith('success'):
-            return 'Volume {0} started'.format(name)
-        else:
-            return result
-    return 'Volume does not exist'
+    cmd = 'volume start {0}'.format(name)
+    if force:
+        cmd = '{0} force'.format(cmd)
+
+    volinfo = info(name)
+
+    if not force and volinfo['status'] == '1':
+        return 'Volume already started'
+
+    _gluster_xml(cmd)
+    return 'Volume {0} started'.format(name)
 
 
-def stop_volume(name):
+def stop_volume(name, force=False):
     '''
     Stop a gluster volume.
 
     name
         Volume name
 
+    force
+        Force stop the volume
+        .. versionadded:: 2015.8.4
+
     CLI Example:
 
     .. code-block:: bash
 
         salt '*' glusterfs.stop_volume mycluster
     '''
-    vol_status = status(name)
-    if isinstance(vol_status, dict):
-        cmd = 'yes | gluster volume stop {0}'.format(_cmd_quote(name))
-        result = __salt__['cmd.run'](cmd, python_shell=True)
-        if result.splitlines()[0].endswith('success'):
-            return 'Volume {0} stopped'.format(name)
-        else:
-            return result
-    return vol_status
+    status(name)
+
+    cmd = 'volume stop {0}'.format(name)
+    if force:
+        cmd += ' force'
+
+    _gluster_xml(cmd)
+    return 'Volume {0} stopped'.format(name)
 
 
 def delete(target, stop=True):
@@ -370,28 +416,25 @@ def delete(target, stop=True):
         Stop volume before delete if it is started, True by default
     '''
     if target not in list_volumes():
-        return 'Volume does not exist'
-
-    cmd = 'yes | gluster volume delete {0}'.format(_cmd_quote(target))
+        raise SaltInvocationError('Volume {0} does not exist'.format(target))
 
     # Stop volume if requested to and it is running
-    if stop is True and isinstance(status(target), dict):
-        stop_volume(target)
-        stopped = True
-    else:
-        stopped = False
-        # Warn volume is running if stop not requested
-        if isinstance(status(target), dict):
-            return 'Error: Volume must be stopped before deletion'
+    running = (info(target)['status'] == '1')
 
-    result = __salt__['cmd.run'](cmd, python_shell=True)
-    if result.splitlines()[0].endswith('success'):
-        if stopped:
-            return 'Volume {0} stopped and deleted'.format(target)
-        else:
-            return 'Volume {0} deleted'.format(target)
+    if not stop and running:
+        # Fail if volume is running if stop is not requested
+        raise SaltInvocationError(
+            'Volume {0} must be stopped before deletion'.format(target))
+
+    if running:
+        stop_volume(target, force=True)
+
+    cmd = 'volume delete {0}'.format(target)
+    _gluster_xml(cmd)
+    if running:
+        return 'Volume {0} stopped and deleted'.format(target)
     else:
-        return result
+        return 'Volume {0} deleted'.format(target)
 
 
 def add_volume_bricks(name, bricks):
@@ -407,35 +450,27 @@ def add_volume_bricks(name, bricks):
     new_bricks = []
 
-    cmd = 'echo yes | gluster volume add-brick {0}'.format(name)
+    cmd = 'volume add-brick {0}'.format(name)
 
     if isinstance(bricks, str):
         bricks = [bricks]
 
-    volume_bricks = status(name)
-    if 'does not exist' in volume_bricks:
-        return volume_bricks
-    if 'is not started' in volume_bricks:
-        return volume_bricks
+    volume_bricks = [x['path'] for x in info(name)['bricks'].values()]
 
     for brick in bricks:
-        if brick in volume_bricks['bricks']:
-            log.debug('Brick {0} already in volume {1}...excluding from command'.format(brick, name))
+        if brick in volume_bricks:
+            log.debug(
+                'Brick {0} already in volume {1}...excluding from command'.format(brick, name))
         else:
            new_bricks.append(brick)
 
     if len(new_bricks) > 0:
         for brick in new_bricks:
-            cmd += ' '+str(brick)
-        result = __salt__['cmd.run'](cmd)
-        if result.endswith('success'):
-            return '{0} bricks successfully added to the volume {1}'.format(len(new_bricks), name)
-        else:
-            return result
+            cmd += ' {0}'.format(brick)
+        _gluster_xml(cmd)
+        return '{0} bricks successfully added to the volume {1}'.format(len(new_bricks), name)
     else:
         return 'Bricks already in volume {0}'.format(name)
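
The tests below exercise these helpers against canned XML. For a quick
sense of what the _etree_to_dict helper above produces, here is a
standalone copy run against a trimmed, made-up node element (illustrative
only; getchildren() matches the Python of this era):

    import xml.etree.ElementTree as ET

    def _etree_to_dict(t):
        # Same logic as the helper added above: leaf elements map to their
        # text, interior elements map to dicts keyed by child tag.
        if len(t.getchildren()) > 0:
            d = {}
            for child in t.getchildren():
                d[child.tag] = _etree_to_dict(child)
        else:
            d = t.text
        return d

    node = ET.fromstring(
        '<node><hostname>node01</hostname><status>1</status></node>')
    print(_etree_to_dict(node))  # {'hostname': 'node01', 'status': '1'}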

tests/unit/modules/glusterfs_test.py

@@ -1,6 +1,7 @@
 # -*- coding: utf-8 -*-
 '''
     :codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
+    :codeauthor: :email:`Joe Julian <me@joejulian.name>`
 '''
 
 # Import Python libs
@@ -18,13 +19,154 @@ from salttesting.mock import (
 # Import Salt Libs
 from salt.modules import glusterfs
 import salt.utils.cloud as suc
+from salt.exceptions import CommandExecutionError, SaltInvocationError
 
 # Globals
 glusterfs.__salt__ = {}
+
+xml_peer_present = """
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+  <opRet>0</opRet>
+  <peer>
+    <hostname>node02</hostname>
+  </peer>
+</cliOutput>
+"""
+
+xml_volume_present = """
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+  <opRet>0</opRet>
+  <volList>
+    <volume>Newvolume1</volume>
+    <volume>Newvolume2</volume>
+  </volList>
+</cliOutput>
+"""
+
+xml_volume_absent = """
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+  <opRet>0</opRet>
+  <volList>
+    <count>0</count>
+  </volList>
+</cliOutput>
+"""
+
+xml_volume_status = """
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+  <opRet>0</opRet>
+  <volStatus>
+    <volumes>
+      <volume>
+        <volName>myvol1</volName>
+        <nodeCount>3</nodeCount>
+        <node>
+          <hostname>node01</hostname>
+          <path>/tmp/foo</path>
+          <peerid>830700d7-0684-497c-a12c-c02e365fb90b</peerid>
+          <status>1</status>
+          <port>49155</port>
+          <ports>
+            <tcp>49155</tcp>
+            <rdma>N/A</rdma>
+          </ports>
+          <pid>2470</pid>
+        </node>
+        <node>
+          <hostname>NFS Server</hostname>
+          <path>localhost</path>
+          <peerid>830700d7-0684-497c-a12c-c02e365fb90b</peerid>
+          <status>0</status>
+          <port>N/A</port>
+          <ports>
+            <tcp>N/A</tcp>
+            <rdma>N/A</rdma>
+          </ports>
+          <pid>-1</pid>
+        </node>
+        <tasks/>
+      </volume>
+    </volumes>
+  </volStatus>
+</cliOutput>
+"""
+
+xml_volume_info_running = """
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+  <opRet>0</opRet>
+  <volInfo>
+    <volumes>
+      <volume>
+        <name>myvol1</name>
+        <id>f03c2180-cf55-4f77-ae0b-3650f57c82a1</id>
+        <status>1</status>
+        <statusStr>Started</statusStr>
+        <brickCount>1</brickCount>
+        <distCount>1</distCount>
+        <stripeCount>1</stripeCount>
+        <replicaCount>1</replicaCount>
+        <disperseCount>0</disperseCount>
+        <redundancyCount>0</redundancyCount>
+        <type>0</type>
+        <typeStr>Distribute</typeStr>
+        <transport>0</transport>
+        <bricks>
+          <brick uuid="830700d7-0684-497c-a12c-c02e365fb90b">node01:/tmp/foo<name>node01:/tmp/foo</name><hostUuid>830700d7-0684-497c-a12c-c02e365fb90b</hostUuid></brick>
+        </bricks>
+        <optCount>1</optCount>
+        <options>
+          <option>
+            <name>performance.readdir-ahead</name>
+            <value>on</value>
+          </option>
+        </options>
+      </volume>
+      <count>1</count>
+    </volumes>
+  </volInfo>
+</cliOutput>
+"""
+
+xml_volume_info_stopped = """
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+  <opRet>0</opRet>
+  <volInfo>
+    <volumes>
+      <volume>
+        <name>myvol1</name>
+        <status>1</status>
+      </volume>
+    </volumes>
+  </volInfo>
+</cliOutput>
+"""
+
+xml_command_success = """
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+  <opRet>0</opRet>
+</cliOutput>
+"""
+
+xml_command_fail = """
+<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
+<cliOutput>
+  <opRet>-1</opRet>
+  <opErrno>0</opErrno>
+  <opErrstr>Command Failed</opErrstr>
+</cliOutput>
+"""
+
 
 @skipIf(NO_MOCK, NO_MOCK_REASON)
 class GlusterfsTestCase(TestCase):
     '''
     Test cases for salt.modules.glusterfs
     '''
@@ -34,11 +176,11 @@ class GlusterfsTestCase(TestCase):
         '''
         Test if it return a list of gluster peers
         '''
-        mock = MagicMock(return_value='')
+        mock = MagicMock(return_value=xml_peer_present)
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-            self.assertListEqual(glusterfs.list_peers(), [])
+            self.assertListEqual(glusterfs.list_peers(), ['node02'])
 
-        mock = MagicMock(return_value='No peers present')
+        mock = MagicMock(return_value=xml_command_success)
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
             self.assertIsNone(glusterfs.list_peers())
@@ -46,16 +188,15 @@ class GlusterfsTestCase(TestCase):
     def test_peer(self):
         '''
-        Test if it add another node into the peer list.
+        Test if it adds another node into the peer list.
         '''
-        mock = MagicMock(return_value='')
+        mock = MagicMock(return_value=xml_command_success)
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-            self.assertEqual(glusterfs.peer('salt'), '')
+            self.assertTrue(glusterfs.peer('salt'))
 
         mock = MagicMock(return_value=True)
         with patch.object(suc, 'check_name', mock):
-            self.assertEqual(glusterfs.peer('a'),
-                             'Invalid characters in peer name')
+            self.assertRaises(SaltInvocationError, glusterfs.peer, 'a')
 
     # 'create' function tests: 1
@@ -65,27 +206,25 @@ class GlusterfsTestCase(TestCase):
         '''
         mock = MagicMock(return_value='')
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-            self.assertEqual(glusterfs.create('newvolume', 'host1:brick'),
-                             'Error: Brick paths must start with /')
+            self.assertRaises(
+                SaltInvocationError, glusterfs.create, 'newvolume', 'host1:brick')
 
         mock = MagicMock(return_value='')
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-            self.assertEqual(glusterfs.create('newvolume', 'host1/brick'),
-                             'Error: Brick syntax is <peer>:<path>')
+            self.assertRaises(
+                SaltInvocationError, glusterfs.create, 'newvolume', 'host1/brick')
 
-        mock = MagicMock(return_value='creation success')
+        mock = MagicMock(return_value=xml_command_fail)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertRaises(CommandExecutionError, glusterfs.create, 'newvolume', 'host1:/brick',
+                              True, True, True, 'tcp', True)
+
+        mock = MagicMock(return_value=xml_command_success)
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
             self.assertEqual(glusterfs.create('newvolume', 'host1:/brick',
                                               True, True, True, 'tcp', True),
                              'Volume newvolume created and started')
 
-        mock = MagicMock(return_value='')
-        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-            self.assertEqual(glusterfs.create('newvolume', 'host1:/brick',
-                                              True, True, True,
-                                              'tcp', True), '')
-
-        mock = MagicMock(return_value='')
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
             self.assertEqual(glusterfs.create('newvolume', 'host1:/brick'),
                              'Volume newvolume created. Start volume to use')
@@ -96,11 +235,11 @@ class GlusterfsTestCase(TestCase):
         '''
         Test if it list configured volumes
         '''
-        mock = MagicMock(return_value='No volumes present in cluster')
+        mock = MagicMock(return_value=xml_volume_absent)
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
             self.assertListEqual(glusterfs.list_volumes(), [])
 
-        mock = MagicMock(return_value='Newvolume1\nNewvolume2')
+        mock = MagicMock(return_value=xml_volume_present)
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
             self.assertListEqual(glusterfs.list_volumes(),
                                  ['Newvolume1', 'Newvolume2'])
@@ -111,50 +250,98 @@ class GlusterfsTestCase(TestCase):
         '''
         Test if it check the status of a gluster volume.
         '''
-        mock = MagicMock(return_value='No volumes present in cluster')
+        mock = MagicMock(return_value=xml_command_fail)
         with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-            self.assertDictEqual(glusterfs.status('myvolume'),
-                                 {'bricks': {}, 'healers': {}, 'nfs': {}})
+            self.assertRaises(
+                CommandExecutionError, glusterfs.status, 'myvol1')
 
-        mock = MagicMock(return_value='does not exist\n')
-        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-            self.assertEqual(glusterfs.status('myvolume'), 'does not exist')
-
-        mock = MagicMock(return_value='is not started')
-        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-            self.assertEqual(glusterfs.status('myvolume'), 'is not started')
+        res = {'bricks': {
+            'node01:/tmp/foo': {
+                'host': 'node01',
+                'hostname': 'node01',
+                'online': True,
+                'path': '/tmp/foo',
+                'peerid': '830700d7-0684-497c-a12c-c02e365fb90b',
+                'pid': '2470',
+                'port': '49155',
+                'ports': {
+                    'rdma': 'N/A',
+                    'tcp': '49155'},
+                'status': '1'}},
+            'healers': {},
+            'nfs': {
+                'node01': {
+                    'host': 'NFS Server',
+                    'hostname': 'NFS Server',
+                    'online': False,
+                    'path': 'localhost',
+                    'peerid': '830700d7-0684-497c-a12c-c02e365fb90b',
+                    'pid': '-1',
+                    'port': 'N/A',
+                    'ports': {
+                        'rdma': 'N/A',
+                        'tcp': 'N/A'},
+                    'status': '0'}}}
+        mock = MagicMock(return_value=xml_volume_status)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertDictEqual(glusterfs.status('myvol1'), res)
 
     # 'start_volume' function tests: 1
 
+    def test_volume_info(self):
+        '''
+        Test if it returns the volume info.
+        '''
+        res = {'myvol1': {
+            'brickCount': '1',
+            'bricks': {
+                'brick1': {
+                    'hostUuid': '830700d7-0684-497c-a12c-c02e365fb90b',
+                    'path': 'node01:/tmp/foo',
+                    'uuid': '830700d7-0684-497c-a12c-c02e365fb90b'}},
+            'disperseCount': '0',
+            'distCount': '1',
+            'id': 'f03c2180-cf55-4f77-ae0b-3650f57c82a1',
+            'name': 'myvol1',
+            'optCount': '1',
+            'options': {
+                'performance.readdir-ahead': 'on'},
+            'redundancyCount': '0',
+            'replicaCount': '1',
+            'status': '1',
+            'statusStr': 'Started',
+            'stripeCount': '1',
+            'transport': '0',
+            'type': '0',
+            'typeStr': 'Distribute'}}
+        mock = MagicMock(return_value=xml_volume_info_running)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertDictEqual(glusterfs.info('myvol1'), res)
+
     def test_start_volume(self):
         '''
         Test if it start a gluster volume.
         '''
         mock_list = MagicMock(return_value=['Newvolume1', 'Newvolume2'])
         with patch.object(glusterfs, 'list_volumes', mock_list):
-            mock_status = MagicMock(return_value={})
-            with patch.object(glusterfs, 'status', mock_status):
-                mock = MagicMock(return_value='creation success')
+            mock_status = MagicMock(return_value={'status': '1'})
+            with patch.object(glusterfs, 'info', mock_status):
+                mock = MagicMock(return_value=xml_command_success)
                 with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
                     self.assertEqual(glusterfs.start_volume('Newvolume1'),
                                      'Volume already started')
 
-            mock_status = MagicMock(return_value='')
-            with patch.object(glusterfs, 'status', mock_status):
-                mock_run = MagicMock(return_value='creation success')
+            mock_status = MagicMock(return_value={'status': '0'})
+            with patch.object(glusterfs, 'info', mock_status):
+                mock_run = MagicMock(return_value=xml_command_success)
                 with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}):
                     self.assertEqual(glusterfs.start_volume('Newvolume1'),
                                      'Volume Newvolume1 started')
 
-                mock = MagicMock(return_value='does not exist')
+                mock = MagicMock(return_value=xml_command_fail)
                 with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-                    self.assertEqual(glusterfs.start_volume('Newvolume1'),
-                                     'does not exist')
-
-            mock_run = MagicMock(return_value='No volumes present in cluster')
-            with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}):
-                self.assertEqual(glusterfs.start_volume('mycluster'),
-                                 'Volume does not exist')
+                    self.assertRaises(
+                        CommandExecutionError, glusterfs.start_volume, 'Newvolume1')
 
     # 'stop_volume' function tests: 1
@@ -164,19 +351,20 @@ class GlusterfsTestCase(TestCase):
         '''
         mock = MagicMock(return_value={})
         with patch.object(glusterfs, 'status', mock):
-            mock = MagicMock(return_value='creation success')
+            mock = MagicMock(return_value=xml_command_success)
             with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
                 self.assertEqual(glusterfs.stop_volume('Newvolume1'),
                                  'Volume Newvolume1 stopped')
 
-            mock = MagicMock(return_value='No volume exist')
+            mock = MagicMock(return_value=xml_command_fail)
             with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-                self.assertEqual(glusterfs.stop_volume('Newvolume1'),
-                                 'No volume exist')
+                self.assertRaises(
+                    CommandExecutionError, glusterfs.stop_volume, 'Newvolume1')
 
-        mock = MagicMock(return_value='')
-        with patch.object(glusterfs, 'status', mock):
-            self.assertEqual(glusterfs.stop_volume('Newvolume1'), '')
+            mock = MagicMock(return_value=xml_command_fail)
+            with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+                self.assertRaises(
+                    CommandExecutionError, glusterfs.stop_volume, 'Newvolume1')
 
     # 'delete' function tests: 1
@@ -184,64 +372,67 @@ class GlusterfsTestCase(TestCase):
         '''
         Test if it deletes a gluster volume.
         '''
-        ret = 'Error: Volume must be stopped before deletion'
         mock = MagicMock(return_value=['Newvolume1', 'Newvolume2'])
         with patch.object(glusterfs, 'list_volumes', mock):
-            self.assertEqual(glusterfs.delete('Newvolume3'),
-                             'Volume does not exist')
+            # volume doesn't exist
+            self.assertRaises(
+                SaltInvocationError, glusterfs.delete, 'Newvolume3')
 
-            mock = MagicMock(return_value='creation success')
-            with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-                self.assertEqual(glusterfs.delete('Newvolume1', False), ret)
+            mock = MagicMock(return_value={'status': '1'})
+            with patch.object(glusterfs, 'info', mock):
+                mock = MagicMock(return_value=xml_command_success)
+                # volume exists, should not be stopped, and is started
+                with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+                    self.assertRaises(
+                        SaltInvocationError, glusterfs.delete, 'Newvolume1', False)
 
-            mock = MagicMock(return_value='creation success')
-            with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-                self.assertEqual(glusterfs.delete('Newvolume1'),
-                                 'Volume Newvolume1 stopped and deleted')
+                # volume exists, should be stopped, and is started
+                with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+                    self.assertEqual(glusterfs.delete('Newvolume1'),
+                                     'Volume Newvolume1 stopped and deleted')
 
-            mock = MagicMock(return_value='')
-            with patch.object(glusterfs, 'status', mock):
-                mock = MagicMock(return_value='creation success')
+            # volume exists and isn't started
+            mock = MagicMock(return_value={'status': '0'})
+            with patch.object(glusterfs, 'info', mock):
+                mock = MagicMock(return_value=xml_command_success)
                 with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
                     self.assertEqual(glusterfs.delete('Newvolume1'),
                                      'Volume Newvolume1 deleted')
 
-            mock = MagicMock(return_value='does not exist')
-            with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-                self.assertEqual(glusterfs.delete('Newvolume1'),
-                                 'does not exist')
-
     # 'add_volume_bricks' function tests: 1
 
     def test_add_volume_bricks(self):
         '''
         Test if it add brick(s) to an existing volume
         '''
-        mock = MagicMock(return_value='does not exist')
-        with patch.object(glusterfs, 'status', mock):
-            self.assertEqual(glusterfs.add_volume_bricks('Newvolume1',
-                                                         ['bricks']),
-                             'does not exist')
-
-        mock = MagicMock(return_value='is not started')
-        with patch.object(glusterfs, 'status', mock):
-            self.assertEqual(glusterfs.add_volume_bricks('Newvolume1',
-                                                         ['bricks']),
-                             'is not started')
+        # volume does not exist
+        mock = MagicMock(return_value=xml_command_fail)
+        with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
+            self.assertRaises(
+                CommandExecutionError, glusterfs.add_volume_bricks, 'Newvolume1', ['bricks'])
 
         ret = '1 bricks successfully added to the volume Newvolume1'
-        mock = MagicMock(return_value={'bricks': {}, 'healers': {}, 'nfs': {}})
-        with patch.object(glusterfs, 'status', mock):
-            mock = MagicMock(return_value='creation success')
+        # volume does exist
+        mock = MagicMock(return_value={'bricks': {}})
+        with patch.object(glusterfs, 'info', mock):
+            mock = MagicMock(return_value=xml_command_success)
+            # ... and the added brick does not exist
             with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
                 self.assertEqual(glusterfs.add_volume_bricks('Newvolume1',
                                                              ['bricks']), ret)
 
-            mock = MagicMock(return_value='')
+        mock = MagicMock(
+            return_value={'bricks': {'brick1': {'path': 'bricks'}}})
+        with patch.object(glusterfs, 'info', mock):
+            # ... and the added brick does exist
             with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
-                self.assertEqual(glusterfs.add_volume_bricks('Newvolume1',
-                                                             ['bricks']), '')
+                # As a list
+                self.assertEqual(glusterfs.add_volume_bricks('Newvolume1', ['bricks']),
+                                 'Bricks already in volume Newvolume1')
+                # As a string
+                self.assertEqual(glusterfs.add_volume_bricks('Newvolume1', 'bricks'),
+                                 'Bricks already in volume Newvolume1')
+                # And empty list
                 self.assertEqual(glusterfs.add_volume_bricks('Newvolume1', []),
                                  'Bricks already in volume Newvolume1')