Convert glusterfs module to use xml

The gluster CLI can produce XML output for almost every command and,
when run in script mode, avoids pausing for interactive questions.
Make use of that capability instead of scraping text output that is
often subject to change between versions.

This should help future-proof the module, since the generic XML parsing
absorbs changes to the command output instead of breaking text scraping.
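
As a rough illustration of the approach (a minimal sketch, not code from
this commit; it assumes a gluster binary on the PATH and Python 3):

    import subprocess
    import xml.etree.ElementTree as ET

    def gluster_xml(command):
        # Run the CLI in script mode with XML output; the command string
        # is passed on stdin so long brick lists are not truncated.
        out = subprocess.run(
            ['gluster', '--xml', '--mode=script'],
            input='{0}\n'.format(command),
            capture_output=True, text=True, check=True).stdout
        root = ET.fromstring(out)
        # opRet/opErrstr are part of the cliOutput document, as in the
        # sample XML used by the tests below.
        if int(root.find('opRet').text) != 0:
            raise RuntimeError(root.find('opErrstr').text)
        return root

    # e.g. list peer hostnames:
    # [p.find('hostname').text for p in gluster_xml('peer status').iter('peer')]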

As part of this change, I added a glusterfs.info function and added a
force keyword argument to volume start and volume stop.
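
For example (hypothetical volume name, in the style of the CLI examples
in the docstrings):

    salt '*' glusterfs.info myvolume
    salt '*' glusterfs.stop_volume myvolume force=True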

Fixes #30051
Joe Julian 2015-12-28 21:24:12 -08:00
parent 3c63527313
commit 01a8e7ee10
2 changed files with 460 additions and 234 deletions


@@ -6,16 +6,17 @@ from __future__ import absolute_import
# Import python libs
import logging
import xml.etree.ElementTree as ET
# Import 3rd-party libs
# pylint: disable=import-error,redefined-builtin
from salt.ext.six.moves import range
from salt.ext.six.moves import shlex_quote as _cmd_quote
# pylint: enable=import-error,redefined-builtin
# Import salt libs
import salt.utils
import salt.utils.cloud as suc
from salt.exceptions import CommandExecutionError, SaltInvocationError
log = logging.getLogger(__name__)
@@ -41,6 +42,40 @@ def _get_minor_version():
return version
def _gluster(cmd):
'''
Perform a gluster command.
'''
# We will pass the command string as stdin to allow for much longer
# command strings. This is especially useful for creating large volumes
# where the list of bricks exceeds 128 characters.
return __salt__['cmd.run'](
'gluster --mode=script', stdin="{0}\n".format(cmd))
def _gluster_xml(cmd):
'''
Perform a gluster --xml command and check for and raise errors.
'''
root = ET.fromstring(
__salt__['cmd.run'](
'gluster --xml --mode=script', stdin="{0}\n".format(cmd)
).replace("\n", ""))
if int(root.find('opRet').text) != 0:
raise CommandExecutionError(root.find('opErrstr').text)
return root
def _etree_to_dict(t):
if len(t.getchildren()) > 0:
d = {}
for child in t.getchildren():
d[child.tag] = _etree_to_dict(child)
else:
d = t.text
return d
def list_peers():
'''
Return a list of gluster peers
@@ -70,12 +105,12 @@ def list_peers():
'''
get_peer_list = 'gluster peer status | awk \'/Hostname/ {print $2}\''
result = __salt__['cmd.run'](get_peer_list, python_shell=True)
if 'No peers present' in result:
root = _gluster_xml('peer status')
result = [x.find('hostname').text for x in root.iter('peer')]
if len(result) == 0:
return None
else:
return result.splitlines()
return result
def peer(name):
@@ -109,10 +144,11 @@ def peer(name):
'''
if suc.check_name(name, 'a-zA-Z0-9._-'):
return 'Invalid characters in peer name'
raise SaltInvocationError(
'Invalid characters in peer name "{0}"'.format(name))
cmd = 'gluster peer probe {0}'.format(name)
return __salt__['cmd.run'](cmd)
cmd = 'peer probe {0}'.format(name)
return _gluster_xml(cmd).find('opRet').text == '0'
def create(name, bricks, stripe=False, replica=False, device_vg=False,
@@ -165,20 +201,22 @@ def create(name, bricks, stripe=False, replica=False, device_vg=False,
# Error for block devices with multiple bricks
if device_vg and len(bricks) > 1:
return 'Error: Block device backend volume does not support multipl' +\
'bricks'
raise SaltInvocationError('Block device backend volume does not ' +
'support multiple bricks')
# Validate bricks syntax
for brick in bricks:
try:
peer_name, path = brick.split(':')
if not path.startswith('/'):
return 'Error: Brick paths must start with /'
raise SaltInvocationError(
'Brick paths must start with / in {0}'.format(brick))
except ValueError:
return 'Error: Brick syntax is <peer>:<path>'
raise SaltInvocationError(
'Brick syntax is <peer>:<path> got {0}'.format(brick))
# Format creation call
cmd = 'gluster volume create {0} '.format(name)
cmd = 'volume create {0} '.format(name)
if stripe:
cmd += 'stripe {0} '.format(stripe)
if replica:
@@ -192,16 +230,11 @@ def create(name, bricks, stripe=False, replica=False, device_vg=False,
cmd += ' force'
log.debug('Clustering command:\n{0}'.format(cmd))
ret = __salt__['cmd.run'](cmd)
if 'failed' in ret:
return ret
_gluster_xml(cmd)
if start:
result = __salt__['cmd.run']('gluster volume start {0}'.format(name))
if result.endswith('success'):
return 'Volume {0} created and started'.format(name)
else:
return result
_gluster_xml('gluster volume start {0}'.format(name))
return 'Volume {0} created and started'.format(name)
else:
return 'Volume {0} created. Start volume to use'.format(name)
@@ -217,11 +250,10 @@ def list_volumes():
salt '*' glusterfs.list_volumes
'''
results = __salt__['cmd.run']('gluster volume list').splitlines()
if results[0] == 'No volumes present in cluster':
return []
else:
return results
get_volume_list = 'gluster --xml volume list'
root = _gluster_xml('volume list')
results = [x.text for x in root.iter('volume')]
return results
def status(name):
@@ -237,126 +269,140 @@ def status(name):
salt '*' glusterfs.status myvolume
'''
# Get minor version
minor_version = _get_minor_version()
# Get volume status
cmd = 'gluster volume status {0}'.format(name)
result = __salt__['cmd.run'](cmd).splitlines()
if 'does not exist' in result[0]:
return result[0]
if 'is not started' in result[0]:
return result[0]
root = _gluster_xml('volume status {0}'.format(name))
ret = {'bricks': {}, 'nfs': {}, 'healers': {}}
# Iterate line by line, concatenating lines the gluster cli separated
for line_number in range(len(result)):
line = result[line_number]
if line.startswith('Brick'):
# See if this line is broken up into multiple lines
while len(line.split()) < 5:
line_number = line_number + 1
line = line.rstrip() + result[line_number]
# Parse Brick data
if minor_version >= 7:
brick, port, port_rdma, online, pid = line.split()[1:]
else:
brick, port, online, pid = line.split()[1:]
host, path = brick.split(':')
data = {'port': port, 'pid': pid, 'host': host, 'path': path}
if online == 'Y':
data['online'] = True
else:
data['online'] = False
# Store, keyed by <host>:<brick> string
ret['bricks'][brick] = data
elif line.startswith('NFS Server on'):
# See if this line is broken up into multiple lines
while len(line.split()) < 5:
line_number = line_number + 1
line = line.rstrip() + result[line_number]
def etree_legacy_wrap(t):
ret = _etree_to_dict(t)
ret['online'] = (ret['status'] == '1')
ret['host'] = ret['hostname']
return ret
# Parse NFS Server data
if minor_version >= 7:
host, port, port_rdma, online, pid = line.split()[3:]
else:
host, port, online, pid = line.split()[3:]
data = {'port': port, 'pid': pid}
if online == 'Y':
data['online'] = True
else:
data['online'] = False
# Store, keyed by hostname
ret['nfs'][host] = data
elif line.startswith('Self-heal Daemon on'):
# See if this line is broken up into multiple lines
while len(line.split()) < 5:
line_number = line_number + 1
line = line.rstrip() + result[line_number]
# Build a hash to map hostname to peerid
hostref = {}
for node in root.iter('node'):
peerid = node.find('peerid').text
hostname = node.find('hostname').text
if hostname not in ('NFS Server', 'Self-heal Daemon'):
hostref[peerid] = hostname
for node in root.iter('node'):
hostname = node.find('hostname').text
if hostname not in ('NFS Server', 'Self-heal Daemon'):
path = node.find('path').text
ret['bricks'][
'{0}:{1}'.format(hostname, path)] = etree_legacy_wrap(node)
elif hostname == 'NFS Server':
peerid = node.find('peerid').text
true_hostname = hostref[peerid]
ret['nfs'][true_hostname] = etree_legacy_wrap(node)
else:
peerid = node.find('peerid').text
true_hostname = hostref[peerid]
ret['healers'][true_hostname] = etree_legacy_wrap(node)
# Parse NFS Server data
if minor_version >= 7:
host, port, port_rdma, online, pid = line.split()[3:]
else:
host, port, online, pid = line.split()[3:]
data = {'port': port, 'pid': pid}
if online == 'Y':
data['online'] = True
else:
data['online'] = False
# Store, keyed by hostname
ret['healers'][host] = data
return ret
def start_volume(name):
def info(name):
'''
.. versionadded:: 2015.8.4
Return the gluster volume info.
name
Volume name
CLI Example:
.. code-block:: bash
salt '*' glusterfs.info myvolume
'''
cmd = 'volume info {0}'.format(name)
root = _gluster_xml(cmd)
volume = [x for x in root.iter('volume')][0]
ret = {name: _etree_to_dict(volume)}
bricks = {}
for i, brick in enumerate(volume.iter('brick'), start=1):
brickkey = 'brick{0}'.format(i)
bricks[brickkey] = {'path': brick.text}
for child in brick.getchildren():
if not child.tag == 'name':
bricks[brickkey].update({child.tag: child.text})
for k, v in brick.items():
bricks[brickkey][k] = v
ret[name]['bricks'] = bricks
options = {}
for option in volume.iter('option'):
options[option.find('name').text] = option.find('value').text
ret[name]['options'] = options
return ret
def start_volume(name, force=False):
'''
Start a gluster volume.
name
Volume name
force
Force the volume start even if the volume is started
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' glusterfs.start mycluster
'''
volumes = list_volumes()
if name in volumes:
if isinstance(status(name), dict):
return 'Volume already started'
cmd = 'gluster volume start {0}'.format(name)
result = __salt__['cmd.run'](cmd)
if result.endswith('success'):
return 'Volume {0} started'.format(name)
else:
return result
return 'Volume does not exist'
cmd = 'volume start {0}'.format(name)
if force:
cmd = '{0} force'.format(cmd)
volinfo = info(name)
if not force and volinfo['status'] == '1':
return 'Volume already started'
_gluster_xml(cmd)
return 'Volume {0} started'.format(name)
def stop_volume(name):
def stop_volume(name, force=False):
'''
Stop a gluster volume.
name
Volume name
force
Force stop the volume
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' glusterfs.stop_volume mycluster
'''
vol_status = status(name)
if isinstance(vol_status, dict):
cmd = 'yes | gluster volume stop {0}'.format(_cmd_quote(name))
result = __salt__['cmd.run'](cmd, python_shell=True)
if result.splitlines()[0].endswith('success'):
return 'Volume {0} stopped'.format(name)
else:
return result
return vol_status
status(name)
cmd = 'volume stop {0}'.format(name)
if force:
cmd += ' force'
_gluster_xml(cmd)
return 'Volume {0} stopped'.format(name)
def delete(target, stop=True):
@@ -370,28 +416,25 @@ def delete(target, stop=True):
Stop volume before delete if it is started, True by default
'''
if target not in list_volumes():
return 'Volume does not exist'
cmd = 'yes | gluster volume delete {0}'.format(_cmd_quote(target))
raise SaltInvocationError('Volume {0} does not exist'.format(target))
# Stop volume if requested to and it is running
if stop is True and isinstance(status(target), dict):
stop_volume(target)
stopped = True
else:
stopped = False
# Warn volume is running if stop not requested
if isinstance(status(target), dict):
return 'Error: Volume must be stopped before deletion'
running = (info(target)['status'] == '1')
result = __salt__['cmd.run'](cmd, python_shell=True)
if result.splitlines()[0].endswith('success'):
if stopped:
return 'Volume {0} stopped and deleted'.format(target)
else:
return 'Volume {0} deleted'.format(target)
if not stop and running:
# Fail if volume is running if stop is not requested
raise SaltInvocationError(
'Volume {0} must be stopped before deletion'.format(target))
if running:
stop_volume(target, force=True)
cmd = 'volume delete {0}'.format(target)
_gluster_xml(cmd)
if running:
return 'Volume {0} stopped and deleted'.format(target)
else:
return result
return 'Volume {0} deleted'.format(target)
def add_volume_bricks(name, bricks):
@@ -407,35 +450,27 @@ def add_volume_bricks(name, bricks):
new_bricks = []
cmd = 'echo yes | gluster volume add-brick {0}'.format(name)
cmd = 'volume add-brick {0}'.format(name)
if isinstance(bricks, str):
bricks = [bricks]
volume_bricks = status(name)
if 'does not exist' in volume_bricks:
return volume_bricks
if 'is not started' in volume_bricks:
return volume_bricks
volume_bricks = [x['path'] for x in info(name)['bricks'].values()]
for brick in bricks:
if brick in volume_bricks['bricks']:
log.debug('Brick {0} already in volume {1}...excluding from command'.format(brick, name))
if brick in volume_bricks:
log.debug(
'Brick {0} already in volume {1}...excluding from command'.format(brick, name))
else:
new_bricks.append(brick)
if len(new_bricks) > 0:
for brick in new_bricks:
cmd += ' '+str(brick)
cmd += ' {0}'.format(brick)
result = __salt__['cmd.run'](cmd)
_gluster_xml(cmd)
if result.endswith('success'):
return '{0} bricks successfully added to the volume {1}'.format(len(new_bricks), name)
else:
return result
return '{0} bricks successfully added to the volume {1}'.format(len(new_bricks), name)
else:
return 'Bricks already in volume {0}'.format(name)


@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
:codeauthor: :email:`Joe Julian <me@joejulian.name>`
'''
# Import Python libs
@@ -18,13 +19,154 @@ from salttesting.mock import (
# Import Salt Libs
from salt.modules import glusterfs
import salt.utils.cloud as suc
from salt.exceptions import CommandExecutionError, SaltInvocationError
# Globals
glusterfs.__salt__ = {}
xml_peer_present = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
<opRet>0</opRet>
<peer>
<hostname>node02</hostname>
</peer>
</cliOutput>
"""
xml_volume_present = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
<opRet>0</opRet>
<volList>
<volume>Newvolume1</volume>
<volume>Newvolume2</volume>
</volList>
</cliOutput>
"""
xml_volume_absent = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
<opRet>0</opRet>
<volList>
<count>0</count>
</volList>
</cliOutput>
"""
xml_volume_status = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
<opRet>0</opRet>
<volStatus>
<volumes>
<volume>
<volName>myvol1</volName>
<nodeCount>3</nodeCount>
<node>
<hostname>node01</hostname>
<path>/tmp/foo</path>
<peerid>830700d7-0684-497c-a12c-c02e365fb90b</peerid>
<status>1</status>
<port>49155</port>
<ports>
<tcp>49155</tcp>
<rdma>N/A</rdma>
</ports>
<pid>2470</pid>
</node>
<node>
<hostname>NFS Server</hostname>
<path>localhost</path>
<peerid>830700d7-0684-497c-a12c-c02e365fb90b</peerid>
<status>0</status>
<port>N/A</port>
<ports>
<tcp>N/A</tcp>
<rdma>N/A</rdma>
</ports>
<pid>-1</pid>
</node>
<tasks/>
</volume>
</volumes>
</volStatus>
</cliOutput>
"""
xml_volume_info_running = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
<opRet>0</opRet>
<volInfo>
<volumes>
<volume>
<name>myvol1</name>
<id>f03c2180-cf55-4f77-ae0b-3650f57c82a1</id>
<status>1</status>
<statusStr>Started</statusStr>
<brickCount>1</brickCount>
<distCount>1</distCount>
<stripeCount>1</stripeCount>
<replicaCount>1</replicaCount>
<disperseCount>0</disperseCount>
<redundancyCount>0</redundancyCount>
<type>0</type>
<typeStr>Distribute</typeStr>
<transport>0</transport>
<bricks>
<brick uuid="830700d7-0684-497c-a12c-c02e365fb90b">node01:/tmp/foo<name>node01:/tmp/foo</name><hostUuid>830700d7-0684-497c-a12c-c02e365fb90b</hostUuid></brick>
</bricks>
<optCount>1</optCount>
<options>
<option>
<name>performance.readdir-ahead</name>
<value>on</value>
</option>
</options>
</volume>
<count>1</count>
</volumes>
</volInfo>
</cliOutput>
"""
xml_volume_info_stopped = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
<opRet>0</opRet>
<volInfo>
<volumes>
<volume>
<name>myvol1</name>
<status>1</status>
</volume>
</volumes>
</volInfo>
</cliOutput>
"""
xml_command_success = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
<opRet>0</opRet>
</cliOutput>
"""
xml_command_fail = """
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<cliOutput>
<opRet>-1</opRet>
<opErrno>0</opErrno>
<opErrstr>Command Failed</opErrstr>
</cliOutput>
"""
@skipIf(NO_MOCK, NO_MOCK_REASON)
class GlusterfsTestCase(TestCase):
'''
Test cases for salt.modules.glusterfs
'''
@@ -34,11 +176,11 @@ class GlusterfsTestCase(TestCase):
'''
Test if it return a list of gluster peers
'''
mock = MagicMock(return_value='')
mock = MagicMock(return_value=xml_peer_present)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertListEqual(glusterfs.list_peers(), [])
self.assertListEqual(glusterfs.list_peers(), ['node02'])
mock = MagicMock(return_value='No peers present')
mock = MagicMock(return_value=xml_command_success)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertIsNone(glusterfs.list_peers())
@@ -46,16 +188,15 @@ class GlusterfsTestCase(TestCase):
def test_peer(self):
'''
Test if it add another node into the peer list.
Test if it adds another node into the peer list.
'''
mock = MagicMock(return_value='')
mock = MagicMock(return_value=xml_command_success)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.peer('salt'), '')
self.assertTrue(glusterfs.peer('salt'))
mock = MagicMock(return_value=True)
with patch.object(suc, 'check_name', mock):
self.assertEqual(glusterfs.peer('a'),
'Invalid characters in peer name')
self.assertRaises(SaltInvocationError, glusterfs.peer, 'a')
# 'create' function tests: 1
@@ -65,27 +206,25 @@ class GlusterfsTestCase(TestCase):
'''
mock = MagicMock(return_value='')
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.create('newvolume', 'host1:brick'),
'Error: Brick paths must start with /')
self.assertRaises(
SaltInvocationError, glusterfs.create, 'newvolume', 'host1:brick')
mock = MagicMock(return_value='')
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.create('newvolume', 'host1/brick'),
'Error: Brick syntax is <peer>:<path>')
self.assertRaises(
SaltInvocationError, glusterfs.create, 'newvolume', 'host1/brick')
mock = MagicMock(return_value='creation success')
mock = MagicMock(return_value=xml_command_fail)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertRaises(CommandExecutionError, glusterfs.create, 'newvolume', 'host1:/brick',
True, True, True, 'tcp', True)
mock = MagicMock(return_value=xml_command_success)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.create('newvolume', 'host1:/brick',
True, True, True, 'tcp', True),
'Volume newvolume created and started')
mock = MagicMock(return_value='')
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.create('newvolume', 'host1:/brick',
True, True, True,
'tcp', True), '')
mock = MagicMock(return_value='')
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.create('newvolume', 'host1:/brick'),
'Volume newvolume created. Start volume to use')
@@ -96,11 +235,11 @@ class GlusterfsTestCase(TestCase):
'''
Test if it list configured volumes
'''
mock = MagicMock(return_value='No volumes present in cluster')
mock = MagicMock(return_value=xml_volume_absent)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertListEqual(glusterfs.list_volumes(), [])
mock = MagicMock(return_value='Newvolume1\nNewvolume2')
mock = MagicMock(return_value=xml_volume_present)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertListEqual(glusterfs.list_volumes(),
['Newvolume1', 'Newvolume2'])
@@ -111,50 +250,98 @@ class GlusterfsTestCase(TestCase):
'''
Test if it check the status of a gluster volume.
'''
mock = MagicMock(return_value='No volumes present in cluster')
mock = MagicMock(return_value=xml_command_fail)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertDictEqual(glusterfs.status('myvolume'),
{'bricks': {}, 'healers': {}, 'nfs': {}})
self.assertRaises(
CommandExecutionError, glusterfs.status, 'myvol1')
mock = MagicMock(return_value='does not exist\n')
res = {'bricks': {
'node01:/tmp/foo': {
'host': 'node01',
'hostname': 'node01',
'online': True,
'path': '/tmp/foo',
'peerid': '830700d7-0684-497c-a12c-c02e365fb90b',
'pid': '2470',
'port': '49155',
'ports': {
'rdma': 'N/A',
'tcp': '49155'},
'status': '1'}},
'healers': {},
'nfs': {
'node01': {
'host': 'NFS Server',
'hostname': 'NFS Server',
'online': False,
'path': 'localhost',
'peerid': '830700d7-0684-497c-a12c-c02e365fb90b',
'pid': '-1',
'port': 'N/A',
'ports': {
'rdma': 'N/A',
'tcp': 'N/A'},
'status': '0'}}}
mock = MagicMock(return_value=xml_volume_status)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.status('myvolume'), 'does not exist')
mock = MagicMock(return_value='is not started')
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.status('myvolume'), 'is not started')
self.assertDictEqual(glusterfs.status('myvol1'), res)
# 'start_volume' function tests: 1
def test_volume_info(self):
'''
Test if it returns the volume info.
'''
res = {'myvol1': {
'brickCount': '1',
'bricks': {
'brick1': {
'hostUuid': '830700d7-0684-497c-a12c-c02e365fb90b',
'path': 'node01:/tmp/foo',
'uuid': '830700d7-0684-497c-a12c-c02e365fb90b'}},
'disperseCount': '0',
'distCount': '1',
'id': 'f03c2180-cf55-4f77-ae0b-3650f57c82a1',
'name': 'myvol1',
'optCount': '1',
'options': {
'performance.readdir-ahead': 'on'},
'redundancyCount': '0',
'replicaCount': '1',
'status': '1',
'statusStr': 'Started',
'stripeCount': '1',
'transport': '0',
'type': '0',
'typeStr': 'Distribute'}}
mock = MagicMock(return_value=xml_volume_info_running)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertDictEqual(glusterfs.info('myvol1'), res)
def test_start_volume(self):
'''
Test if it start a gluster volume.
'''
mock_list = MagicMock(return_value=['Newvolume1', 'Newvolume2'])
with patch.object(glusterfs, 'list_volumes', mock_list):
mock_status = MagicMock(return_value={})
with patch.object(glusterfs, 'status', mock_status):
mock = MagicMock(return_value='creation success')
mock_status = MagicMock(return_value={'status': '1'})
with patch.object(glusterfs, 'info', mock_status):
mock = MagicMock(return_value=xml_command_success)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.start_volume('Newvolume1'),
'Volume already started')
mock_status = MagicMock(return_value='')
with patch.object(glusterfs, 'status', mock_status):
mock_run = MagicMock(return_value='creation success')
mock_status = MagicMock(return_value={'status': '0'})
with patch.object(glusterfs, 'info', mock_status):
mock_run = MagicMock(return_value=xml_command_success)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}):
self.assertEqual(glusterfs.start_volume('Newvolume1'),
'Volume Newvolume1 started')
mock = MagicMock(return_value='does not exist')
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.start_volume('Newvolume1'),
'does not exist')
mock_run = MagicMock(return_value='No volumes present in cluster')
with patch.dict(glusterfs.__salt__, {'cmd.run': mock_run}):
self.assertEqual(glusterfs.start_volume('mycluster'),
'Volume does not exist')
mock = MagicMock(return_value=xml_command_fail)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertRaises(
CommandExecutionError, glusterfs.start_volume, 'Newvolume1')
# 'stop_volume' function tests: 1
@@ -164,19 +351,20 @@ class GlusterfsTestCase(TestCase):
'''
mock = MagicMock(return_value={})
with patch.object(glusterfs, 'status', mock):
mock = MagicMock(return_value='creation success')
mock = MagicMock(return_value=xml_command_success)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.stop_volume('Newvolume1'),
'Volume Newvolume1 stopped')
mock = MagicMock(return_value='No volume exist')
mock = MagicMock(return_value=xml_command_fail)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.stop_volume('Newvolume1'),
'No volume exist')
self.assertRaises(
CommandExecutionError, glusterfs.stop_volume, 'Newvolume1')
mock = MagicMock(return_value='')
with patch.object(glusterfs, 'status', mock):
self.assertEqual(glusterfs.stop_volume('Newvolume1'), '')
mock = MagicMock(return_value=xml_command_fail)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertRaises(
CommandExecutionError, glusterfs.stop_volume, 'Newvolume1')
# 'delete' function tests: 1
@@ -184,64 +372,67 @@ class GlusterfsTestCase(TestCase):
'''
Test if it deletes a gluster volume.
'''
ret = 'Error: Volume must be stopped before deletion'
mock = MagicMock(return_value=['Newvolume1', 'Newvolume2'])
with patch.object(glusterfs, 'list_volumes', mock):
self.assertEqual(glusterfs.delete('Newvolume3'),
'Volume does not exist')
# volume doesn't exist
self.assertRaises(
SaltInvocationError, glusterfs.delete, 'Newvolume3')
mock = MagicMock(return_value='creation success')
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.delete('Newvolume1', False), ret)
mock = MagicMock(return_value={'status': '1'})
with patch.object(glusterfs, 'info', mock):
mock = MagicMock(return_value=xml_command_success)
# volume exists, should not be stopped, and is started
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertRaises(
SaltInvocationError, glusterfs.delete, 'Newvolume1', False)
mock = MagicMock(return_value='creation success')
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.delete('Newvolume1'),
'Volume Newvolume1 stopped and deleted')
# volume exists, should be stopped, and is started
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.delete('Newvolume1'),
'Volume Newvolume1 stopped and deleted')
mock = MagicMock(return_value='')
with patch.object(glusterfs, 'status', mock):
mock = MagicMock(return_value='creation success')
# volume exists and isn't started
mock = MagicMock(return_value={'status': '0'})
with patch.object(glusterfs, 'info', mock):
mock = MagicMock(return_value=xml_command_success)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.delete('Newvolume1'),
'Volume Newvolume1 deleted')
mock = MagicMock(return_value='does not exist')
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.delete('Newvolume1'),
'does not exist')
# 'add_volume_bricks' function tests: 1
def test_add_volume_bricks(self):
'''
Test if it add brick(s) to an existing volume
'''
mock = MagicMock(return_value='does not exist')
with patch.object(glusterfs, 'status', mock):
self.assertEqual(glusterfs.add_volume_bricks('Newvolume1',
['bricks']),
'does not exist')
mock = MagicMock(return_value='is not started')
with patch.object(glusterfs, 'status', mock):
self.assertEqual(glusterfs.add_volume_bricks('Newvolume1',
['bricks']),
'is not started')
# volume does not exist
mock = MagicMock(return_value=xml_command_fail)
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertRaises(
CommandExecutionError, glusterfs.add_volume_bricks, 'Newvolume1', ['bricks'])
ret = '1 bricks successfully added to the volume Newvolume1'
mock = MagicMock(return_value={'bricks': {}, 'healers': {}, 'nfs': {}})
with patch.object(glusterfs, 'status', mock):
mock = MagicMock(return_value='creation success')
# volume does exist
mock = MagicMock(return_value={'bricks': {}})
with patch.object(glusterfs, 'info', mock):
mock = MagicMock(return_value=xml_command_success)
# ... and the added brick does not exist
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.add_volume_bricks('Newvolume1',
['bricks']), ret)
mock = MagicMock(return_value='')
mock = MagicMock(
return_value={'bricks': {'brick1': {'path': 'bricks'}}})
with patch.object(glusterfs, 'info', mock):
# ... and the added brick does exist
with patch.dict(glusterfs.__salt__, {'cmd.run': mock}):
self.assertEqual(glusterfs.add_volume_bricks('Newvolume1',
['bricks']), '')
# As a list
self.assertEqual(glusterfs.add_volume_bricks('Newvolume1', ['bricks']),
'Bricks already in volume Newvolume1')
# As a string
self.assertEqual(glusterfs.add_volume_bricks('Newvolume1', 'bricks'),
'Bricks already in volume Newvolume1')
# And empty list
self.assertEqual(glusterfs.add_volume_bricks('Newvolume1', []),
'Bricks already in volume Newvolume1')